Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt')
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/Makefile        |    2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c          | 6121
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h          |  668
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c |  444
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h |   51
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c      |  220
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h      |   14
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c  | 1190
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h  |   41
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c  | 2147
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h  |   75
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h      | 2703
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c     |  817
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h     |  140
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c      |  980
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h      |  147
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c    |  559
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h    |    3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c       |  358
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c      |   71
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h      |   23
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c      |  211
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h      |   12
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c      |  245
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h      |   18
25 files changed, 12834 insertions(+), 4426 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index cb97ec56fdec..2bc2b707d6ee 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
+bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_coredump.o
bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c5c8effc0139..c78b6e9dea2c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -37,6 +37,7 @@
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
+#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
@@ -55,27 +56,28 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>
+#include <linux/align.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
+#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
+#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
-
-static const char version[] =
- "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
+#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
+ NETIF_MSG_TX_ERR)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
-MODULE_VERSION(DRV_MODULE_VERSION);
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
@@ -83,52 +85,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
#define BNXT_TX_PUSH_THRESH 164
-enum board_idx {
- BCM57301,
- BCM57302,
- BCM57304,
- BCM57417_NPAR,
- BCM58700,
- BCM57311,
- BCM57312,
- BCM57402,
- BCM57404,
- BCM57406,
- BCM57402_NPAR,
- BCM57407,
- BCM57412,
- BCM57414,
- BCM57416,
- BCM57417,
- BCM57412_NPAR,
- BCM57314,
- BCM57417_SFP,
- BCM57416_SFP,
- BCM57404_NPAR,
- BCM57406_NPAR,
- BCM57407_SFP,
- BCM57407_NPAR,
- BCM57414_NPAR,
- BCM57416_NPAR,
- BCM57452,
- BCM57454,
- BCM5745x_NPAR,
- BCM57508,
- BCM57504,
- BCM57502,
- BCM57508_NPAR,
- BCM57504_NPAR,
- BCM57502_NPAR,
- BCM58802,
- BCM58804,
- BCM58808,
- NETXTREME_E_VF,
- NETXTREME_C_VF,
- NETXTREME_S_VF,
- NETXTREME_E_P5_VF,
-};
-
-/* indexed by enum above */
+/* indexed by enum board_idx */
static const struct {
char *name;
} board_info[] = {
@@ -173,7 +130,10 @@ static const struct {
[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
+ [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
+ [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
+ [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
@@ -225,15 +185,25 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
+ { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
+ { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
+ { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
+ { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
+ { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
+ { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
{ 0 }
@@ -258,6 +228,13 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
+ ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
+ ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
+ ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
+ ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
+ ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
+ ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
+ ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -265,7 +242,9 @@ static struct workqueue_struct *bnxt_pf_wq;
static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
- idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
+ idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
+ idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
+ idx == NETXTREME_E_P5_VF_HV);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -279,13 +258,15 @@ static bool bnxt_vf_pciid(enum board_idx idx)
writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
#define BNXT_DB_NQ_P5(db, idx) \
- writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), \
+ (db)->doorbell)
#define BNXT_DB_CQ_ARM(db, idx) \
writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
#define BNXT_DB_NQ_ARM_P5(db, idx) \
- writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
+ (db)->doorbell)
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
@@ -306,8 +287,8 @@ static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
if (bp->flags & BNXT_FLAG_CHIP_P5)
- writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
- db->doorbell);
+ bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
+ RING_CMP(idx), db->doorbell);
else
BNXT_DB_CQ(db, idx);
}
@@ -344,6 +325,33 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
return md_dst->u.port_info.port_id;
}
+static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ u16 prod)
+{
+ bnxt_db_write(bp, &txr->tx_db, prod);
+ txr->kick_pending = 0;
+}
+
+static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr,
+ struct netdev_queue *txq)
+{
+ netif_tx_stop_queue(txq);
+
+ /* netif_tx_stop_queue() must be done before checking
+ * tx index in bnxt_tx_avail() below, because in
+ * bnxt_tx_int(), we update tx index before checking for
+ * netif_tx_queue_stopped().
+ */
+ smp_mb();
+ if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
+ netif_tx_wake_queue(txq);
+ return false;
+ }
+
+ return true;
+}
+
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
@@ -358,10 +366,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct pci_dev *pdev = bp->pdev;
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
+ __le32 lflags = 0;
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
dev_kfree_skb_any(skb);
+ dev_core_stats_tx_dropped_inc(dev);
return NETDEV_TX_OK;
}
@@ -371,8 +381,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
free_size = bnxt_tx_avail(bp, txr);
if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
- netif_tx_stop_queue(txq);
- return NETDEV_TX_BUSY;
+ /* We must have raced with NAPI cleanup */
+ if (net_ratelimit() && txr->kick_pending)
+ netif_warn(bp, tx_err, dev,
+ "bnxt: ring busy w/ flush pending!\n");
+ if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
+ return NETDEV_TX_BUSY;
}
length = skb->len;
@@ -399,7 +413,28 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
}
- if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
+ atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
+ if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
+ &ptp->tx_hdr_off)) {
+ if (vlan_tag_flags)
+ ptp->tx_hdr_off += VLAN_HLEN;
+ lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ } else {
+ atomic_inc(&bp->ptp_cfg->tx_avail);
+ }
+ }
+ }
+
+ if (unlikely(skb->no_fcs))
+ lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
+
+ if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
+ !lflags) {
struct tx_push_buffer *tx_push_buf = txr->tx_push;
struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
@@ -474,21 +509,16 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
normal_tx:
if (length < BNXT_MIN_PKT_SIZE) {
pad = BNXT_MIN_PKT_SIZE - length;
- if (skb_pad(skb, pad)) {
+ if (skb_pad(skb, pad))
/* SKB already freed. */
- tx_buf->skb = NULL;
- return NETDEV_TX_OK;
- }
+ goto tx_kick_pending;
length = BNXT_MIN_PKT_SIZE;
}
mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
- dev_kfree_skb_any(skb);
- tx_buf->skb = NULL;
- return NETDEV_TX_OK;
- }
+ if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+ goto tx_free;
dma_unmap_addr_set(tx_buf, mapping, mapping);
flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
@@ -500,26 +530,23 @@ normal_tx:
txbd1 = (struct tx_bd_ext *)
&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
- txbd1->tx_bd_hsize_lflags = 0;
+ txbd1->tx_bd_hsize_lflags = lflags;
if (skb_is_gso(skb)) {
u32 hdr_len;
if (skb->encapsulation)
- hdr_len = skb_inner_network_offset(skb) +
- skb_inner_network_header_len(skb) +
- inner_tcp_hdrlen(skb);
+ hdr_len = skb_inner_tcp_all_headers(skb);
else
- hdr_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
- txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
+ txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
TX_BD_FLAGS_T_IPID |
(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
length = skb_shinfo(skb)->gso_size;
txbd1->tx_bd_mss = cpu_to_le32(length);
length += hdr_len;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- txbd1->tx_bd_hsize_lflags =
+ txbd1->tx_bd_hsize_lflags |=
cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
txbd1->tx_bd_mss = 0;
}
@@ -566,6 +593,8 @@ normal_tx:
netdev_tx_sent_queue(txq, skb->len);
+ skb_tx_timestamp(skb);
+
/* Sync BD data before updating doorbell */
wmb();
@@ -573,36 +602,31 @@ normal_tx:
txr->tx_prod = prod;
if (!netdev_xmit_more() || netif_xmit_stopped(txq))
- bnxt_db_write(bp, &txr->tx_db, prod);
+ bnxt_txr_db_kick(bp, txr, prod);
+ else
+ txr->kick_pending = 1;
tx_done:
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
if (netdev_xmit_more() && !tx_buf->is_push)
- bnxt_db_write(bp, &txr->tx_db, prod);
+ bnxt_txr_db_kick(bp, txr, prod);
- netif_tx_stop_queue(txq);
-
- /* netif_tx_stop_queue() must be done before checking
- * tx index in bnxt_tx_avail() below, because in
- * bnxt_tx_int(), we update tx index before checking for
- * netif_tx_queue_stopped().
- */
- smp_mb();
- if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
- netif_tx_wake_queue(txq);
+ bnxt_txr_netif_try_stop_queue(bp, txr, txq);
}
return NETDEV_TX_OK;
tx_dma_error:
+ if (BNXT_TX_PTP_IS_SET(lflags))
+ atomic_inc(&bp->ptp_cfg->tx_avail);
+
last_frag = i;
/* start back at beginning and unmap skb */
prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod];
- tx_buf->skb = NULL;
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ skb_headlen(skb), DMA_TO_DEVICE);
prod = NEXT_TX(prod);
/* unmap remaining mapped pages */
@@ -611,10 +635,16 @@ tx_dma_error:
tx_buf = &txr->tx_buf_ring[prod];
dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
+tx_free:
dev_kfree_skb_any(skb);
+tx_kick_pending:
+ if (txr->kick_pending)
+ bnxt_txr_db_kick(bp, txr, txr->tx_prod);
+ txr->tx_buf_ring[txr->tx_prod].skb = NULL;
+ dev_core_stats_tx_dropped_inc(dev);
return NETDEV_TX_OK;
}
@@ -637,13 +667,15 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
skb = tx_buf->skb;
tx_buf->skb = NULL;
+ tx_bytes += skb->len;
+
if (tx_buf->is_push) {
tx_buf->is_push = 0;
goto next_tx_int;
}
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ skb_headlen(skb), DMA_TO_DEVICE);
last = tx_buf->nr_frags;
for (j = 0; j < last; j++) {
@@ -653,13 +685,21 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
&pdev->dev,
dma_unmap_addr(tx_buf, mapping),
skb_frag_size(&skb_shinfo(skb)->frags[j]),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
+ }
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ /* PTP worker takes ownership of the skb */
+ if (!bnxt_get_tx_ts_p5(bp, skb))
+ skb = NULL;
+ else
+ atomic_inc(&bp->ptp_cfg->tx_avail);
+ }
}
next_tx_int:
cons = NEXT_TX(cons);
- tx_bytes += skb->len;
dev_kfree_skb_any(skb);
}
@@ -674,14 +714,9 @@ next_tx_int:
smp_mb();
if (unlikely(netif_tx_queue_stopped(txq)) &&
- (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
- __netif_tx_lock(txq, smp_processor_id());
- if (netif_tx_queue_stopped(txq) &&
- bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
- txr->dev_state != BNXT_DEV_STATE_CLOSING)
- netif_tx_wake_queue(txq);
- __netif_tx_unlock(txq);
- }
+ bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
+ READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
+ netif_tx_wake_queue(txq);
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -701,17 +736,19 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
}
- *mapping += bp->rx_dma_offset;
return page;
}
-static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
+static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
gfp_t gfp)
{
u8 *data;
struct pci_dev *pdev = bp->pdev;
- data = kmalloc(bp->rx_buf_size, gfp);
+ if (gfp == GFP_ATOMIC)
+ data = napi_alloc_frag(bp->rx_buf_size);
+ else
+ data = netdev_alloc_frag(bp->rx_buf_size);
if (!data)
return NULL;
@@ -720,7 +757,7 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(&pdev->dev, *mapping)) {
- kfree(data);
+ skb_free_frag(data);
data = NULL;
}
return data;
@@ -740,10 +777,11 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
if (!page)
return -ENOMEM;
+ mapping += bp->rx_dma_offset;
rx_buf->data = page;
rx_buf->data_ptr = page_address(page) + bp->rx_offset;
} else {
- u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+ u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
if (!data)
return -ENOMEM;
@@ -800,33 +838,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
u16 sw_prod = rxr->rx_sw_agg_prod;
unsigned int offset = 0;
- if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
- page = rxr->rx_page;
- if (!page) {
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+
+ if (!page)
+ return -ENOMEM;
+
+ } else {
+ if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+ page = rxr->rx_page;
+ if (!page) {
+ page = alloc_page(gfp);
+ if (!page)
+ return -ENOMEM;
+ rxr->rx_page = page;
+ rxr->rx_page_offset = 0;
+ }
+ offset = rxr->rx_page_offset;
+ rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+ if (rxr->rx_page_offset == PAGE_SIZE)
+ rxr->rx_page = NULL;
+ else
+ get_page(page);
+ } else {
page = alloc_page(gfp);
if (!page)
return -ENOMEM;
- rxr->rx_page = page;
- rxr->rx_page_offset = 0;
}
- offset = rxr->rx_page_offset;
- rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
- if (rxr->rx_page_offset == PAGE_SIZE)
- rxr->rx_page = NULL;
- else
- get_page(page);
- } else {
- page = alloc_page(gfp);
- if (!page)
- return -ENOMEM;
- }
- mapping = dma_map_page_attrs(&pdev->dev, page, offset,
- BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
- DMA_ATTR_WEAK_ORDERING);
- if (dma_mapping_error(&pdev->dev, mapping)) {
- __free_page(page);
- return -EIO;
+ mapping = dma_map_page_attrs(&pdev->dev, page, offset,
+ BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+ DMA_ATTR_WEAK_ORDERING);
+ if (dma_mapping_error(&pdev->dev, mapping)) {
+ __free_page(page);
+ return -EIO;
+ }
}
if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
@@ -922,6 +968,39 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
rxr->rx_sw_agg_prod = sw_prod;
}
+static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 cons, void *data, u8 *data_ptr,
+ dma_addr_t dma_addr,
+ unsigned int offset_and_len)
+{
+ unsigned int len = offset_and_len & 0xffff;
+ struct page *page = data;
+ u16 prod = rxr->rx_prod;
+ struct sk_buff *skb;
+ int err;
+
+ err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ return NULL;
+ }
+ dma_addr -= bp->rx_dma_offset;
+ dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+ skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE +
+ bp->rx_dma_offset);
+ if (!skb) {
+ __free_page(page);
+ return NULL;
+ }
+ skb_mark_for_recycle(skb);
+ skb_reserve(skb, bp->rx_dma_offset);
+ __skb_put(skb, len);
+
+ return skb;
+}
+
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
u16 cons, void *data, u8 *data_ptr,
@@ -944,7 +1023,6 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
dma_addr -= bp->rx_dma_offset;
dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
- page_pool_release_page(rxr->page_pool, page);
if (unlikely(!payload))
payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -955,6 +1033,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
return NULL;
}
+ skb_mark_for_recycle(skb);
off = (void *)data_ptr - page_address(page);
skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
@@ -985,11 +1064,11 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
return NULL;
}
- skb = build_skb(data, 0);
+ skb = build_skb(data, bp->rx_buf_size);
dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
if (!skb) {
- kfree(data);
+ skb_free_frag(data);
return NULL;
}
@@ -998,22 +1077,24 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
return skb;
}
-static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct sk_buff *skb, u16 idx,
- u32 agg_bufs, bool tpa)
+static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct skb_shared_info *shinfo,
+ u16 idx, u32 agg_bufs, bool tpa,
+ struct xdp_buff *xdp)
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct pci_dev *pdev = bp->pdev;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
+ u32 i, total_frag_len = 0;
bool p5_tpa = false;
- u32 i;
if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
+ skb_frag_t *frag = &shinfo->frags[i];
u16 cons, frag_len;
struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf;
@@ -1029,8 +1110,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
cons_rx_buf = &rxr->rx_agg_ring[cons];
- skb_fill_page_desc(skb, i, cons_rx_buf->page,
- cons_rx_buf->offset, frag_len);
+ skb_frag_off_set(frag, cons_rx_buf->offset);
+ skb_frag_size_set(frag, frag_len);
+ __skb_frag_set_page(frag, cons_rx_buf->page);
+ shinfo->nr_frags = i + 1;
__clear_bit(cons, rxr->rx_agg_bmap);
/* It is possible for bnxt_alloc_rx_page() to allocate
@@ -1041,16 +1124,14 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
page = cons_rx_buf->page;
cons_rx_buf->page = NULL;
+ if (xdp && page_is_pfmemalloc(page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
- struct skb_shared_info *shinfo;
unsigned int nr_frags;
- shinfo = skb_shinfo(skb);
nr_frags = --shinfo->nr_frags;
__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
-
- dev_kfree_skb(skb);
-
cons_rx_buf->page = page;
/* Update prod since possibly some pages have been
@@ -1058,23 +1139,62 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
*/
rxr->rx_agg_prod = prod;
bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
- return NULL;
+ return 0;
}
dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
- PCI_DMA_FROMDEVICE,
+ bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
- skb->data_len += frag_len;
- skb->len += frag_len;
- skb->truesize += PAGE_SIZE;
-
+ total_frag_len += frag_len;
prod = NEXT_RX_AGG(prod);
}
rxr->rx_agg_prod = prod;
+ return total_frag_len;
+}
+
+static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ u32 total_frag_len = 0;
+
+ total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
+ agg_bufs, tpa, NULL);
+ if (!total_frag_len) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ skb->data_len += total_frag_len;
+ skb->len += total_frag_len;
+ skb->truesize += PAGE_SIZE * agg_bufs;
return skb;
}
+static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct xdp_buff *xdp, u16 idx,
+ u32 agg_bufs, bool tpa)
+{
+ struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
+ u32 total_frag_len = 0;
+
+ if (!xdp_buff_has_frags(xdp))
+ shinfo->nr_frags = 0;
+
+ total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
+ idx, agg_bufs, tpa, xdp);
+ if (total_frag_len) {
+ xdp_buff_set_frags_flag(xdp);
+ shinfo->nr_frags = agg_bufs;
+ shinfo->xdp_frags_size = total_frag_len;
+ }
+ return total_frag_len;
+}
+
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u8 agg_bufs, u32 *raw_cons)
{
@@ -1145,6 +1265,9 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
+ if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
+ return;
+
if (BNXT_PF(bp))
queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
else
@@ -1159,19 +1282,14 @@ static void bnxt_queue_sp_work(struct bnxt *bp)
schedule_work(&bp->sp_task);
}
-static void bnxt_cancel_sp_work(struct bnxt *bp)
-{
- if (BNXT_PF(bp))
- flush_workqueue(bnxt_pf_wq);
- else
- cancel_work_sync(&bp->sp_task);
-}
-
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
if (!rxr->bnapi->in_reset) {
rxr->bnapi->in_reset = true;
- set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+ else
+ set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
rxr->rx_next_cons = 0xffff;
@@ -1269,8 +1387,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
} else {
tpa_info->hash_type = PKT_HASH_TYPE_NONE;
tpa_info->gso_type = 0;
- if (netif_msg_rx_err(bp))
- netdev_warn(bp->dev, "TPA packet without valid hash\n");
+ netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
}
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
@@ -1573,15 +1690,17 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
+ cpr->sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
} else {
u8 *new_data;
dma_addr_t new_mapping;
- new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
+ new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
+ cpr->sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
@@ -1589,14 +1708,15 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
tpa_info->data_ptr = new_data + bp->rx_offset;
tpa_info->mapping = new_mapping;
- skb = build_skb(data, 0);
+ skb = build_skb(data, bp->rx_buf_size);
dma_unmap_single_attrs(&bp->pdev->dev, mapping,
bp->rx_buf_use_size, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
if (!skb) {
- kfree(data);
+ skb_free_frag(data);
bnxt_abort_tpa(cpr, idx, agg_bufs);
+ cpr->sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
skb_reserve(skb, bp->rx_offset);
@@ -1604,9 +1724,10 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
+ skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
+ cpr->sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
}
@@ -1618,12 +1739,17 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
- (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
- u16 vlan_proto = tpa_info->metadata >>
- RX_CMP_FLAGS2_METADATA_TPID_SFT;
+ (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
+ __be16 vlan_proto = htons(tpa_info->metadata >>
+ RX_CMP_FLAGS2_METADATA_TPID_SFT);
u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
+ if (eth_type_vlan(vlan_proto)) {
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ } else {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
}
skb_checksum_none_assert(skb);
@@ -1683,11 +1809,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
+ bool xdp_active = false;
dma_addr_t dma_addr;
struct sk_buff *skb;
+ struct xdp_buff xdp;
+ u32 flags, misc;
void *data;
int rc = 0;
- u32 misc;
rxcmp = (struct rx_cmp *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
@@ -1707,6 +1835,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
prod = rxr->rx_prod;
if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
@@ -1735,12 +1867,16 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
cons = rxcmp->rx_cmp_opaque;
if (unlikely(cons != rxr->rx_next_cons)) {
- int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
+ int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
- netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
- cons, rxr->rx_next_cons);
+ /* 0xffff is forced error, don't print it */
+ if (rxr->rx_next_cons != 0xffff)
+ netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
+ cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
- return rc1;
+ if (rc1)
+ return rc1;
+ goto next_rx_no_prod_no_len;
}
rx_buf = &rxr->rx_buf_ring[cons];
data = rx_buf->data;
@@ -1770,31 +1906,55 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
- bnapi->cp_ring.rx_buf_errors++;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
- netdev_warn(bp->dev, "RX buffer error %x\n",
- rx_err);
+ bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
+ netdev_warn_once(bp->dev, "RX buffer error %x\n",
+ rx_err);
bnxt_sched_reset(bp, rxr);
}
}
goto next_rx_no_len;
}
- len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+ flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
+ len = flags >> RX_CMP_LEN_SHIFT;
dma_addr = rx_buf->mapping;
- if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
- rc = 1;
- goto next_rx;
+ if (bnxt_xdp_attached(bp, rxr)) {
+ bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
+ if (agg_bufs) {
+ u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
+ cp_cons, agg_bufs,
+ false);
+ if (!frag_len) {
+ cpr->sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ }
+ xdp_active = true;
+ }
+
+ if (xdp_active) {
+ if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
+ rc = 1;
+ goto next_rx;
+ }
}
if (len <= bp->rx_copy_thresh) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
- if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
- agg_bufs, false);
+ if (agg_bufs) {
+ if (!xdp_active)
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
+ agg_bufs, false);
+ else
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
+ }
+ cpr->sw_stats.rx.rx_oom_discards += 1;
rc = -ENOMEM;
goto next_rx;
}
@@ -1808,16 +1968,29 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
payload | len);
if (!skb) {
+ cpr->sw_stats.rx.rx_oom_discards += 1;
rc = -ENOMEM;
goto next_rx;
}
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
- if (!skb) {
- rc = -ENOMEM;
- goto next_rx;
+ if (!xdp_active) {
+ skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
+ if (!skb) {
+ cpr->sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ } else {
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+ if (!skb) {
+ /* we should be able to free the old skb here */
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
+ cpr->sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
+ }
}
}
@@ -1836,12 +2009,18 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if ((rxcmp1->rx_cmp_flags2 &
cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
- (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+ (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
- u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
+ __be16 vlan_proto = htons(meta_data >>
+ RX_CMP_FLAGS2_METADATA_TPID_SFT);
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
+ if (eth_type_vlan(vlan_proto)) {
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ } else {
+ dev_kfree_skb(skb);
+ goto next_rx;
+ }
}
skb_checksum_none_assert(skb);
@@ -1853,10 +2032,28 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else {
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
if (dev->features & NETIF_F_RXCSUM)
- bnapi->cp_ring.rx_l4_csum_errors++;
+ bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
}
}
+ if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
+ RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
+ u64 ns, ts;
+
+ if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ spin_lock_bh(&ptp->ptp_lock);
+ ns = timecounter_cyc2time(&ptp->tc, ts);
+ spin_unlock_bh(&ptp->ptp_lock);
+ memset(skb_hwtstamps(skb), 0,
+ sizeof(*skb_hwtstamps(skb)));
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+ }
+ }
+ }
bnxt_deliver_skb(bp, bnapi, skb);
rc = 1;
@@ -1886,6 +2083,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
struct rx_cmp *rxcmp;
u16 cp_cons;
u8 cmp_type;
+ int rc;
cp_cons = RING_CMP(tmp_raw_cons);
rxcmp = (struct rx_cmp *)
@@ -1899,6 +2097,10 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
cmp_type = RX_CMP_TYPE(rxcmp);
if (cmp_type == CMP_TYPE_RX_L2_CMP) {
rxcmp1->rx_cmp_cfa_code_errors_v2 |=
@@ -1910,7 +2112,10 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
tpa_end1->rx_tpa_end_cmp_errors_v2 |=
cpu_to_le32(RX_TPA_END_CMP_ERRORS);
}
- return bnxt_rx_pkt(bp, cpr, raw_cons, event);
+ rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
+ if (rc && rc != -EBUSY)
+ cpr->sw_stats.rx.rx_netpoll_discards += 1;
+ return rc;
}
u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
@@ -1927,7 +2132,7 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
break;
case BNXT_FW_HEALTH_REG_TYPE_GRC:
reg_off = fw_health->mapped_regs[reg_idx];
- /* fall through */
+ fallthrough;
case BNXT_FW_HEALTH_REG_TYPE_BAR0:
val = readl(bp->bar0 + reg_off);
break;
@@ -1940,19 +2145,78 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
return val;
}
+static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
+{
+ int i;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ u16 grp_idx = bp->rx_ring[i].bnapi->index;
+ struct bnxt_ring_grp_info *grp_info;
+
+ grp_info = &bp->grp_info[grp_idx];
+ if (grp_info->agg_fw_ring_id == ring_id)
+ return grp_idx;
+ }
+ return INVALID_HW_RING_ID;
+}
+
+static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
+{
+ u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
+
+ switch (err_type) {
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
+ netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
+ BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
+ netdev_warn(bp->dev, "Pause Storm detected!\n");
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
+ netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
+ break;
+ default:
+ netdev_err(bp->dev, "FW reported unknown error type %u\n",
+ err_type);
+ break;
+ }
+}
+
#define BNXT_GET_EVENT_PORT(data) \
((data) & \
ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
+#define BNXT_EVENT_RING_TYPE(data2) \
+ ((data2) & \
+ ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
+
+#define BNXT_EVENT_RING_TYPE_RX(data2) \
+ (BNXT_EVENT_RING_TYPE(data2) == \
+ ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
+
+#define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
+ (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
+ ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
+
+#define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
+ (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
+ ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
+
+#define BNXT_PHC_BITS 48
+
static int bnxt_async_event_process(struct bnxt *bp,
struct hwrm_async_event_cmpl *cmpl)
{
u16 event_id = le16_to_cpu(cmpl->event_id);
+ u32 data1 = le32_to_cpu(cmpl->event_data1);
+ u32 data2 = le32_to_cpu(cmpl->event_data2);
+
+ netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
+ event_id, data1, data2);
/* TODO CHIMP_FW: Define event id's for link change, error etc */
switch (event_id) {
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
- u32 data1 = le32_to_cpu(cmpl->event_data1);
struct bnxt_link_info *link_info = &bp->link_info;
if (BNXT_VF(bp))
@@ -1970,11 +2234,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
}
- /* fall through */
+ fallthrough;
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
- /* fall through */
+ fallthrough;
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
break;
@@ -1982,7 +2246,6 @@ static int bnxt_async_event_process(struct bnxt *bp,
set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
break;
case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
- u32 data1 = le32_to_cpu(cmpl->event_data1);
u16 port_id = BNXT_GET_EVENT_PORT(data1);
if (BNXT_VF(bp))
@@ -2000,7 +2263,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
- u32 data1 = le32_to_cpu(cmpl->event_data1);
+ char *type_str = "Solicited";
if (!bp->fw_health)
goto async_event_process_exit;
@@ -2012,43 +2275,136 @@ static int bnxt_async_event_process(struct bnxt *bp,
bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
if (!bp->fw_reset_max_dsecs)
bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
- if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
- netdev_warn(bp->dev, "Firmware fatal reset event received\n");
+ if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
+ set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
+ } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
+ type_str = "Fatal";
+ bp->fw_health->fatalities++;
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
- } else {
- netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
- bp->fw_reset_max_dsecs * 100);
- }
+ } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
+ EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
+ type_str = "Non-fatal";
+ bp->fw_health->survivals++;
+ set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
+ }
+ netif_warn(bp, hw, bp->dev,
+ "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
+ type_str, data1, data2,
+ bp->fw_reset_min_dsecs * 100,
+ bp->fw_reset_max_dsecs * 100);
set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
break;
}
case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
struct bnxt_fw_health *fw_health = bp->fw_health;
- u32 data1 = le32_to_cpu(cmpl->event_data1);
+ char *status_desc = "healthy";
+ u32 status;
if (!fw_health)
goto async_event_process_exit;
- fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
- fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
- if (!fw_health->enabled)
+ if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
+ fw_health->enabled = false;
+ netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
break;
-
- if (netif_msg_drv(bp))
- netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
- fw_health->enabled, fw_health->master,
- bnxt_fw_health_readl(bp,
- BNXT_FW_RESET_CNT_REG),
- bnxt_fw_health_readl(bp,
- BNXT_FW_HEALTH_REG));
+ }
+ fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
fw_health->tmr_multiplier =
DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
bp->current_interval * 10);
fw_health->tmr_counter = fw_health->tmr_multiplier;
- fw_health->last_fw_heartbeat =
- bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+ if (!fw_health->enabled)
+ fw_health->last_fw_heartbeat =
+ bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
fw_health->last_fw_reset_cnt =
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (status != BNXT_FW_STATUS_HEALTHY)
+ status_desc = "unhealthy";
+ netif_info(bp, drv, bp->dev,
+ "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
+ fw_health->primary ? "primary" : "backup", status,
+ status_desc, fw_health->last_fw_reset_cnt);
+ if (!fw_health->enabled) {
+ /* Make sure tmr_counter is set and visible to
+ * bnxt_health_check() before setting enabled to true.
+ */
+ smp_wmb();
+ fw_health->enabled = true;
+ }
+ goto async_event_process_exit;
+ }
+ case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
+ netif_notice(bp, hw, bp->dev,
+ "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
+ data1, data2);
+ goto async_event_process_exit;
+ case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
+ struct bnxt_rx_ring_info *rxr;
+ u16 grp_idx;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ goto async_event_process_exit;
+
+ netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
+ BNXT_EVENT_RING_TYPE(data2), data1);
+ if (!BNXT_EVENT_RING_TYPE_RX(data2))
+ goto async_event_process_exit;
+
+ grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
+ if (grp_idx == INVALID_HW_RING_ID) {
+ netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
+ data1);
+ goto async_event_process_exit;
+ }
+ rxr = bp->bnapi[grp_idx]->rx_ring;
+ bnxt_sched_reset(bp, rxr);
+ goto async_event_process_exit;
+ }
+ case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+
+ netif_notice(bp, hw, bp->dev,
+ "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
+ data1, data2);
+ if (fw_health) {
+ fw_health->echo_req_data1 = data1;
+ fw_health->echo_req_data2 = data2;
+ set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
+ break;
+ }
+ goto async_event_process_exit;
+ }
+ case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
+ bnxt_ptp_pps_event(bp, data1, data2);
+ goto async_event_process_exit;
+ }
+ case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
+ bnxt_event_error_report(bp, data1, data2);
+ goto async_event_process_exit;
+ }
+ case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
+ switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
+ case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
+ if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u64 ns;
+
+ spin_lock_bh(&ptp->ptp_lock);
+ bnxt_ptp_update_current_time(bp);
+ ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
+ BNXT_PHC_BITS) | ptp->current_time);
+ bnxt_ptp_rtc_timecounter_init(ptp, ns);
+ spin_unlock_bh(&ptp->ptp_lock);
+ }
+ break;
+ }
+ goto async_event_process_exit;
+ }
+ case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
+ u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
+
+ hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
goto async_event_process_exit;
}
default:
@@ -2070,10 +2426,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
switch (cmpl_type) {
case CMPL_BASE_TYPE_HWRM_DONE:
seq_id = le16_to_cpu(h_cmpl->sequence_id);
- if (seq_id == bp->hwrm_intr_seq_id)
- bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
- else
- netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
+ hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
break;
case CMPL_BASE_TYPE_HWRM_FWD_REQ:
@@ -2094,6 +2447,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
bnxt_async_event_process(bp,
(struct hwrm_async_event_cmpl *)txcmp);
+ break;
default:
break;
@@ -2166,6 +2520,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct tx_cmp *txcmp;
cpr->has_more_work = 0;
+ cpr->had_work_done = 1;
while (1) {
int rc;
@@ -2179,11 +2534,10 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
* reading any further.
*/
dma_rmb();
- cpr->had_work_done = 1;
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
- if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
+ if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
rx_pkts = budget;
raw_cons = NEXT_RAW_CMP(raw_cons);
if (budget)
@@ -2224,7 +2578,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (event & BNXT_REDIRECT_EVENT)
- xdp_do_flush_map();
+ xdp_do_flush();
if (event & BNXT_TX_EVENT) {
struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
@@ -2249,13 +2603,16 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
bnapi->tx_pkts = 0;
}
- if (bnapi->events & BNXT_RX_EVENT) {
+ if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- if (bnapi->events & BNXT_AGG_EVENT)
- bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
}
+ if (bnapi->events & BNXT_AGG_EVENT) {
+ struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ }
bnapi->events = 0;
}
@@ -2299,6 +2656,10 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
if (!TX_CMP_VALID(txcmp, raw_cons))
break;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
cp_cons = RING_CMP(tmp_raw_cons);
@@ -2351,6 +2712,10 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int work_done = 0;
+ if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
+ napi_complete(napi);
+ return 0;
+ }
while (1) {
work_done += bnxt_poll_work(bp, cpr, budget - work_done);
@@ -2396,7 +2761,7 @@ static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
}
static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
- u64 dbr_type, bool all)
+ u64 dbr_type)
{
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int i;
@@ -2405,10 +2770,10 @@ static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
struct bnxt_db_info *db;
- if (cpr2 && (all || cpr2->had_work_done)) {
+ if (cpr2 && cpr2->had_work_done) {
db = &cpr2->cp_db;
- writeq(db->db_key64 | dbr_type |
- RING_CMP(cpr2->cp_raw_cons), db->doorbell);
+ bnxt_writeq(bp, db->db_key64 | dbr_type |
+ RING_CMP(cpr2->cp_raw_cons), db->doorbell);
cpr2->had_work_done = 0;
}
}
@@ -2419,36 +2784,35 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
{
struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_cp_ring_info *cpr_rx;
u32 raw_cons = cpr->cp_raw_cons;
struct bnxt *bp = bnapi->bp;
struct nqe_cn *nqcmp;
int work_done = 0;
u32 cons;
+ if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
+ napi_complete(napi);
+ return 0;
+ }
if (cpr->has_more_work) {
cpr->has_more_work = 0;
work_done = __bnxt_poll_cqs(bp, bnapi, budget);
- if (cpr->has_more_work) {
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
- return work_done;
- }
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
- if (napi_complete_done(napi, work_done))
- BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
- return work_done;
}
while (1) {
cons = RING_CMP(raw_cons);
nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
- false);
+ if (cpr->has_more_work)
+ break;
+
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
cpr->cp_raw_cons = raw_cons;
if (napi_complete_done(napi, work_done))
BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
cpr->cp_raw_cons);
- return work_done;
+ goto poll_done;
}
/* The valid test of the entry must be done first before
@@ -2460,19 +2824,35 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
struct bnxt_cp_ring_info *cpr2;
+ /* No more budget for RX work */
+ if (budget && work_done >= budget && idx == BNXT_RX_HDL)
+ break;
+
cpr2 = cpr->cp_ring_arr[idx];
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
- cpr->has_more_work = cpr2->has_more_work;
+ cpr->has_more_work |= cpr2->has_more_work;
} else {
bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
}
raw_cons = NEXT_RAW_CMP(raw_cons);
- if (cpr->has_more_work)
- break;
}
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
- cpr->cp_raw_cons = raw_cons;
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
+ if (raw_cons != cpr->cp_raw_cons) {
+ cpr->cp_raw_cons = raw_cons;
+ BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
+ }
+poll_done:
+ cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL];
+ if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) {
+ struct dim_sample dim_sample = {};
+
+ dim_update_sample(cpr->event_ctr,
+ cpr_rx->rx_packets,
+ cpr_rx->rx_bytes,
+ &dim_sample);
+ net_dim(&cpr->dim, dim_sample);
+ }
return work_done;
}
@@ -2489,6 +2869,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
int j;
+ if (!txr->tx_buf_ring)
+ continue;
+
for (j = 0; j < max_idx;) {
struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
struct sk_buff *skb;
@@ -2499,7 +2882,7 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
dma_unmap_single(&pdev->dev,
dma_unmap_addr(tx_buf, mapping),
dma_unmap_len(tx_buf, len),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
xdp_return_frame(tx_buf->xdpf);
tx_buf->action = 0;
tx_buf->xdpf = NULL;
@@ -2524,7 +2907,7 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
dma_unmap_single(&pdev->dev,
dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
last = tx_buf->nr_frags;
j += 2;
@@ -2536,7 +2919,7 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
dma_unmap_page(
&pdev->dev,
dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(frag), PCI_DMA_TODEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
}
dev_kfree_skb(skb);
}
@@ -2544,93 +2927,110 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
}
}
-static void bnxt_free_rx_skbs(struct bnxt *bp)
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
{
- int i, max_idx, max_agg_idx;
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
struct pci_dev *pdev = bp->pdev;
-
- if (!bp->rx_ring)
- return;
+ struct bnxt_tpa_idx_map *map;
+ int i, max_idx, max_agg_idx;
max_idx = bp->rx_nr_pages * RX_DESC_CNT;
max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
- for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_tpa_idx_map *map;
- int j;
+ if (!rxr->rx_tpa)
+ goto skip_rx_tpa_free;
- if (rxr->rx_tpa) {
- for (j = 0; j < bp->max_tpa; j++) {
- struct bnxt_tpa_info *tpa_info =
- &rxr->rx_tpa[j];
- u8 *data = tpa_info->data;
+ for (i = 0; i < bp->max_tpa; i++) {
+ struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
+ u8 *data = tpa_info->data;
- if (!data)
- continue;
+ if (!data)
+ continue;
- dma_unmap_single_attrs(&pdev->dev,
- tpa_info->mapping,
- bp->rx_buf_use_size,
- bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
+ dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
+ bp->rx_buf_use_size, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
- tpa_info->data = NULL;
+ tpa_info->data = NULL;
- kfree(data);
- }
- }
+ skb_free_frag(data);
+ }
- for (j = 0; j < max_idx; j++) {
- struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
- dma_addr_t mapping = rx_buf->mapping;
- void *data = rx_buf->data;
+skip_rx_tpa_free:
+ if (!rxr->rx_buf_ring)
+ goto skip_rx_buf_free;
- if (!data)
- continue;
+ for (i = 0; i < max_idx; i++) {
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+ dma_addr_t mapping = rx_buf->mapping;
+ void *data = rx_buf->data;
- rx_buf->data = NULL;
+ if (!data)
+ continue;
- if (BNXT_RX_PAGE_MODE(bp)) {
- mapping -= bp->rx_dma_offset;
- dma_unmap_page_attrs(&pdev->dev, mapping,
- PAGE_SIZE, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
- page_pool_recycle_direct(rxr->page_pool, data);
- } else {
- dma_unmap_single_attrs(&pdev->dev, mapping,
- bp->rx_buf_use_size,
- bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
- kfree(data);
- }
+ rx_buf->data = NULL;
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ mapping -= bp->rx_dma_offset;
+ dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
+ bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+ page_pool_recycle_direct(rxr->page_pool, data);
+ } else {
+ dma_unmap_single_attrs(&pdev->dev, mapping,
+ bp->rx_buf_use_size, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+ skb_free_frag(data);
}
+ }
- for (j = 0; j < max_agg_idx; j++) {
- struct bnxt_sw_rx_agg_bd *rx_agg_buf =
- &rxr->rx_agg_ring[j];
- struct page *page = rx_agg_buf->page;
+skip_rx_buf_free:
+ if (!rxr->rx_agg_ring)
+ goto skip_rx_agg_free;
- if (!page)
- continue;
+ for (i = 0; i < max_agg_idx; i++) {
+ struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
+ struct page *page = rx_agg_buf->page;
+ if (!page)
+ continue;
+
+ if (BNXT_RX_PAGE_MODE(bp)) {
dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
- BNXT_RX_PAGE_SIZE,
- PCI_DMA_FROMDEVICE,
+ BNXT_RX_PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
+ rx_agg_buf->page = NULL;
+ __clear_bit(i, rxr->rx_agg_bmap);
+ page_pool_recycle_direct(rxr->page_pool, page);
+ } else {
+ dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+ BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+ DMA_ATTR_WEAK_ORDERING);
rx_agg_buf->page = NULL;
- __clear_bit(j, rxr->rx_agg_bmap);
+ __clear_bit(i, rxr->rx_agg_bmap);
__free_page(page);
}
- if (rxr->rx_page) {
- __free_page(rxr->rx_page);
- rxr->rx_page = NULL;
- }
- map = rxr->rx_tpa_idx_map;
- if (map)
- memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
}
+
+skip_rx_agg_free:
+ if (rxr->rx_page) {
+ __free_page(rxr->rx_page);
+ rxr->rx_page = NULL;
+ }
+ map = rxr->rx_tpa_idx_map;
+ if (map)
+ memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
+}
+
+static void bnxt_free_rx_skbs(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->rx_ring)
+ return;
+
+ for (i = 0; i < bp->rx_nr_rings; i++)
+ bnxt_free_one_rx_ring_skbs(bp, i);
}
static void bnxt_free_skbs(struct bnxt *bp)
@@ -2639,11 +3039,31 @@ static void bnxt_free_skbs(struct bnxt *bp)
bnxt_free_rx_skbs(bp);
}
+static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
+{
+ u8 init_val = mem_init->init_val;
+ u16 offset = mem_init->offset;
+ u8 *p2 = p;
+ int i;
+
+ if (!init_val)
+ return;
+ if (offset == BNXT_MEM_INVALID_OFFSET) {
+ memset(p, init_val, len);
+ return;
+ }
+ for (i = 0; i < len; i += mem_init->size)
+ *(p2 + i + offset) = init_val;
+}
+
static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
struct pci_dev *pdev = bp->pdev;
int i;
+ if (!rmem->pg_arr)
+ goto skip_pages;
+
for (i = 0; i < rmem->nr_pages; i++) {
if (!rmem->pg_arr[i])
continue;
@@ -2653,6 +3073,7 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
rmem->pg_arr[i] = NULL;
}
+skip_pages:
if (rmem->pg_tbl) {
size_t pg_tbl_size = rmem->nr_pages * 8;
@@ -2698,9 +3119,9 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (!rmem->pg_arr[i])
return -ENOMEM;
- if (rmem->init_val)
- memset(rmem->pg_arr[i], rmem->init_val,
- rmem->page_size);
+ if (rmem->mem_init)
+ bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
+ rmem->page_size);
if (rmem->nr_pages > 1 || rmem->depth > 0) {
if (i == rmem->nr_pages - 2 &&
(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
@@ -2848,7 +3269,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
if (rc)
return rc;
- rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
+ rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
if (rc < 0)
return rc;
@@ -2962,6 +3383,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
}
qidx = bp->tc_to_qidx[j];
ring->queue_id = bp->q_info[qidx].queue_id;
+ spin_lock_init(&txr->xdp_tx_lock);
if (i < bp->tx_nr_rings_xdp)
continue;
if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
@@ -2970,6 +3392,62 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
return 0;
}
+static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+ kfree(cpr->cp_desc_ring);
+ cpr->cp_desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
+ kfree(cpr->cp_desc_mapping);
+ cpr->cp_desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
+}
+
+static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
+{
+ cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
+ if (!cpr->cp_desc_ring)
+ return -ENOMEM;
+ cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
+ GFP_KERNEL);
+ if (!cpr->cp_desc_mapping)
+ return -ENOMEM;
+ return 0;
+}
+
+static void bnxt_free_all_cp_arrays(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+
+ if (!bnapi)
+ continue;
+ bnxt_free_cp_arrays(&bnapi->cp_ring);
+ }
+}
+
+static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
+{
+ int i, n = bp->cp_nr_pages;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ int rc;
+
+ if (!bnapi)
+ continue;
+ rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
static void bnxt_free_cp_rings(struct bnxt *bp)
{
int i;
@@ -2997,6 +3475,7 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
if (cpr2) {
ring = &cpr2->cp_ring_struct;
bnxt_free_ring(bp, &ring->ring_mem);
+ bnxt_free_cp_arrays(cpr2);
kfree(cpr2);
cpr->cp_ring_arr[j] = NULL;
}
@@ -3015,6 +3494,12 @@ static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
if (!cpr)
return NULL;
+ rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
+ if (rc) {
+ bnxt_free_cp_arrays(cpr);
+ kfree(cpr);
+ return NULL;
+ }
ring = &cpr->cp_ring_struct;
rmem = &ring->ring_mem;
rmem->nr_pages = bp->cp_nr_pages;
@@ -3025,6 +3510,7 @@ static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
rc = bnxt_alloc_ring(bp, rmem);
if (rc) {
bnxt_free_ring(bp, rmem);
+ bnxt_free_cp_arrays(cpr);
kfree(cpr);
cpr = NULL;
}
@@ -3169,31 +3655,16 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
}
}
-static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
{
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
struct net_device *dev = bp->dev;
- struct bnxt_rx_ring_info *rxr;
- struct bnxt_ring_struct *ring;
- u32 prod, type;
+ u32 prod;
int i;
- type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
- RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
-
- if (NET_IP_ALIGN == 2)
- type |= RX_BD_FLAGS_SOP;
-
- rxr = &bp->rx_ring[ring_nr];
- ring = &rxr->rx_ring_struct;
- bnxt_init_rxbd_pages(ring, type);
-
- if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
- bpf_prog_add(bp->xdp_prog, 1);
- rxr->xdp_prog = bp->xdp_prog;
- }
prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
- if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
+ if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
ring_nr, i, bp->rx_ring_size);
break;
@@ -3201,22 +3672,13 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
prod = NEXT_RX(prod);
}
rxr->rx_prod = prod;
- ring->fw_ring_id = INVALID_HW_RING_ID;
-
- ring = &rxr->rx_agg_ring_struct;
- ring->fw_ring_id = INVALID_HW_RING_ID;
if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
return 0;
- type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
- RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
-
- bnxt_init_rxbd_pages(ring, type);
-
prod = rxr->rx_agg_prod;
for (i = 0; i < bp->rx_agg_ring_size; i++) {
- if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
+ if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
ring_nr, i, bp->rx_ring_size);
break;
@@ -3225,30 +3687,58 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
}
rxr->rx_agg_prod = prod;
- if (bp->flags & BNXT_FLAG_TPA) {
- if (rxr->rx_tpa) {
- u8 *data;
- dma_addr_t mapping;
+ if (rxr->rx_tpa) {
+ dma_addr_t mapping;
+ u8 *data;
- for (i = 0; i < bp->max_tpa; i++) {
- data = __bnxt_alloc_rx_data(bp, &mapping,
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ for (i = 0; i < bp->max_tpa; i++) {
+ data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- rxr->rx_tpa[i].data = data;
- rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
- rxr->rx_tpa[i].mapping = mapping;
- }
- } else {
- netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
- return -ENOMEM;
+ rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
+ rxr->rx_tpa[i].mapping = mapping;
}
}
-
return 0;
}
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring_struct *ring;
+ u32 type;
+
+ type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+ if (NET_IP_ALIGN == 2)
+ type |= RX_BD_FLAGS_SOP;
+
+ rxr = &bp->rx_ring[ring_nr];
+ ring = &rxr->rx_ring_struct;
+ bnxt_init_rxbd_pages(ring, type);
+
+ if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+ bpf_prog_add(bp->xdp_prog, 1);
+ rxr->xdp_prog = bp->xdp_prog;
+ }
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ ring = &rxr->rx_agg_ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
+ type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+ bnxt_init_rxbd_pages(ring, type);
+ }
+
+ return bnxt_alloc_one_rx_ring(bp, ring_nr);
+}
+
static void bnxt_init_cp_rings(struct bnxt *bp)
{
int i, j;
@@ -3300,7 +3790,7 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
u16 i;
bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
- MAX_SKB_FRAGS + 1);
+ BNXT_MIN_TX_DESC_CNT);
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
@@ -3384,7 +3874,7 @@ static void bnxt_init_vnics(struct bnxt *bp)
if (bp->vnic_info[i].rss_hash_key) {
if (i == 0)
- prandom_bytes(vnic->rss_hash_key,
+ get_random_bytes(vnic->rss_hash_key,
HW_HASH_KEY_SIZE);
else
memcpy(vnic->rss_hash_key,
@@ -3427,13 +3917,13 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
*/
void bnxt_set_ring_params(struct bnxt *bp)
{
- u32 ring_size, rx_size, rx_space;
+ u32 ring_size, rx_size, rx_space, max_rx_cmpl;
u32 agg_factor = 0, agg_ring_size = 0;
/* 8 for CRC and VLAN */
rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
- rx_space = rx_size + NET_SKB_PAD +
+ rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
@@ -3453,9 +3943,15 @@ void bnxt_set_ring_params(struct bnxt *bp)
if (jumbo_factor > agg_factor)
agg_factor = jumbo_factor;
}
- agg_ring_size = ring_size * agg_factor;
+ if (agg_factor) {
+ if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
+ ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
+ netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
+ bp->rx_ring_size, ring_size);
+ bp->rx_ring_size = ring_size;
+ }
+ agg_ring_size = ring_size * agg_factor;
- if (agg_ring_size) {
bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
RX_DESC_CNT);
if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
@@ -3468,9 +3964,15 @@ void bnxt_set_ring_params(struct bnxt *bp)
}
bp->rx_agg_ring_size = agg_ring_size;
bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
- rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
- rx_space = rx_size + NET_SKB_PAD +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ rx_space = BNXT_PAGE_MODE_BUF_SIZE;
+ rx_size = BNXT_MAX_PAGE_MODE_MTU;
+ } else {
+ rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
}
bp->rx_buf_use_size = rx_size;
@@ -3483,7 +3985,15 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
- ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
+ max_rx_cmpl = bp->rx_ring_size;
+ /* MAX TPA needs to be added because TPA_START completions are
+ * immediately recycled, so the TPA completions are not bound by
+ * the RX ring size.
+ */
+ if (bp->flags & BNXT_FLAG_TPA)
+ max_rx_cmpl += bp->max_tpa;
+ /* RX and TPA completions are 32-byte, all others are 16-byte */
+ ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
bp->cp_ring_size = ring_size;
bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
@@ -3503,14 +4013,21 @@ void bnxt_set_ring_params(struct bnxt *bp)
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
if (page_mode) {
- if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
- return -EOPNOTSUPP;
- bp->dev->max_mtu =
- min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
bp->flags &= ~BNXT_FLAG_AGG_RINGS;
- bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
+ bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
+
+ if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+ bp->flags |= BNXT_FLAG_JUMBO;
+ bp->rx_skb_func = bnxt_rx_multi_page_skb;
+ bp->dev->max_mtu =
+ min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
+ } else {
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+ bp->rx_skb_func = bnxt_rx_page_skb;
+ bp->dev->max_mtu =
+ min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
+ }
bp->rx_dir = DMA_BIDIRECTIONAL;
- bp->rx_skb_func = bnxt_rx_page_skb;
/* Disable LRO or GRO_HW */
netdev_update_features(bp->dev);
} else {
@@ -3547,7 +4064,7 @@ static void bnxt_free_vnic_attributes(struct bnxt *bp)
}
if (vnic->rss_table) {
- dma_free_coherent(&pdev->dev, PAGE_SIZE,
+ dma_free_coherent(&pdev->dev, vnic->rss_table_size,
vnic->rss_table,
vnic->rss_table_dma_addr);
vnic->rss_table = NULL;
@@ -3612,7 +4129,13 @@ vnic_skip_grps:
continue;
/* Allocate rss table and hash key */
- vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
+
+ vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
+ vnic->rss_table = dma_alloc_coherent(&pdev->dev,
+ vnic->rss_table_size,
&vnic->rss_table_dma_addr,
GFP_KERNEL);
if (!vnic->rss_table) {
@@ -3620,8 +4143,6 @@ vnic_skip_grps:
goto out;
}
- size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
-
vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
}
@@ -3633,142 +4154,215 @@ out:
static void bnxt_free_hwrm_resources(struct bnxt *bp)
{
- struct pci_dev *pdev = bp->pdev;
+ struct bnxt_hwrm_wait_token *token;
- if (bp->hwrm_cmd_resp_addr) {
- dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
- bp->hwrm_cmd_resp_dma_addr);
- bp->hwrm_cmd_resp_addr = NULL;
- }
+ dma_pool_destroy(bp->hwrm_dma_pool);
+ bp->hwrm_dma_pool = NULL;
- if (bp->hwrm_cmd_kong_resp_addr) {
- dma_free_coherent(&pdev->dev, PAGE_SIZE,
- bp->hwrm_cmd_kong_resp_addr,
- bp->hwrm_cmd_kong_resp_dma_addr);
- bp->hwrm_cmd_kong_resp_addr = NULL;
- }
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
+ WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
+ rcu_read_unlock();
}
-static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
+static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
- struct pci_dev *pdev = bp->pdev;
-
- if (bp->hwrm_cmd_kong_resp_addr)
- return 0;
-
- bp->hwrm_cmd_kong_resp_addr =
- dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
- &bp->hwrm_cmd_kong_resp_dma_addr,
- GFP_KERNEL);
- if (!bp->hwrm_cmd_kong_resp_addr)
+ bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
+ BNXT_HWRM_DMA_SIZE,
+ BNXT_HWRM_DMA_ALIGN, 0);
+ if (!bp->hwrm_dma_pool)
return -ENOMEM;
+ INIT_HLIST_HEAD(&bp->hwrm_pending_list);
+
return 0;
}
-static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
+static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
{
- struct pci_dev *pdev = bp->pdev;
+ kfree(stats->hw_masks);
+ stats->hw_masks = NULL;
+ kfree(stats->sw_stats);
+ stats->sw_stats = NULL;
+ if (stats->hw_stats) {
+ dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
+ stats->hw_stats_map);
+ stats->hw_stats = NULL;
+ }
+}
- bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
- &bp->hwrm_cmd_resp_dma_addr,
- GFP_KERNEL);
- if (!bp->hwrm_cmd_resp_addr)
+static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
+ bool alloc_masks)
+{
+ stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
+ &stats->hw_stats_map, GFP_KERNEL);
+ if (!stats->hw_stats)
return -ENOMEM;
+ stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
+ if (!stats->sw_stats)
+ goto stats_mem_err;
+
+ if (alloc_masks) {
+ stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
+ if (!stats->hw_masks)
+ goto stats_mem_err;
+ }
return 0;
+
+stats_mem_err:
+ bnxt_free_stats_mem(bp, stats);
+ return -ENOMEM;
}
-static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
+static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
{
- if (bp->hwrm_short_cmd_req_addr) {
- struct pci_dev *pdev = bp->pdev;
+ int i;
- dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
- bp->hwrm_short_cmd_req_addr,
- bp->hwrm_short_cmd_req_dma_addr);
- bp->hwrm_short_cmd_req_addr = NULL;
- }
+ for (i = 0; i < count; i++)
+ mask_arr[i] = mask;
}
-static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
+static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
{
- struct pci_dev *pdev = bp->pdev;
-
- if (bp->hwrm_short_cmd_req_addr)
- return 0;
-
- bp->hwrm_short_cmd_req_addr =
- dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
- &bp->hwrm_short_cmd_req_dma_addr,
- GFP_KERNEL);
- if (!bp->hwrm_short_cmd_req_addr)
- return -ENOMEM;
+ int i;
- return 0;
+ for (i = 0; i < count; i++)
+ mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
}
-static void bnxt_free_port_stats(struct bnxt *bp)
+static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
+ struct bnxt_stats_mem *stats)
{
- struct pci_dev *pdev = bp->pdev;
+ struct hwrm_func_qstats_ext_output *resp;
+ struct hwrm_func_qstats_ext_input *req;
+ __le64 *hw_masks;
+ int rc;
- bp->flags &= ~BNXT_FLAG_PORT_STATS;
- bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
+ if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
+ !(bp->flags & BNXT_FLAG_CHIP_P5))
+ return -EOPNOTSUPP;
- if (bp->hw_rx_port_stats) {
- dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
- bp->hw_rx_port_stats,
- bp->hw_rx_port_stats_map);
- bp->hw_rx_port_stats = NULL;
- }
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
- if (bp->hw_tx_port_stats_ext) {
- dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
- bp->hw_tx_port_stats_ext,
- bp->hw_tx_port_stats_ext_map);
- bp->hw_tx_port_stats_ext = NULL;
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ hw_masks = &resp->rx_ucast_pkts;
+ bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
}
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
+static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
+
+static void bnxt_init_stats(struct bnxt *bp)
+{
+ struct bnxt_napi *bnapi = bp->bnapi[0];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_stats_mem *stats;
+ __le64 *rx_stats, *tx_stats;
+ int rc, rx_count, tx_count;
+ u64 *rx_masks, *tx_masks;
+ u64 mask;
+ u8 flags;
- if (bp->hw_rx_port_stats_ext) {
- dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
- bp->hw_rx_port_stats_ext,
- bp->hw_rx_port_stats_ext_map);
- bp->hw_rx_port_stats_ext = NULL;
+ cpr = &bnapi->cp_ring;
+ stats = &cpr->stats;
+ rc = bnxt_hwrm_func_qstat_ext(bp, stats);
+ if (rc) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ mask = (1ULL << 48) - 1;
+ else
+ mask = -1ULL;
+ bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
}
+ if (bp->flags & BNXT_FLAG_PORT_STATS) {
+ stats = &bp->port_stats;
+ rx_stats = stats->hw_stats;
+ rx_masks = stats->hw_masks;
+ rx_count = sizeof(struct rx_port_stats) / 8;
+ tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ tx_count = sizeof(struct tx_port_stats) / 8;
+
+ flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
+ rc = bnxt_hwrm_port_qstats(bp, flags);
+ if (rc) {
+ mask = (1ULL << 40) - 1;
- if (bp->hw_pcie_stats) {
- dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
- bp->hw_pcie_stats, bp->hw_pcie_stats_map);
- bp->hw_pcie_stats = NULL;
+ bnxt_fill_masks(rx_masks, mask, rx_count);
+ bnxt_fill_masks(tx_masks, mask, tx_count);
+ } else {
+ bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
+ bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
+ bnxt_hwrm_port_qstats(bp, 0);
+ }
+ }
+ if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
+ stats = &bp->rx_port_stats_ext;
+ rx_stats = stats->hw_stats;
+ rx_masks = stats->hw_masks;
+ rx_count = sizeof(struct rx_port_stats_ext) / 8;
+ stats = &bp->tx_port_stats_ext;
+ tx_stats = stats->hw_stats;
+ tx_masks = stats->hw_masks;
+ tx_count = sizeof(struct tx_port_stats_ext) / 8;
+
+ flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
+ rc = bnxt_hwrm_port_qstats_ext(bp, flags);
+ if (rc) {
+ mask = (1ULL << 40) - 1;
+
+ bnxt_fill_masks(rx_masks, mask, rx_count);
+ if (tx_stats)
+ bnxt_fill_masks(tx_masks, mask, tx_count);
+ } else {
+ bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
+ if (tx_stats)
+ bnxt_copy_hw_masks(tx_masks, tx_stats,
+ tx_count);
+ bnxt_hwrm_port_qstats_ext(bp, 0);
+ }
}
}
+static void bnxt_free_port_stats(struct bnxt *bp)
+{
+ bp->flags &= ~BNXT_FLAG_PORT_STATS;
+ bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
+
+ bnxt_free_stats_mem(bp, &bp->port_stats);
+ bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
+ bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
+}
+
static void bnxt_free_ring_stats(struct bnxt *bp)
{
- struct pci_dev *pdev = bp->pdev;
- int size, i;
+ int i;
if (!bp->bnapi)
return;
- size = bp->hw_ring_stats_size;
-
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- if (cpr->hw_stats) {
- dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
- cpr->hw_stats_map);
- cpr->hw_stats = NULL;
- }
+ bnxt_free_stats_mem(bp, &cpr->stats);
}
}
static int bnxt_alloc_stats(struct bnxt *bp)
{
u32 size, i;
- struct pci_dev *pdev = bp->pdev;
+ int rc;
size = bp->hw_ring_stats_size;
@@ -3776,11 +4370,10 @@ static int bnxt_alloc_stats(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
- &cpr->hw_stats_map,
- GFP_KERNEL);
- if (!cpr->hw_stats)
- return -ENOMEM;
+ cpr->stats.len = size;
+ rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
+ if (rc)
+ return rc;
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
}
@@ -3788,22 +4381,14 @@ static int bnxt_alloc_stats(struct bnxt *bp)
if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
return 0;
- if (bp->hw_rx_port_stats)
+ if (bp->port_stats.hw_stats)
goto alloc_ext_stats;
- bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
- sizeof(struct tx_port_stats) + 1024;
-
- bp->hw_rx_port_stats =
- dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
- &bp->hw_rx_port_stats_map,
- GFP_KERNEL);
- if (!bp->hw_rx_port_stats)
- return -ENOMEM;
+ bp->port_stats.len = BNXT_PORT_STATS_SIZE;
+ rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
+ if (rc)
+ return rc;
- bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
- bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
- sizeof(struct rx_port_stats) + 512;
bp->flags |= BNXT_FLAG_PORT_STATS;
alloc_ext_stats:
@@ -3812,41 +4397,28 @@ alloc_ext_stats:
if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
return 0;
- if (bp->hw_rx_port_stats_ext)
+ if (bp->rx_port_stats_ext.hw_stats)
goto alloc_tx_ext_stats;
- bp->hw_rx_port_stats_ext =
- dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
- &bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
- if (!bp->hw_rx_port_stats_ext)
+ bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
+ rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
+ /* Extended stats are optional */
+ if (rc)
return 0;
alloc_tx_ext_stats:
- if (bp->hw_tx_port_stats_ext)
- goto alloc_pcie_stats;
+ if (bp->tx_port_stats_ext.hw_stats)
+ return 0;
if (bp->hwrm_spec_code >= 0x10902 ||
(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
- bp->hw_tx_port_stats_ext =
- dma_alloc_coherent(&pdev->dev,
- sizeof(struct tx_port_stats_ext),
- &bp->hw_tx_port_stats_ext_map,
- GFP_KERNEL);
+ bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
+ rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
+ /* Extended stats are optional */
+ if (rc)
+ return 0;
}
bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
-
-alloc_pcie_stats:
- if (bp->hw_pcie_stats ||
- !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
- return 0;
-
- bp->hw_pcie_stats =
- dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
- &bp->hw_pcie_stats_map, GFP_KERNEL);
- if (!bp->hw_pcie_stats)
- return 0;
-
- bp->flags |= BNXT_FLAG_PCIE_STATS;
return 0;
}
@@ -3905,7 +4477,7 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
}
}
if (irq_reinit) {
- kfree(bp->ntp_fltr_bmap);
+ bitmap_free(bp->ntp_fltr_bmap);
bp->ntp_fltr_bmap = NULL;
}
bp->ntp_fltr_count = 0;
@@ -3924,9 +4496,7 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
- sizeof(long),
- GFP_KERNEL);
+ bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
rc = -ENOMEM;
@@ -3943,9 +4513,13 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_tx_rings(bp);
bnxt_free_rx_rings(bp);
bnxt_free_cp_rings(bp);
+ bnxt_free_all_cp_arrays(bp);
bnxt_free_ntp_fltrs(bp, irq_re_init);
if (irq_re_init) {
bnxt_free_ring_stats(bp);
+ if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
+ test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ bnxt_free_port_stats(bp);
bnxt_free_ring_grps(bp);
bnxt_free_vnics(bp);
kfree(bp->tx_ring_map);
@@ -4049,6 +4623,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
rc = bnxt_alloc_stats(bp);
if (rc)
goto alloc_mem_err;
+ bnxt_init_stats(bp);
rc = bnxt_alloc_ntp_fltrs(bp);
if (rc)
@@ -4059,6 +4634,10 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
goto alloc_mem_err;
}
+ rc = bnxt_alloc_all_cp_arrays(bp);
+ if (rc)
+ goto alloc_mem_err;
+
bnxt_init_ring_struct(bp);
rc = bnxt_alloc_rx_rings(bp);
@@ -4115,6 +4694,9 @@ static void bnxt_disable_int_sync(struct bnxt *bp)
{
int i;
+ if (!bp->irq_tbl)
+ return;
+
atomic_inc(&bp->intr_sem);
bnxt_disable_int(bp);
@@ -4138,306 +4720,38 @@ static void bnxt_enable_int(struct bnxt *bp)
}
}
-void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
- u16 cmpl_ring, u16 target_id)
-{
- struct input *req = request;
-
- req->req_type = cpu_to_le16(req_type);
- req->cmpl_ring = cpu_to_le16(cmpl_ring);
- req->target_id = cpu_to_le16(target_id);
- if (bnxt_kong_hwrm_message(bp, req))
- req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
- else
- req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
-}
-
-static int bnxt_hwrm_to_stderr(u32 hwrm_err)
-{
- switch (hwrm_err) {
- case HWRM_ERR_CODE_SUCCESS:
- return 0;
- case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
- return -EACCES;
- case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
- return -ENOSPC;
- case HWRM_ERR_CODE_INVALID_PARAMS:
- case HWRM_ERR_CODE_INVALID_FLAGS:
- case HWRM_ERR_CODE_INVALID_ENABLES:
- case HWRM_ERR_CODE_UNSUPPORTED_TLV:
- case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
- return -EINVAL;
- case HWRM_ERR_CODE_NO_BUFFER:
- return -ENOMEM;
- case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
- return -EAGAIN;
- case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
- return -EOPNOTSUPP;
- default:
- return -EIO;
- }
-}
-
-static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
- int timeout, bool silent)
-{
- int i, intr_process, rc, tmo_count;
- struct input *req = msg;
- u32 *data = msg;
- __le32 *resp_len;
- u8 *valid;
- u16 cp_ring_id, len = 0;
- struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
- u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
- struct hwrm_short_input short_input = {0};
- u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
- u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
- u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
- u16 dst = BNXT_HWRM_CHNL_CHIMP;
-
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
- return -EBUSY;
-
- if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
- if (msg_len > bp->hwrm_max_ext_req_len ||
- !bp->hwrm_short_cmd_req_addr)
- return -EINVAL;
- }
-
- if (bnxt_hwrm_kong_chnl(bp, req)) {
- dst = BNXT_HWRM_CHNL_KONG;
- bar_offset = BNXT_GRCPF_REG_KONG_COMM;
- doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
- resp = bp->hwrm_cmd_kong_resp_addr;
- resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
- }
-
- memset(resp, 0, PAGE_SIZE);
- cp_ring_id = le16_to_cpu(req->cmpl_ring);
- intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
-
- req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
- /* currently supports only one outstanding message */
- if (intr_process)
- bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
-
- if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
- msg_len > BNXT_HWRM_MAX_REQ_LEN) {
- void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
- u16 max_msg_len;
-
- /* Set boundary for maximum extended request length for short
- * cmd format. If passed up from device use the max supported
- * internal req length.
- */
- max_msg_len = bp->hwrm_max_ext_req_len;
-
- memcpy(short_cmd_req, req, msg_len);
- if (msg_len < max_msg_len)
- memset(short_cmd_req + msg_len, 0,
- max_msg_len - msg_len);
-
- short_input.req_type = req->req_type;
- short_input.signature =
- cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
- short_input.size = cpu_to_le16(msg_len);
- short_input.req_addr =
- cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
-
- data = (u32 *)&short_input;
- msg_len = sizeof(short_input);
-
- /* Sync memory write before updating doorbell */
- wmb();
-
- max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
- }
-
- /* Write request msg to hwrm channel */
- __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
-
- for (i = msg_len; i < max_req_len; i += 4)
- writel(0, bp->bar0 + bar_offset + i);
-
- /* Ring channel doorbell */
- writel(1, bp->bar0 + doorbell_offset);
-
- if (!pci_is_enabled(bp->pdev))
- return 0;
-
- if (!timeout)
- timeout = DFLT_HWRM_CMD_TIMEOUT;
- /* convert timeout to usec */
- timeout *= 1000;
-
- i = 0;
- /* Short timeout for the first few iterations:
- * number of loops = number of loops for short timeout +
- * number of loops for standard timeout.
- */
- tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
- timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
- tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
- resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
-
- if (intr_process) {
- u16 seq_id = bp->hwrm_intr_seq_id;
-
- /* Wait until hwrm response cmpl interrupt is processed */
- while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
- i++ < tmo_count) {
- /* Abort the wait for completion if the FW health
- * check has failed.
- */
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
- return -EBUSY;
- /* on first few passes, just barely sleep */
- if (i < HWRM_SHORT_TIMEOUT_COUNTER)
- usleep_range(HWRM_SHORT_MIN_TIMEOUT,
- HWRM_SHORT_MAX_TIMEOUT);
- else
- usleep_range(HWRM_MIN_TIMEOUT,
- HWRM_MAX_TIMEOUT);
- }
-
- if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
- if (!silent)
- netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
- le16_to_cpu(req->req_type));
- return -EBUSY;
- }
- len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
- HWRM_RESP_LEN_SFT;
- valid = resp_addr + len - 1;
- } else {
- int j;
-
- /* Check if response len is updated */
- for (i = 0; i < tmo_count; i++) {
- /* Abort the wait for completion if the FW health
- * check has failed.
- */
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
- return -EBUSY;
- len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
- HWRM_RESP_LEN_SFT;
- if (len)
- break;
- /* on first few passes, just barely sleep */
- if (i < HWRM_SHORT_TIMEOUT_COUNTER)
- usleep_range(HWRM_SHORT_MIN_TIMEOUT,
- HWRM_SHORT_MAX_TIMEOUT);
- else
- usleep_range(HWRM_MIN_TIMEOUT,
- HWRM_MAX_TIMEOUT);
- }
-
- if (i >= tmo_count) {
- if (!silent)
- netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
- HWRM_TOTAL_TIMEOUT(i),
- le16_to_cpu(req->req_type),
- le16_to_cpu(req->seq_id), len);
- return -EBUSY;
- }
-
- /* Last byte of resp contains valid bit */
- valid = resp_addr + len - 1;
- for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
- /* make sure we read from updated DMA memory */
- dma_rmb();
- if (*valid)
- break;
- usleep_range(1, 5);
- }
-
- if (j >= HWRM_VALID_BIT_DELAY_USEC) {
- if (!silent)
- netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
- HWRM_TOTAL_TIMEOUT(i),
- le16_to_cpu(req->req_type),
- le16_to_cpu(req->seq_id), len,
- *valid);
- return -EBUSY;
- }
- }
-
- /* Zero valid bit for compatibility. Valid bit in an older spec
- * may become a new field in a newer spec. We must make sure that
- * a new field not implemented by old spec will read zero.
- */
- *valid = 0;
- rc = le16_to_cpu(resp->error_code);
- if (rc && !silent)
- netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
- le16_to_cpu(resp->req_type),
- le16_to_cpu(resp->seq_id), rc);
- return bnxt_hwrm_to_stderr(rc);
-}
-
-int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
-{
- return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
-}
-
-int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
- int timeout)
-{
- return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
-}
-
-int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
-{
- int rc;
-
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, msg, msg_len, timeout);
- mutex_unlock(&bp->hwrm_cmd_lock);
- return rc;
-}
-
-int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
- int timeout)
-{
- int rc;
-
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
- mutex_unlock(&bp->hwrm_cmd_lock);
- return rc;
-}
-
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
bool async_only)
{
- struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_func_drv_rgtr_input req = {0};
DECLARE_BITMAP(async_events_bmap, 256);
u32 *events = (u32 *)async_events_bmap;
+ struct hwrm_func_drv_rgtr_output *resp;
+ struct hwrm_func_drv_rgtr_input *req;
u32 flags;
int rc, i;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
+ if (rc)
+ return rc;
- req.enables =
- cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
- FUNC_DRV_RGTR_REQ_ENABLES_VER |
- FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+ req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+ FUNC_DRV_RGTR_REQ_ENABLES_VER |
+ FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
- req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
+ req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
- req.flags = cpu_to_le32(flags);
- req.ver_maj_8b = DRV_VER_MAJ;
- req.ver_min_8b = DRV_VER_MIN;
- req.ver_upd_8b = DRV_VER_UPD;
- req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
- req.ver_min = cpu_to_le16(DRV_VER_MIN);
- req.ver_upd = cpu_to_le16(DRV_VER_UPD);
+ req->flags = cpu_to_le32(flags);
+ req->ver_maj_8b = DRV_VER_MAJ;
+ req->ver_min_8b = DRV_VER_MIN;
+ req->ver_upd_8b = DRV_VER_UPD;
+ req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
+ req->ver_min = cpu_to_le16(DRV_VER_MIN);
+ req->ver_upd = cpu_to_le16(DRV_VER_UPD);
if (BNXT_PF(bp)) {
u32 data[8];
@@ -4454,14 +4768,14 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
}
for (i = 0; i < 8; i++)
- req.vf_req_fwd[i] = cpu_to_le32(data[i]);
+ req->vf_req_fwd[i] = cpu_to_le32(data[i]);
- req.enables |=
+ req->enables |=
cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
}
if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
- req.flags |= cpu_to_le32(
+ req->flags |= cpu_to_le32(
FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
memset(async_events_bmap, 0, sizeof(async_events_bmap));
@@ -4480,55 +4794,72 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
}
}
for (i = 0; i < 8; i++)
- req.async_event_fwd[i] |= cpu_to_le32(events[i]);
+ req->async_event_fwd[i] |= cpu_to_le32(events[i]);
if (async_only)
- req.enables =
+ req->enables =
cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (!rc) {
set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
if (resp->flags &
cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
-static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
+int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
- struct hwrm_func_drv_unrgtr_input req = {0};
+ struct hwrm_func_drv_unrgtr_input *req;
+ int rc;
if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
+ if (rc)
+ return rc;
+ return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
{
- u32 rc = 0;
- struct hwrm_tunnel_dst_port_free_input req = {0};
+ struct hwrm_tunnel_dst_port_free_input *req;
+ int rc;
+
+ if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
+ bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
+ return 0;
+ if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
+ bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
+ return 0;
+
+ rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
+ if (rc)
+ return rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
- req.tunnel_type = tunnel_type;
+ req->tunnel_type = tunnel_type;
switch (tunnel_type) {
case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
- req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
+ req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
+ bp->vxlan_port = 0;
+ bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
break;
case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
- req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
+ req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
+ bp->nge_port = 0;
+ bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
break;
default:
break;
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_send(bp, req);
if (rc)
netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
rc);
@@ -4538,17 +4869,19 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
u8 tunnel_type)
{
- u32 rc = 0;
- struct hwrm_tunnel_dst_port_alloc_input req = {0};
- struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_tunnel_dst_port_alloc_output *resp;
+ struct hwrm_tunnel_dst_port_alloc_input *req;
+ int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
+ if (rc)
+ return rc;
- req.tunnel_type = tunnel_type;
- req.tunnel_dst_port_val = port;
+ req->tunnel_type = tunnel_type;
+ req->tunnel_dst_port_val = port;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (rc) {
netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
rc);
@@ -4557,43 +4890,55 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
switch (tunnel_type) {
case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
- bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+ bp->vxlan_port = port;
+ bp->vxlan_fw_dst_port_id =
+ le16_to_cpu(resp->tunnel_dst_port_id);
break;
case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
- bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
+ bp->nge_port = port;
+ bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
break;
default:
break;
}
err_out:
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
{
- struct hwrm_cfa_l2_set_rx_mask_input req = {0};
+ struct hwrm_cfa_l2_set_rx_mask_input *req;
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
- req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
+ if (rc)
+ return rc;
- req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
- req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
- req.mask = cpu_to_le32(vnic->rx_mask);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
+ req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+ req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ }
+ req->mask = cpu_to_le32(vnic->rx_mask);
+ return hwrm_req_send_silent(bp, req);
}
#ifdef CONFIG_RFS_ACCEL
static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr)
{
- struct hwrm_cfa_ntuple_filter_free_input req = {0};
+ struct hwrm_cfa_ntuple_filter_free_input *req;
+ int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
- req.ntuple_filter_id = fltr->filter_id;
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
+ if (rc)
+ return rc;
+
+ req->ntuple_filter_id = fltr->filter_id;
+ return hwrm_req_send(bp, req);
}
#define BNXT_NTP_FLTR_FLAGS \
@@ -4618,132 +4963,133 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr)
{
- struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
struct hwrm_cfa_ntuple_filter_alloc_output *resp;
+ struct hwrm_cfa_ntuple_filter_alloc_input *req;
struct flow_keys *keys = &fltr->fkeys;
struct bnxt_vnic_info *vnic;
u32 flags = 0;
- int rc = 0;
+ int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
- req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
+ rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
+ if (rc)
+ return rc;
+
+ req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
- req.dst_id = cpu_to_le16(fltr->rxq);
+ req->dst_id = cpu_to_le16(fltr->rxq);
} else {
vnic = &bp->vnic_info[fltr->rxq + 1];
- req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
+ req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
}
- req.flags = cpu_to_le32(flags);
- req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+ req->flags = cpu_to_le32(flags);
+ req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
- req.ethertype = htons(ETH_P_IP);
- memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
- req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
- req.ip_protocol = keys->basic.ip_proto;
+ req->ethertype = htons(ETH_P_IP);
+ memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
+ req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
+ req->ip_protocol = keys->basic.ip_proto;
if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
int i;
- req.ethertype = htons(ETH_P_IPV6);
- req.ip_addr_type =
+ req->ethertype = htons(ETH_P_IPV6);
+ req->ip_addr_type =
CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
- *(struct in6_addr *)&req.src_ipaddr[0] =
+ *(struct in6_addr *)&req->src_ipaddr[0] =
keys->addrs.v6addrs.src;
- *(struct in6_addr *)&req.dst_ipaddr[0] =
+ *(struct in6_addr *)&req->dst_ipaddr[0] =
keys->addrs.v6addrs.dst;
for (i = 0; i < 4; i++) {
- req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
- req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+ req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+ req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
}
} else {
- req.src_ipaddr[0] = keys->addrs.v4addrs.src;
- req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
- req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
- req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ req->src_ipaddr[0] = keys->addrs.v4addrs.src;
+ req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+ req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
}
if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
- req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
- req.tunnel_type =
+ req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
+ req->tunnel_type =
CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
}
- req.src_port = keys->ports.src;
- req.src_port_mask = cpu_to_be16(0xffff);
- req.dst_port = keys->ports.dst;
- req.dst_port_mask = cpu_to_be16(0xffff);
+ req->src_port = keys->ports.src;
+ req->src_port_mask = cpu_to_be16(0xffff);
+ req->dst_port = keys->ports.dst;
+ req->dst_port_mask = cpu_to_be16(0xffff);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc) {
- resp = bnxt_get_hwrm_resp_addr(bp, &req);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc)
fltr->filter_id = resp->ntuple_filter_id;
- }
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
#endif
static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
- u8 *mac_addr)
+ const u8 *mac_addr)
{
- u32 rc = 0;
- struct hwrm_cfa_l2_filter_alloc_input req = {0};
- struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_l2_filter_alloc_output *resp;
+ struct hwrm_cfa_l2_filter_alloc_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
+ if (rc)
+ return rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
- req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+ req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
- req.flags |=
+ req->flags |=
cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
- req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
- req.enables =
+ req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
+ req->enables =
cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
- memcpy(req.l2_addr, mac_addr, ETH_ALEN);
- req.l2_addr_mask[0] = 0xff;
- req.l2_addr_mask[1] = 0xff;
- req.l2_addr_mask[2] = 0xff;
- req.l2_addr_mask[3] = 0xff;
- req.l2_addr_mask[4] = 0xff;
- req.l2_addr_mask[5] = 0xff;
-
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ memcpy(req->l2_addr, mac_addr, ETH_ALEN);
+ req->l2_addr_mask[0] = 0xff;
+ req->l2_addr_mask[1] = 0xff;
+ req->l2_addr_mask[2] = 0xff;
+ req->l2_addr_mask[3] = 0xff;
+ req->l2_addr_mask[4] = 0xff;
+ req->l2_addr_mask[5] = 0xff;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (!rc)
bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
resp->l2_filter_id;
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
{
+ struct hwrm_cfa_l2_filter_free_input *req;
u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
- int rc = 0;
+ int rc;
/* Any associated ntuple filters will also be cleared by firmware. */
- mutex_lock(&bp->hwrm_cmd_lock);
+ rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
+ if (rc)
+ return rc;
+ hwrm_req_hold(bp, req);
for (i = 0; i < num_of_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
for (j = 0; j < vnic->uc_filter_count; j++) {
- struct hwrm_cfa_l2_filter_free_input req = {0};
-
- bnxt_hwrm_cmd_hdr_init(bp, &req,
- HWRM_CFA_L2_FILTER_FREE, -1, -1);
+ req->l2_filter_id = vnic->fw_l2_filter_id[j];
- req.l2_filter_id = vnic->fw_l2_filter_id[j];
-
- rc = _hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_send(bp, req);
}
vnic->uc_filter_count = 0;
}
- mutex_unlock(&bp->hwrm_cmd_lock);
-
+ hwrm_req_drop(bp, req);
return rc;
}
@@ -4751,12 +5097,15 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
- struct hwrm_vnic_tpa_cfg_input req = {0};
+ struct hwrm_vnic_tpa_cfg_input *req;
+ int rc;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
+ if (rc)
+ return rc;
if (tpa_flags) {
u16 mss = bp->dev->mtu - 40;
@@ -4770,9 +5119,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
if (tpa_flags & BNXT_FLAG_GRO)
flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
- req.flags = cpu_to_le32(flags);
+ req->flags = cpu_to_le32(flags);
- req.enables =
+ req->enables =
cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
@@ -4796,14 +5145,14 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
} else {
segs = ilog2(nsegs);
}
- req.max_agg_segs = cpu_to_le16(segs);
- req.max_aggs = cpu_to_le16(max_aggs);
+ req->max_agg_segs = cpu_to_le16(segs);
+ req->max_aggs = cpu_to_le16(max_aggs);
- req.min_agg_len = cpu_to_le32(512);
+ req->min_agg_len = cpu_to_le32(512);
}
- req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return hwrm_req_send(bp, req);
}
static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
@@ -4840,118 +5189,212 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
}
}
+static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
+{
+ int entries;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
+ else
+ entries = HW_HASH_INDEX_SIZE;
+
+ bp->rss_indir_tbl_entries = entries;
+ bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
+ GFP_KERNEL);
+ if (!bp->rss_indir_tbl)
+ return -ENOMEM;
+ return 0;
+}
+
+static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
+{
+ u16 max_rings, max_entries, pad, i;
+
+ if (!bp->rx_nr_rings)
+ return;
+
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ max_rings = bp->rx_nr_rings - 1;
+ else
+ max_rings = bp->rx_nr_rings;
+
+ max_entries = bnxt_get_rxfh_indir_size(bp->dev);
+
+ for (i = 0; i < max_entries; i++)
+ bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
+
+ pad = bp->rss_indir_tbl_entries - max_entries;
+ if (pad)
+ memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+}
+
+static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
+{
+ u16 i, tbl_size, max_ring = 0;
+
+ if (!bp->rss_indir_tbl)
+ return 0;
+
+ tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+ for (i = 0; i < tbl_size; i++)
+ max_ring = max(max_ring, bp->rss_indir_tbl[i]);
+ return max_ring;
+}
+
+int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ return 2;
+ return 1;
+}
+
+static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
+ u16 i, j;
+
+ /* Fill the RSS indirection table with ring group ids */
+ for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
+ if (!no_rss)
+ j = bp->rss_indir_tbl[i];
+ vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
+ }
+}
+
+static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
+ __le16 *ring_tbl = vnic->rss_table;
+ struct bnxt_rx_ring_info *rxr;
+ u16 tbl_size, i;
+
+ tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+
+ for (i = 0; i < tbl_size; i++) {
+ u16 ring_id, j;
+
+ j = bp->rss_indir_tbl[i];
+ rxr = &bp->rx_ring[j];
+
+ ring_id = rxr->rx_ring_struct.fw_ring_id;
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ }
+}
+
+static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
+ else
+ __bnxt_fill_hw_rss_tbl(bp, vnic);
+}
+
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
- u32 i, j, max_rings;
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
- struct hwrm_vnic_rss_cfg_input req = {0};
+ struct hwrm_vnic_rss_cfg_input *req;
+ int rc;
if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
- if (set_rss) {
- req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
- req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
- if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
- if (BNXT_CHIP_TYPE_NITRO_A0(bp))
- max_rings = bp->rx_nr_rings - 1;
- else
- max_rings = bp->rx_nr_rings;
- } else {
- max_rings = 1;
- }
-
- /* Fill the RSS indirection table with ring group ids */
- for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
- if (j == max_rings)
- j = 0;
- vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
- }
+ rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
+ if (rc)
+ return rc;
- req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
- req.hash_key_tbl_addr =
+ if (set_rss) {
+ bnxt_fill_hw_rss_tbl(bp, vnic);
+ req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
+ req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
+ req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+ req->hash_key_tbl_addr =
cpu_to_le64(vnic->rss_hash_key_dma_addr);
}
- req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+ return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
- u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
- struct hwrm_vnic_rss_cfg_input req = {0};
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
- req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
- if (!set_rss) {
- hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return 0;
- }
- req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
- req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
- req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
- req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
- nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
- for (i = 0, k = 0; i < nr_ctxs; i++) {
- __le16 *ring_tbl = vnic->rss_table;
- int rc;
+ struct hwrm_vnic_rss_cfg_input *req;
+ dma_addr_t ring_tbl_map;
+ u32 i, nr_ctxs;
+ int rc;
- req.ring_table_pair_index = i;
- req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
- for (j = 0; j < 64; j++) {
- u16 ring_id;
-
- ring_id = rxr->rx_ring_struct.fw_ring_id;
- *ring_tbl++ = cpu_to_le16(ring_id);
- ring_id = bnxt_cp_ring_for_rx(bp, rxr);
- *ring_tbl++ = cpu_to_le16(ring_id);
- rxr++;
- k++;
- if (k == max_rings) {
- k = 0;
- rxr = &bp->rx_ring[0];
- }
- }
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
+ if (rc)
+ return rc;
+
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ if (!set_rss)
+ return hwrm_req_send(bp, req);
+
+ bnxt_fill_hw_rss_tbl(bp, vnic);
+ req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
+ req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
+ req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
+ ring_tbl_map = vnic->rss_table_dma_addr;
+ nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
+
+ hwrm_req_hold(bp, req);
+ for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
+ req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
+ req->ring_table_pair_index = i;
+ req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
+ rc = hwrm_req_send(bp, req);
if (rc)
- return rc;
+ goto exit;
}
- return 0;
+
+exit:
+ hwrm_req_drop(bp, req);
+ return rc;
}
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
- struct hwrm_vnic_plcmodes_cfg_input req = {0};
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
- req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
- VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
- VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
- req.enables =
- cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
- VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+ struct hwrm_vnic_plcmodes_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
+ req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
+
+ if (BNXT_RX_PAGE_MODE(bp) && !BNXT_RX_JUMBO_MODE(bp)) {
+ req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+ req->enables |=
+ cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+ }
/* thresholds not implemented in firmware yet */
- req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
- req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
- req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
+ req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ return hwrm_req_send(bp, req);
}
static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
u16 ctx_idx)
{
- struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
+ struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
+
+ if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
+ return;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
- req.rss_cos_lb_ctx_id =
+ req->rss_cos_lb_ctx_id =
cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
- hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ hwrm_req_send(bp, req);
bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}
@@ -4972,20 +5415,20 @@ static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
{
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
int rc;
- struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
- struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
- bp->hwrm_cmd_resp_addr;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
- -1);
+ rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
+ if (rc)
+ return rc;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (!rc)
bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
le16_to_cpu(resp->rss_cos_lb_ctx_id);
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
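
/*
 * Illustrative sketch only, not part of this patch: the one-shot request
 * lifecycle that the conversions in this file follow.  hwrm_req_init()
 * replaces the stack-allocated request plus bnxt_hwrm_cmd_hdr_init(), and
 * hwrm_req_hold()/hwrm_req_drop() replace taking and releasing
 * bp->hwrm_cmd_lock around reads of the response.  An un-held request is
 * released by hwrm_req_send() itself, which is why call sites that never
 * read the response do not call hwrm_req_drop().  The wrapper name below
 * is hypothetical; the HWRM_VNIC_QCAPS request and the resp->flags field
 * are taken from hunks elsewhere in this patch.
 */
static int bnxt_example_vnic_qcaps(struct bnxt *bp, u32 *vnic_flags)
{
	struct hwrm_vnic_qcaps_output *resp;
	struct hwrm_vnic_qcaps_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);	/* allocate req, fill HWRM header */
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);		/* response stays valid until drop */
	rc = hwrm_req_send(bp, req);		/* or hwrm_req_send_silent() */
	if (!rc)
		*vnic_flags = le32_to_cpu(resp->flags);
	hwrm_req_drop(bp, req);			/* required after hold */
	return rc;
}
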
@@ -4999,47 +5442,50 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
- unsigned int ring = 0, grp_idx;
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
- struct hwrm_vnic_cfg_input req = {0};
+ struct hwrm_vnic_cfg_input *req;
+ unsigned int ring = 0, grp_idx;
u16 def_vlan = 0;
+ int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
+ if (rc)
+ return rc;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
- req.default_rx_ring_id =
+ req->default_rx_ring_id =
cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
- req.default_cmpl_ring_id =
+ req->default_cmpl_ring_id =
cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
- req.enables =
+ req->enables =
cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
goto vnic_mru;
}
- req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
+ req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
/* Only RSS support for now TBD: COS & LB */
if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
- req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
- req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
+ req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+ req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
- req.rss_rule =
+ req->rss_rule =
cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
- req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
+ req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
- req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
+ req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
} else {
- req.rss_rule = cpu_to_le16(0xffff);
+ req->rss_rule = cpu_to_le16(0xffff);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
(vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
- req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
- req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
+ req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
+ req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
} else {
- req.cos_rule = cpu_to_le16(0xffff);
+ req->cos_rule = cpu_to_le16(0xffff);
}
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
@@ -5050,40 +5496,38 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
ring = bp->rx_nr_rings - 1;
grp_idx = bp->rx_ring[ring].bnapi->index;
- req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
- req.lb_rule = cpu_to_le16(0xffff);
+ req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
+ req->lb_rule = cpu_to_le16(0xffff);
vnic_mru:
- req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN);
+ req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
- req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
if (BNXT_VF(bp))
def_vlan = bp->vf.vlan;
#endif
if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
- req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+ req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
- req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
+ req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return hwrm_req_send(bp, req);
}
-static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
+static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
{
- u32 rc = 0;
-
if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
- struct hwrm_vnic_free_input req = {0};
+ struct hwrm_vnic_free_input *req;
+
+ if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
+ return;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
- req.vnic_id =
+ req->vnic_id =
cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ hwrm_req_send(bp, req);
bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
}
- return rc;
}
static void bnxt_hwrm_vnic_free(struct bnxt *bp)
@@ -5098,11 +5542,15 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
unsigned int start_rx_ring_idx,
unsigned int nr_rings)
{
- int rc = 0;
unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
- struct hwrm_vnic_alloc_input req = {0};
- struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct hwrm_vnic_alloc_output *resp;
+ struct hwrm_vnic_alloc_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
+ if (rc)
+ return rc;
if (bp->flags & BNXT_FLAG_CHIP_P5)
goto vnic_no_ring_grps;
@@ -5122,22 +5570,20 @@ vnic_no_ring_grps:
for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
if (vnic_id == 0)
- req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
+ req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (!rc)
vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
- struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_vnic_qcaps_input req = {0};
+ struct hwrm_vnic_qcaps_output *resp;
+ struct hwrm_vnic_qcaps_input *req;
int rc;
bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
@@ -5145,9 +5591,12 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (bp->hwrm_spec_code < 0x10600)
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
+ if (rc)
+ return rc;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (!rc) {
u32 flags = le32_to_cpu(resp->flags);
@@ -5157,100 +5606,112 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (flags &
VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
+
+ /* Older P5 fw before EXT_HW_STATS support did not set
+ * VLAN_STRIP_CAP properly.
+ */
+ if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
+ (BNXT_CHIP_P5_THOR(bp) &&
+ !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
+ bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
- if (bp->max_tpa_v2)
- bp->hw_ring_stats_size =
- sizeof(struct ctx_hw_stats_ext);
+ if (bp->max_tpa_v2) {
+ if (BNXT_CHIP_P5_THOR(bp))
+ bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
+ else
+ bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
+ }
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
{
+ struct hwrm_ring_grp_alloc_output *resp;
+ struct hwrm_ring_grp_alloc_input *req;
+ int rc;
u16 i;
- u32 rc = 0;
if (bp->flags & BNXT_FLAG_CHIP_P5)
return 0;
- mutex_lock(&bp->hwrm_cmd_lock);
+ rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
+ if (rc)
+ return rc;
+
+ resp = hwrm_req_hold(bp, req);
for (i = 0; i < bp->rx_nr_rings; i++) {
- struct hwrm_ring_grp_alloc_input req = {0};
- struct hwrm_ring_grp_alloc_output *resp =
- bp->hwrm_cmd_resp_addr;
unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
+ req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
+ req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
+ req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
+ req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
- req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
- req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
- req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
- req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
+ rc = hwrm_req_send(bp, req);
- rc = _hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
if (rc)
break;
bp->grp_info[grp_idx].fw_grp_id =
le32_to_cpu(resp->ring_group_id);
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
-static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
+static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
{
+ struct hwrm_ring_grp_free_input *req;
u16 i;
- u32 rc = 0;
- struct hwrm_ring_grp_free_input req = {0};
if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
- return 0;
+ return;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
+ if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
+ return;
- mutex_lock(&bp->hwrm_cmd_lock);
+ hwrm_req_hold(bp, req);
for (i = 0; i < bp->cp_nr_rings; i++) {
if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
continue;
- req.ring_group_id =
+ req->ring_group_id =
cpu_to_le32(bp->grp_info[i].fw_grp_id);
- rc = _hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
+ hwrm_req_send(bp, req);
bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
}
- mutex_unlock(&bp->hwrm_cmd_lock);
- return rc;
+ hwrm_req_drop(bp, req);
}
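
/*
 * Illustrative sketch only, not part of this patch: when the same command
 * is issued repeatedly, as the two ring-group helpers above do, the request
 * is initialized and held once, re-filled and re-sent per iteration, and
 * dropped once at the end.  The helper name and the ids[]/count parameters
 * are hypothetical; HWRM_RING_GRP_FREE and ring_group_id come from the
 * hunk above.
 */
static int bnxt_example_free_ring_grps(struct bnxt *bp, u32 *ids, int count)
{
	struct hwrm_ring_grp_free_input *req;
	int rc, i;

	rc = hwrm_req_init(bp, req, HWRM_RING_GRP_FREE);
	if (rc)
		return rc;

	hwrm_req_hold(bp, req);		/* keep req valid across multiple sends */
	for (i = 0; i < count; i++) {
		req->ring_group_id = cpu_to_le32(ids[i]);
		rc = hwrm_req_send(bp, req);
		if (rc)
			break;
	}
	hwrm_req_drop(bp, req);
	return rc;
}
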
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
u32 ring_type, u32 map_index)
{
- int rc = 0, err = 0;
- struct hwrm_ring_alloc_input req = {0};
- struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_ring_alloc_output *resp;
+ struct hwrm_ring_alloc_input *req;
struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
struct bnxt_ring_grp_info *grp_info;
+ int rc, err = 0;
u16 ring_id;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
+ if (rc)
+ goto exit;
- req.enables = 0;
+ req->enables = 0;
if (rmem->nr_pages > 1) {
- req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
+ req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
/* Page size is in log2 units */
- req.page_size = BNXT_PAGE_SHIFT;
- req.page_tbl_depth = 1;
+ req->page_size = BNXT_PAGE_SHIFT;
+ req->page_tbl_depth = 1;
} else {
- req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
+ req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
}
- req.fbo = 0;
+ req->fbo = 0;
/* Association of ring index with doorbell index and MSIX number */
- req.logical_id = cpu_to_le16(map_index);
+ req->logical_id = cpu_to_le16(map_index);
switch (ring_type) {
case HWRM_RING_ALLOC_TX: {
@@ -5258,67 +5719,67 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
txr = container_of(ring, struct bnxt_tx_ring_info,
tx_ring_struct);
- req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
/* Association of transmit ring with completion ring */
grp_info = &bp->grp_info[ring->grp_idx];
- req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
- req.length = cpu_to_le32(bp->tx_ring_mask + 1);
- req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req.queue_id = cpu_to_le16(ring->queue_id);
+ req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
+ req->length = cpu_to_le32(bp->tx_ring_mask + 1);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->queue_id = cpu_to_le16(ring->queue_id);
break;
}
case HWRM_RING_ALLOC_RX:
- req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
- req.length = cpu_to_le32(bp->rx_ring_mask + 1);
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req->length = cpu_to_le32(bp->rx_ring_mask + 1);
if (bp->flags & BNXT_FLAG_CHIP_P5) {
u16 flags = 0;
/* Association of rx ring with stats context */
grp_info = &bp->grp_info[ring->grp_idx];
- req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
- req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req.enables |= cpu_to_le32(
+ req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->enables |= cpu_to_le32(
RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
if (NET_IP_ALIGN == 2)
flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
- req.flags = cpu_to_le16(flags);
+ req->flags = cpu_to_le16(flags);
}
break;
case HWRM_RING_ALLOC_AGG:
if (bp->flags & BNXT_FLAG_CHIP_P5) {
- req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
/* Association of agg ring with rx ring */
grp_info = &bp->grp_info[ring->grp_idx];
- req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
- req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
- req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req.enables |= cpu_to_le32(
+ req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
+ req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->enables |= cpu_to_le32(
RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
} else {
- req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
}
- req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
break;
case HWRM_RING_ALLOC_CMPL:
- req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
- req.length = cpu_to_le32(bp->cp_ring_mask + 1);
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
+ req->length = cpu_to_le32(bp->cp_ring_mask + 1);
if (bp->flags & BNXT_FLAG_CHIP_P5) {
/* Association of cp ring with nq */
grp_info = &bp->grp_info[map_index];
- req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
- req.cq_handle = cpu_to_le64(ring->handle);
- req.enables |= cpu_to_le32(
+ req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
+ req->cq_handle = cpu_to_le64(ring->handle);
+ req->enables |= cpu_to_le32(
RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
} else if (bp->flags & BNXT_FLAG_USING_MSIX) {
- req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
}
break;
case HWRM_RING_ALLOC_NQ:
- req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
- req.length = cpu_to_le32(bp->cp_ring_mask + 1);
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
+ req->length = cpu_to_le32(bp->cp_ring_mask + 1);
if (bp->flags & BNXT_FLAG_USING_MSIX)
- req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
break;
default:
netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
@@ -5326,12 +5787,13 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
return -1;
}
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
err = le16_to_cpu(resp->error_code);
ring_id = le16_to_cpu(resp->ring_id);
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
+exit:
if (rc || err) {
netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
ring_type, rc, err);
@@ -5346,23 +5808,28 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
int rc;
if (BNXT_PF(bp)) {
- struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_input *req;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ if (rc)
+ return rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.fid = cpu_to_le16(0xffff);
- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
- req.async_event_cr = cpu_to_le16(idx);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req->fid = cpu_to_le16(0xffff);
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+ req->async_event_cr = cpu_to_le16(idx);
+ return hwrm_req_send(bp, req);
} else {
- struct hwrm_func_vf_cfg_input req = {0};
+ struct hwrm_func_vf_cfg_input *req;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
+ if (rc)
+ return rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
- req.enables =
+ req->enables =
cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
- req.async_event_cr = cpu_to_le16(idx);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req->async_event_cr = cpu_to_le16(idx);
+ return hwrm_req_send(bp, req);
}
- return rc;
}
static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
@@ -5370,9 +5837,9 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
{
if (bp->flags & BNXT_FLAG_CHIP_P5) {
if (BNXT_PF(bp))
- db->doorbell = bp->bar1 + 0x10000;
+ db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
else
- db->doorbell = bp->bar1 + 0x4000;
+ db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
switch (ring_type) {
case HWRM_RING_ALLOC_TX:
db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
@@ -5533,23 +6000,27 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
u32 ring_type, int cmpl_ring_id)
{
+ struct hwrm_ring_free_output *resp;
+ struct hwrm_ring_free_input *req;
+ u16 error_code = 0;
int rc;
- struct hwrm_ring_free_input req = {0};
- struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
- u16 error_code;
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ if (BNXT_NO_FW_ACCESS(bp))
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
- req.ring_type = ring_type;
- req.ring_id = cpu_to_le16(ring->fw_ring_id);
+ rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
+ if (rc)
+ goto exit;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- error_code = le16_to_cpu(resp->error_code);
- mutex_unlock(&bp->hwrm_cmd_lock);
+ req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
+ req->ring_type = ring_type;
+ req->ring_id = cpu_to_le16(ring->fw_ring_id);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ error_code = le16_to_cpu(resp->error_code);
+ hwrm_req_drop(bp, req);
+exit:
if (rc || error_code) {
netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
ring_type, rc, error_code);
@@ -5664,20 +6135,23 @@ static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
static int bnxt_hwrm_get_rings(struct bnxt *bp)
{
- struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
int rc;
if (bp->hwrm_spec_code < 0x10601)
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
- req.fid = cpu_to_le16(0xffff);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (rc) {
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
@@ -5711,39 +6185,45 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_cp_rings = cp;
hw_resc->resv_stat_ctxs = stats;
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return 0;
}
-/* Caller must hold bp->hwrm_cmd_lock */
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
{
- struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
int rc;
if (bp->hwrm_spec_code < 0x10601)
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
- req.fid = cpu_to_le16(fid);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(fid);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (!rc)
*tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+ hwrm_req_drop(bp, req);
return rc;
}
static bool bnxt_rfs_supported(struct bnxt *bp);
-static void
-__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
- int tx_rings, int rx_rings, int ring_grps,
- int cp_rings, int stats, int vnics)
+static struct hwrm_func_cfg_input *
+__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings, int stats, int vnics)
{
+ struct hwrm_func_cfg_input *req;
u32 enables = 0;
- bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
+ if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
+ return NULL;
+
req->fid = cpu_to_le16(0xffff);
enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
req->num_tx_rings = cpu_to_le16(tx_rings);
@@ -5784,17 +6264,19 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
req->num_vnics = cpu_to_le16(vnics);
}
req->enables = cpu_to_le32(enables);
+ return req;
}
-static void
-__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
- struct hwrm_func_vf_cfg_input *req, int tx_rings,
- int rx_rings, int ring_grps, int cp_rings,
- int stats, int vnics)
+static struct hwrm_func_vf_cfg_input *
+__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings, int stats, int vnics)
{
+ struct hwrm_func_vf_cfg_input *req;
u32 enables = 0;
- bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
+ if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
+ return NULL;
+
enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
@@ -5826,36 +6308,41 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
req->num_vnics = cpu_to_le16(vnics);
req->enables = cpu_to_le32(enables);
+ return req;
}
static int
bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
int ring_grps, int cp_rings, int stats, int vnics)
{
- struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_input *req;
int rc;
- __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
- if (!req.enables)
+ req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
+ cp_rings, stats, vnics);
+ if (!req)
+ return -ENOMEM;
+
+ if (!req->enables) {
+ hwrm_req_drop(bp, req);
return 0;
+ }
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_send(bp, req);
if (rc)
return rc;
if (bp->hwrm_spec_code < 0x10601)
bp->hw_resc.resv_tx_rings = tx_rings;
- rc = bnxt_hwrm_get_rings(bp);
- return rc;
+ return bnxt_hwrm_get_rings(bp);
}
static int
bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
int ring_grps, int cp_rings, int stats, int vnics)
{
- struct hwrm_func_vf_cfg_input req = {0};
+ struct hwrm_func_vf_cfg_input *req;
int rc;
if (!BNXT_NEW_RM(bp)) {
@@ -5863,14 +6350,16 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return 0;
}
- __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
+ cp_rings, stats, vnics);
+ if (!req)
+ return -ENOMEM;
+
+ rc = hwrm_req_send(bp, req);
if (rc)
return rc;
- rc = bnxt_hwrm_get_rings(bp);
- return rc;
+ return bnxt_hwrm_get_rings(bp);
}
static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
@@ -5924,6 +6413,21 @@ static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
return cp + ulp_stat;
}
+/* Check if a default RSS map needs to be set up. This function is only
+ * used on older firmware that does not require reserving RX rings.
+ */
+static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
+{
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ /* The RSS map is valid for RX rings set to resv_rx_rings */
+ if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
+ hw_resc->resv_rx_rings = bp->rx_nr_rings;
+ if (!netif_is_rxfh_configured(bp->dev))
+ bnxt_set_dflt_rss_indir_tbl(bp);
+ }
+}
+
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@@ -5932,22 +6436,28 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
int rx = bp->rx_nr_rings, stat;
int vnic = 1, grp = rx;
- if (bp->hwrm_spec_code < 0x10601)
- return false;
-
- if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
+ if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
+ bp->hwrm_spec_code >= 0x10601)
return true;
+ /* Old firmware does not need RX ring reservations but we still
+ * need to set up a default RSS map when needed. With new firmware
+ * we go through RX ring reservations first and then set up the
+ * RSS map for the successfully reserved RX rings when needed.
+ */
+ if (!BNXT_NEW_RM(bp)) {
+ bnxt_check_rss_tbl_no_rmgr(bp);
+ return false;
+ }
if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
stat = bnxt_get_func_stat_ctxs(bp);
- if (BNXT_NEW_RM(bp) &&
- (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
- hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
- (hw_resc->resv_hw_ring_grps != grp &&
- !(bp->flags & BNXT_FLAG_CHIP_P5))))
+ if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
+ hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
+ (hw_resc->resv_hw_ring_grps != grp &&
+ !(bp->flags & BNXT_FLAG_CHIP_P5)))
return true;
if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
hw_resc->resv_irqs != nq)
@@ -6015,12 +6525,30 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
rx = rx_rings << 1;
cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
bp->tx_nr_rings = tx;
+
+ /* If we cannot reserve all the RX rings, reset the RSS map only
+ * if absolutely necessary
+ */
+ if (rx_rings != bp->rx_nr_rings) {
+ netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
+ rx_rings, bp->rx_nr_rings);
+ if (netif_is_rxfh_configured(bp->dev) &&
+ (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
+ bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
+ bnxt_get_max_rss_ring(bp) >= rx_rings)) {
+ netdev_warn(bp->dev, "RSS table entries reverting to default\n");
+ bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
+ }
+ }
bp->rx_nr_rings = rx_rings;
bp->cp_nr_rings = cp;
if (!tx || !rx || !cp || !grp || !vnic || !stat)
return -ENOMEM;
+ if (!netif_is_rxfh_configured(bp->dev))
+ bnxt_set_dflt_rss_indir_tbl(bp);
+
return rc;
}
@@ -6028,15 +6556,14 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
int ring_grps, int cp_rings, int stats,
int vnics)
{
- struct hwrm_func_vf_cfg_input req = {0};
+ struct hwrm_func_vf_cfg_input *req;
u32 flags;
- int rc;
if (!BNXT_NEW_RM(bp))
return 0;
- __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
+ cp_rings, stats, vnics);
flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
@@ -6046,21 +6573,19 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (!(bp->flags & BNXT_FLAG_CHIP_P5))
flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
- req.flags = cpu_to_le32(flags);
- rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ req->flags = cpu_to_le32(flags);
+ return hwrm_req_send_silent(bp, req);
}
static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
int ring_grps, int cp_rings, int stats,
int vnics)
{
- struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_input *req;
u32 flags;
- int rc;
- __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
+ cp_rings, stats, vnics);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
if (BNXT_NEW_RM(bp)) {
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
@@ -6074,9 +6599,8 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
}
- req.flags = cpu_to_le32(flags);
- rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ req->flags = cpu_to_le32(flags);
+ return hwrm_req_send_silent(bp, req);
}
static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
@@ -6097,9 +6621,9 @@ static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
{
- struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
- struct hwrm_ring_aggint_qcaps_input req = {0};
+ struct hwrm_ring_aggint_qcaps_output *resp;
+ struct hwrm_ring_aggint_qcaps_input *req;
int rc;
coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
@@ -6115,9 +6639,11 @@ static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
if (bp->hwrm_spec_code < 0x10902)
return;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
+ return;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
if (!rc) {
coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
coal_cap->nq_params = le32_to_cpu(resp->nq_params);
@@ -6137,7 +6663,7 @@ static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
le16_to_cpu(resp->num_cmpl_aggr_int_max);
coal_cap->timer_units = le16_to_cpu(resp->timer_units);
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
}
static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
@@ -6152,8 +6678,8 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
+ u16 val, tmr, max, flags = hw_coal->flags;
u32 cmpl_params = coal_cap->cmpl_params;
- u16 val, tmr, max, flags = 0;
max = hw_coal->bufs_per_record * 128;
if (hw_coal->budget)
@@ -6196,8 +6722,6 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
}
- if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
- flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
@@ -6205,37 +6729,40 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
}
-/* Caller holds bp->hwrm_cmd_lock */
static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
struct bnxt_coal *hw_coal)
{
- struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
u32 nq_params = coal_cap->nq_params;
u16 tmr;
+ int rc;
if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
- -1, -1);
- req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
- req.flags =
+ rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
+ if (rc)
+ return rc;
+
+ req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
+ req->flags =
cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
- req.int_lat_tmr_min = cpu_to_le16(tmr);
- req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
- return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req->int_lat_tmr_min = cpu_to_le16(tmr);
+ req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
+ return hwrm_req_send(bp, req);
}
int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
{
- struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_coal coal;
+ int rc;
/* Tick values in micro seconds.
* 1 coal_buf x bufs_per_record = 1 completion record.
@@ -6248,48 +6775,53 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
if (!bnapi->rx_ring)
return -ENODEV;
- bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
- HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
+ rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
+ if (rc)
+ return rc;
- bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
+ bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
- req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
+ req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
- return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
- HWRM_CMD_TIMEOUT);
+ return hwrm_req_send(bp, req_rx);
}
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
- int i, rc = 0;
- struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
- req_tx = {0}, *req;
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
+ *req;
+ int i, rc;
+
+ rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
+ if (rc)
+ return rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
- HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
- bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
- HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
+ rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
+ if (rc) {
+ hwrm_req_drop(bp, req_rx);
+ return rc;
+ }
- bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
- bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
+ bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
+ bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
- mutex_lock(&bp->hwrm_cmd_lock);
+ hwrm_req_hold(bp, req_rx);
+ hwrm_req_hold(bp, req_tx);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_coal *hw_coal;
u16 ring_id;
- req = &req_rx;
+ req = req_rx;
if (!bnapi->rx_ring) {
ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
- req = &req_tx;
+ req = req_tx;
} else {
ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
}
req->ring_id = cpu_to_le16(ring_id);
- rc = _hwrm_send_message(bp, req, sizeof(*req),
- HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_send(bp, req);
if (rc)
break;
@@ -6297,11 +6829,10 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
continue;
if (bnapi->rx_ring && bnapi->tx_ring) {
- req = &req_tx;
+ req = req_tx;
ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
req->ring_id = cpu_to_le16(ring_id);
- rc = _hwrm_send_message(bp, req, sizeof(*req),
- HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_send(bp, req);
if (rc)
break;
}
@@ -6311,64 +6842,77 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
hw_coal = &bp->tx_coal;
__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req_rx);
+ hwrm_req_drop(bp, req_tx);
return rc;
}
-static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
+static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
- int rc = 0, i;
- struct hwrm_stat_ctx_free_input req = {0};
+ struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
+ struct hwrm_stat_ctx_free_input *req;
+ int i;
if (!bp->bnapi)
- return 0;
+ return;
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
- return 0;
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
+ return;
- mutex_lock(&bp->hwrm_cmd_lock);
+ if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
+ return;
+ if (BNXT_FW_MAJ(bp) <= 20) {
+ if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
+ hwrm_req_drop(bp, req);
+ return;
+ }
+ hwrm_req_hold(bp, req0);
+ }
+ hwrm_req_hold(bp, req);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
- req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
-
- rc = _hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
+ req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
+ if (req0) {
+ req0->stat_ctx_id = req->stat_ctx_id;
+ hwrm_req_send(bp, req0);
+ }
+ hwrm_req_send(bp, req);
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
}
}
- mutex_unlock(&bp->hwrm_cmd_lock);
- return rc;
+ hwrm_req_drop(bp, req);
+ if (req0)
+ hwrm_req_drop(bp, req0);
}
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
{
- int rc = 0, i;
- struct hwrm_stat_ctx_alloc_input req = {0};
- struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_stat_ctx_alloc_output *resp;
+ struct hwrm_stat_ctx_alloc_input *req;
+ int rc, i;
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
+ if (rc)
+ return rc;
- req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
- req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
+ req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
+ req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
- mutex_lock(&bp->hwrm_cmd_lock);
+ resp = hwrm_req_hold(bp, req);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
+ req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
- rc = _hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_send(bp, req);
if (rc)
break;
@@ -6376,21 +6920,25 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
- struct hwrm_func_qcfg_input req = {0};
- struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
+ u32 min_db_offset = 0;
u16 flags;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
- req.fid = cpu_to_le16(0xffff);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (rc)
goto func_qcfg_exit;
@@ -6412,6 +6960,8 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
}
if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
bp->flags |= BNXT_FLAG_MULTI_HOST;
+ if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
+ bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
@@ -6432,44 +6982,84 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (!bp->max_mtu)
bp->max_mtu = BNXT_MAX_MTU;
+ if (bp->db_size)
+ goto func_qcfg_exit;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_PF(bp))
+ min_db_offset = DB_PF_OFFSET_P5;
+ else
+ min_db_offset = DB_VF_OFFSET_P5;
+ }
+ bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
+ 1024);
+ if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
+ bp->db_size <= min_db_offset)
+ bp->db_size = pci_resource_len(bp->pdev, 2);
+
func_qcfg_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
+static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
+ struct hwrm_func_backing_store_qcaps_output *resp)
+{
+ struct bnxt_mem_init *mem_init;
+ u16 init_mask;
+ u8 init_val;
+ u8 *offset;
+ int i;
+
+ init_val = resp->ctx_kind_initializer;
+ init_mask = le16_to_cpu(resp->ctx_init_mask);
+ offset = &resp->qp_init_offset;
+ mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
+ for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
+ mem_init->init_val = init_val;
+ mem_init->offset = BNXT_MEM_INVALID_OFFSET;
+ if (!init_mask)
+ continue;
+ if (i == BNXT_CTX_MEM_INIT_STAT)
+ offset = &resp->stat_init_offset;
+ if (init_mask & (1 << i))
+ mem_init->offset = *offset * 4;
+ else
+ mem_init->init_val = 0;
+ }
+ ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
+}
+
static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
{
- struct hwrm_func_backing_store_qcaps_input req = {0};
- struct hwrm_func_backing_store_qcaps_output *resp =
- bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_backing_store_qcaps_output *resp;
+ struct hwrm_func_backing_store_qcaps_input *req;
int rc;
if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
+ if (rc)
+ return rc;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
if (!rc) {
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
- int i;
+ int i, tqm_rings;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
rc = -ENOMEM;
goto ctx_err;
}
- ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
- if (!ctx_pg) {
- kfree(ctx);
- rc = -ENOMEM;
- goto ctx_err;
- }
- for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
- ctx->tqm_mem[i] = ctx_pg;
-
- bp->ctx = ctx;
ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
@@ -6501,26 +7091,40 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
le16_to_cpu(resp->mrav_num_entries_units);
ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
- ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
+
+ bnxt_init_ctx_initializer(ctx, resp);
+
+ ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
+ if (!ctx->tqm_fp_rings_count)
+ ctx->tqm_fp_rings_count = bp->max_q;
+ else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
+ ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
+
+ tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
+ ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
+ if (!ctx_pg) {
+ kfree(ctx);
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ for (i = 0; i < tqm_rings; i++, ctx_pg++)
+ ctx->tqm_mem[i] = ctx_pg;
+ bp->ctx = ctx;
} else {
rc = 0;
}
ctx_err:
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
__le64 *pg_dir)
{
- u8 pg_size = 0;
-
- if (BNXT_PAGE_SHIFT == 13)
- pg_size = 1 << 4;
- else if (BNXT_PAGE_SIZE == 16)
- pg_size = 2 << 4;
+ if (!rmem->nr_pages)
+ return;
- *pg_attr = pg_size;
+ BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
if (rmem->depth >= 1) {
if (rmem->depth == 2)
*pg_attr |= 2;
@@ -6541,103 +7145,111 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
{
- struct hwrm_func_backing_store_cfg_input req = {0};
+ struct hwrm_func_backing_store_cfg_input *req;
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_pg_info *ctx_pg;
+ void **__req = (void **)&req;
+ u32 req_len = sizeof(*req);
__le32 *num_entries;
__le64 *pg_dir;
u32 flags = 0;
u8 *pg_attr;
- int i, rc;
u32 ena;
+ int rc;
+ int i;
if (!ctx)
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
- req.enables = cpu_to_le32(enables);
+ if (req_len > bp->hwrm_max_ext_req_len)
+ req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
+ rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
+ if (rc)
+ return rc;
+ req->enables = cpu_to_le32(enables);
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
ctx_pg = &ctx->qp_mem;
- req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
- req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
- req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
- req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
+ req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
+ req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
+ req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
+ req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
- &req.qpc_pg_size_qpc_lvl,
- &req.qpc_page_dir);
+ &req->qpc_pg_size_qpc_lvl,
+ &req->qpc_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
ctx_pg = &ctx->srq_mem;
- req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
- req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
- req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
+ req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
+ req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
+ req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
- &req.srq_pg_size_srq_lvl,
- &req.srq_page_dir);
+ &req->srq_pg_size_srq_lvl,
+ &req->srq_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
ctx_pg = &ctx->cq_mem;
- req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
- req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
- req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
- bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
- &req.cq_page_dir);
+ req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
+ req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
+ req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req->cq_pg_size_cq_lvl,
+ &req->cq_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
ctx_pg = &ctx->vnic_mem;
- req.vnic_num_vnic_entries =
+ req->vnic_num_vnic_entries =
cpu_to_le16(ctx->vnic_max_vnic_entries);
- req.vnic_num_ring_table_entries =
+ req->vnic_num_ring_table_entries =
cpu_to_le16(ctx->vnic_max_ring_table_entries);
- req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
+ req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
- &req.vnic_pg_size_vnic_lvl,
- &req.vnic_page_dir);
+ &req->vnic_pg_size_vnic_lvl,
+ &req->vnic_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
ctx_pg = &ctx->stat_mem;
- req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
- req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
+ req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
+ req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
- &req.stat_pg_size_stat_lvl,
- &req.stat_page_dir);
+ &req->stat_pg_size_stat_lvl,
+ &req->stat_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
ctx_pg = &ctx->mrav_mem;
- req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
+ req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
if (ctx->mrav_num_entries_units)
flags |=
FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
- req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
+ req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
- &req.mrav_pg_size_mrav_lvl,
- &req.mrav_page_dir);
+ &req->mrav_pg_size_mrav_lvl,
+ &req->mrav_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
ctx_pg = &ctx->tim_mem;
- req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
- req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
+ req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
+ req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
- &req.tim_pg_size_tim_lvl,
- &req.tim_page_dir);
+ &req->tim_pg_size_tim_lvl,
+ &req->tim_page_dir);
}
- for (i = 0, num_entries = &req.tqm_sp_num_entries,
- pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
- pg_dir = &req.tqm_sp_page_dir,
+ for (i = 0, num_entries = &req->tqm_sp_num_entries,
+ pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
+ pg_dir = &req->tqm_sp_page_dir,
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
- i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+ i < BNXT_MAX_TQM_RINGS;
+ i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
if (!(enables & ena))
continue;
- req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
+ req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
ctx_pg = ctx->tqm_mem[i];
*num_entries = cpu_to_le32(ctx_pg->entries);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
- req.flags = cpu_to_le32(flags);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ req->flags = cpu_to_le32(flags);
+ return hwrm_req_send(bp, req);
}
static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
@@ -6656,13 +7268,13 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
- u8 depth, bool use_init_val)
+ u8 depth, struct bnxt_mem_init *mem_init)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
int rc;
if (!mem_size)
- return 0;
+ return -EINVAL;
ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
@@ -6694,8 +7306,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
rmem->depth = 1;
rmem->nr_pages = MAX_CTX_PAGES;
- if (use_init_val)
- rmem->init_val = bp->ctx->ctx_kind_initializer;
+ rmem->mem_init = mem_init;
if (i == (nr_tbls - 1)) {
int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
@@ -6710,8 +7321,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (rmem->nr_pages > 1 || depth)
rmem->depth = 1;
- if (use_init_val)
- rmem->init_val = bp->ctx->ctx_kind_initializer;
+ rmem->mem_init = mem_init;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
}
return rc;
@@ -6746,7 +7356,7 @@ static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
ctx_pg->nr_pages = 0;
}
-static void bnxt_free_ctx_mem(struct bnxt *bp)
+void bnxt_free_ctx_mem(struct bnxt *bp)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
int i;
@@ -6755,7 +7365,7 @@ static void bnxt_free_ctx_mem(struct bnxt *bp)
return;
if (ctx->tqm_mem[0]) {
- for (i = 0; i < bp->max_q + 1; i++)
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
kfree(ctx->tqm_mem[0]);
ctx->tqm_mem[0] = NULL;
@@ -6775,7 +7385,9 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
{
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
+ struct bnxt_mem_init *init;
u32 mem_size, ena, entries;
+ u32 entries_sp, min;
u32 num_mr, num_ah;
u32 extra_srqs = 0;
u32 extra_qps = 0;
@@ -6801,39 +7413,54 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg = &ctx->qp_mem;
ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
extra_qps;
- mem_size = ctx->qp_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
- if (rc)
- return rc;
+ if (ctx->qp_entry_size) {
+ mem_size = ctx->qp_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->srq_mem;
ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
- mem_size = ctx->srq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
- if (rc)
- return rc;
+ if (ctx->srq_entry_size) {
+ mem_size = ctx->srq_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->cq_mem;
ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
- mem_size = ctx->cq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
- if (rc)
- return rc;
+ if (ctx->cq_entry_size) {
+ mem_size = ctx->cq_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->vnic_mem;
ctx_pg->entries = ctx->vnic_max_vnic_entries +
ctx->vnic_max_ring_table_entries;
- mem_size = ctx->vnic_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
- if (rc)
- return rc;
+ if (ctx->vnic_entry_size) {
+ mem_size = ctx->vnic_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->stat_mem;
ctx_pg->entries = ctx->stat_max_entries;
- mem_size = ctx->stat_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
- if (rc)
- return rc;
+ if (ctx->stat_entry_size) {
+ mem_size = ctx->stat_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
+ if (rc)
+ return rc;
+ }
ena = 0;
if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
@@ -6846,10 +7473,13 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
num_mr = 1024 * 256;
num_ah = 1024 * 128;
ctx_pg->entries = num_mr + num_ah;
- mem_size = ctx->mrav_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
- if (rc)
- return rc;
+ if (ctx->mrav_entry_size) {
+ mem_size = ctx->mrav_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
+ if (rc)
+ return rc;
+ }
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
if (ctx->mrav_num_entries_units)
ctx_pg->entries =
@@ -6858,50 +7488,59 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg = &ctx->tim_mem;
ctx_pg->entries = ctx->qp_mem.entries;
- mem_size = ctx->tim_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
- if (rc)
- return rc;
+ if (ctx->tim_entry_size) {
+ mem_size = ctx->tim_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
+ if (rc)
+ return rc;
+ }
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
skip_rdma:
- entries = ctx->qp_max_l2_entries + extra_qps;
+ min = ctx->tqm_min_entries_per_ring;
+ entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
+ 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
+ entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
+ entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
entries = roundup(entries, ctx->tqm_entries_multiple);
- entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
- ctx->tqm_max_entries_per_ring);
- for (i = 0; i < bp->max_q + 1; i++) {
+ entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
ctx_pg = ctx->tqm_mem[i];
- ctx_pg->entries = entries;
- mem_size = ctx->tqm_entry_size * entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
- if (rc)
- return rc;
+ ctx_pg->entries = i ? entries : entries_sp;
+ if (ctx->tqm_entry_size) {
+ mem_size = ctx->tqm_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
+ NULL);
+ if (rc)
+ return rc;
+ }
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
}
ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
- if (rc)
+ if (rc) {
netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
rc);
- else
- ctx->flags |= BNXT_CTX_FLAG_INITED;
-
+ return rc;
+ }
+ ctx->flags |= BNXT_CTX_FLAG_INITED;
return 0;
}
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
{
- struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_func_resource_qcaps_input req = {0};
+ struct hwrm_func_resource_qcaps_output *resp;
+ struct hwrm_func_resource_qcaps_input *req;
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
- req.fid = cpu_to_le16(0xffff);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
+ if (rc)
+ return rc;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
+ req->fid = cpu_to_le16(0xffff);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
if (rc)
goto hwrm_func_resc_qcaps_exit;
@@ -6942,23 +7581,89 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
}
hwrm_func_resc_qcaps_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
+{
+ struct hwrm_port_mac_ptp_qcfg_output *resp;
+ struct hwrm_port_mac_ptp_qcfg_input *req;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ bool phc_cfg;
+ u8 flags;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10801) {
+ rc = -ENODEV;
+ goto no_ptp;
+ }
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
+ if (rc)
+ goto no_ptp;
+
+ req->port_id = cpu_to_le16(bp->pf.port_id);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ goto exit;
+
+ flags = resp->flags;
+ if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
+ rc = -ENODEV;
+ goto exit;
+ }
+ if (!ptp) {
+ ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
+ if (!ptp) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ ptp->bp = bp;
+ bp->ptp_cfg = ptp;
+ }
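+	/* Prefer the reference clock registers reported by firmware when
+	 * partial direct access is supported; otherwise fall back to the
+	 * fixed P5 timesync registers.
+	 */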
+ if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
+ ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
+ ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
+ } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
+ ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
+ } else {
+ rc = -ENODEV;
+ goto exit;
+ }
+ phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
+ rc = bnxt_ptp_init(bp, phc_cfg);
+ if (rc)
+ netdev_warn(bp->dev, "PTP initialization failed.\n");
+exit:
+ hwrm_req_drop(bp, req);
+ if (!rc)
+ return 0;
+
+no_ptp:
+ bnxt_ptp_clear(bp);
+ kfree(ptp);
+ bp->ptp_cfg = NULL;
return rc;
}
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
- int rc = 0;
- struct hwrm_func_qcaps_input req = {0};
- struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_qcaps_output *resp;
+ struct hwrm_func_qcaps_input *req;
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- u32 flags;
+ u32 flags, flags_ext, flags_ext2;
+ int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
- req.fid = cpu_to_le16(0xffff);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
+ if (rc)
+ return rc;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req->fid = cpu_to_le16(0xffff);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (rc)
goto hwrm_func_qcaps_exit;
@@ -6977,9 +7682,30 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
+ if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
+ bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
+
+ flags_ext = le32_to_cpu(resp->flags_ext);
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
+ if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
+ if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
+ bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
+ if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+
+ flags_ext2 = le32_to_cpu(resp->flags_ext2);
+ if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
bp->tx_push_thresh = 0;
- if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
+ if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
+ BNXT_FW_MAJ(bp) > 217)
bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
@@ -7010,6 +7736,13 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_WOL_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
+ __bnxt_hwrm_ptp_qcfg(bp);
+ } else {
+ bnxt_ptp_clear(bp);
+ kfree(bp->ptp_cfg);
+ bp->ptp_cfg = NULL;
+ }
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
@@ -7020,19 +7753,48 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
}
hwrm_func_qcaps_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
+static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
+{
+ struct hwrm_dbg_qcaps_output *resp;
+ struct hwrm_dbg_qcaps_input *req;
+ int rc;
+
+ bp->fw_dbg_cap = 0;
+ if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
+ return;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
+ if (rc)
+ return;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ goto hwrm_dbg_qcaps_exit;
+
+ bp->fw_dbg_cap = le32_to_cpu(resp->flags);
+
+hwrm_dbg_qcaps_exit:
+ hwrm_req_drop(bp, req);
+}
+
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
-static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc;
rc = __bnxt_hwrm_func_qcaps(bp);
if (rc)
return rc;
+
+ bnxt_hwrm_dbg_qcaps(bp);
+
rc = bnxt_hwrm_queue_qportcfg(bp);
if (rc) {
netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
@@ -7051,19 +7813,20 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
{
- struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
- int rc = 0;
+ struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
u32 flags;
+ int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
return 0;
- resp = bp->hwrm_cmd_resp_addr;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
+ if (rc)
+ return rc;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (rc)
goto hwrm_cfa_adv_qcaps_exit;
@@ -7073,16 +7836,120 @@ static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
hwrm_cfa_adv_qcaps_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
+static int __bnxt_alloc_fw_health(struct bnxt *bp)
+{
+ if (bp->fw_health)
+ return 0;
+
+ bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
+ if (!bp->fw_health)
+ return -ENOMEM;
+
+ mutex_init(&bp->fw_health->lock);
+ return 0;
+}
+
+static int bnxt_alloc_fw_health(struct bnxt *bp)
+{
+ int rc;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
+ !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ return 0;
+
+ rc = __bnxt_alloc_fw_health(bp);
+ if (rc) {
+ bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
+ bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+ return rc;
+ }
+
+ return 0;
+}
+
+static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
+{
+ writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT +
+ BNXT_FW_HEALTH_WIN_MAP_OFF);
+}
+
+static void bnxt_inv_fw_health_reg(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 reg_type;
+
+ if (!fw_health)
+ return;
+
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
+ if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
+ fw_health->status_reliable = false;
+
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
+ if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
+ fw_health->resets_reliable = false;
+}
+
+static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
+{
+ void __iomem *hs;
+ u32 status_loc;
+ u32 reg_type;
+ u32 sig;
+
+ if (bp->fw_health)
+ bp->fw_health->status_reliable = false;
+
+ __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
+ hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
+
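+	/* Look for the hcomm status structure; if its signature is not
+	 * present, fall back to the fixed GRC status register on P5 chips.
+	 */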
+ sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
+ if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
+ if (!bp->chip_num) {
+ __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
+ bp->chip_num = readl(bp->bar0 +
+ BNXT_FW_HEALTH_WIN_BASE +
+ BNXT_GRC_REG_CHIP_NUM);
+ }
+ if (!BNXT_CHIP_P5(bp))
+ return;
+
+ status_loc = BNXT_GRC_REG_STATUS_P5 |
+ BNXT_FW_HEALTH_REG_TYPE_BAR0;
+ } else {
+ status_loc = readl(hs + offsetof(struct hcomm_status,
+ fw_status_loc));
+ }
+
+ if (__bnxt_alloc_fw_health(bp)) {
+ netdev_warn(bp->dev, "no memory for firmware status checks\n");
+ return;
+ }
+
+ bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
+ if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
+ __bnxt_map_fw_health_reg(bp, status_loc);
+ bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
+ BNXT_FW_HEALTH_WIN_OFF(status_loc);
+ }
+
+ bp->fw_health->status_reliable = true;
+}
+
static int bnxt_map_fw_health_regs(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
u32 reg_base = 0xffffffff;
int i;
+ bp->fw_health->status_reliable = false;
+ bp->fw_health->resets_reliable = false;
/* Only pre-map the monitoring GRC registers using window 3 */
for (i = 0; i < 4; i++) {
u32 reg = fw_health->regs[i];
@@ -7093,30 +7960,46 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
reg_base = reg & BNXT_GRC_BASE_MASK;
if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
return -ERANGE;
- fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE +
- (reg & BNXT_GRC_OFFSET_MASK);
+ fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
}
+ bp->fw_health->status_reliable = true;
+ bp->fw_health->resets_reliable = true;
if (reg_base == 0xffffffff)
return 0;
- writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
- BNXT_FW_HEALTH_WIN_MAP_OFF);
+ __bnxt_map_fw_health_reg(bp, reg_base);
return 0;
}
+static void bnxt_remap_fw_health_regs(struct bnxt *bp)
+{
+ if (!bp->fw_health)
+ return;
+
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
+ bp->fw_health->status_reliable = true;
+ bp->fw_health->resets_reliable = true;
+ } else {
+ bnxt_try_map_fw_health_reg(bp);
+ }
+}
+
static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
{
- struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_fw_health *fw_health = bp->fw_health;
- struct hwrm_error_recovery_qcfg_input req = {0};
+ struct hwrm_error_recovery_qcfg_output *resp;
+ struct hwrm_error_recovery_qcfg_input *req;
int rc, i;
if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
+ if (rc)
+ return rc;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (rc)
goto err_recovery_out;
fw_health->flags = le32_to_cpu(resp->flags);
@@ -7158,7 +8041,7 @@ static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
resp->delay_after_reset[i];
}
err_recovery_out:
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
if (!rc)
rc = bnxt_map_fw_health_regs(bp);
if (rc)
@@ -7168,26 +8051,42 @@ err_recovery_out:
static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
- struct hwrm_func_reset_input req = {0};
+ struct hwrm_func_reset_input *req;
+ int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
- req.enables = 0;
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
+ if (rc)
+ return rc;
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
+ req->enables = 0;
+ hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
+ return hwrm_req_send(bp, req);
+}
+
+static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
+{
+ struct hwrm_nvm_get_dev_info_output nvm_info;
+
+ if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
+ snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
+ nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
+ nvm_info.nvm_cfg_ver_upd);
}
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
- int rc = 0;
- struct hwrm_queue_qportcfg_input req = {0};
- struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_queue_qportcfg_output *resp;
+ struct hwrm_queue_qportcfg_input *req;
u8 i, j, *qptr;
bool no_rdma;
+ int rc = 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
+ if (rc)
+ return rc;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (rc)
goto qportcfg_exit;
@@ -7221,34 +8120,48 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
bp->max_lltc = bp->max_tc;
qportcfg_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
-static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
+static int bnxt_hwrm_poll(struct bnxt *bp)
{
- struct hwrm_ver_get_input req = {0};
+ struct hwrm_ver_get_input *req;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
- req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
- req.hwrm_intf_min = HWRM_VERSION_MINOR;
- req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+ rc = hwrm_req_init(bp, req, HWRM_VER_GET);
+ if (rc)
+ return rc;
- rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
- silent);
+ req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req->hwrm_intf_min = HWRM_VERSION_MINOR;
+ req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
+
+ hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
+ rc = hwrm_req_send(bp, req);
return rc;
}
static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
- struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
- u32 dev_caps_cfg;
- int rc;
+ struct hwrm_ver_get_output *resp;
+ struct hwrm_ver_get_input *req;
+ u16 fw_maj, fw_min, fw_bld, fw_rsv;
+ u32 dev_caps_cfg, hwrm_ver;
+ int rc, len;
+
+ rc = hwrm_req_init(bp, req, HWRM_VER_GET);
+ if (rc)
+ return rc;
+ hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = __bnxt_hwrm_ver_get(bp, false);
+ req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req->hwrm_intf_min = HWRM_VERSION_MINOR;
+ req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (rc)
goto hwrm_ver_get_exit;
@@ -7263,9 +8176,35 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
resp->hwrm_intf_upd_8b);
netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
}
- snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
- resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
- resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
+
+ hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
+ HWRM_VERSION_UPDATE;
+
+ if (bp->hwrm_spec_code > hwrm_ver)
+ snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
+ HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
+ HWRM_VERSION_UPDATE);
+ else
+ snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
+ resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+ resp->hwrm_intf_upd_8b);
+
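+	/* Use the 16-bit firmware version fields on HWRM spec newer than
+	 * 1.8.3; otherwise fall back to the legacy 8-bit fields.
+	 */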
+ fw_maj = le16_to_cpu(resp->hwrm_fw_major);
+ if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
+ fw_min = le16_to_cpu(resp->hwrm_fw_minor);
+ fw_bld = le16_to_cpu(resp->hwrm_fw_build);
+ fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
+ len = FW_VER_STR_LEN;
+ } else {
+ fw_maj = resp->hwrm_fw_maj_8b;
+ fw_min = resp->hwrm_fw_min_8b;
+ fw_bld = resp->hwrm_fw_bld_8b;
+ fw_rsv = resp->hwrm_fw_rsvd_8b;
+ len = BC_HWRM_STR_LEN;
+ }
+ bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
+ snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
+ fw_rsv);
if (strlen(resp->active_pkg_name)) {
int fw_ver_len = strlen(bp->fw_ver_str);
@@ -7279,6 +8218,12 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
+ bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
+ if (!bp->hwrm_cmd_max_timeout)
+ bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
+ else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
+ netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
+ bp->hwrm_cmd_max_timeout / 1000);
if (resp->hwrm_intf_maj_8b >= 1) {
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
@@ -7314,53 +8259,148 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
hwrm_ver_get_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
int bnxt_hwrm_fw_set_time(struct bnxt *bp)
{
- struct hwrm_fw_set_time_input req = {0};
+ struct hwrm_fw_set_time_input *req;
struct tm tm;
time64_t now = ktime_get_real_seconds();
+ int rc;
if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
bp->hwrm_spec_code < 0x10400)
return -EOPNOTSUPP;
time64_to_tm(now, 0, &tm);
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
- req.year = cpu_to_le16(1900 + tm.tm_year);
- req.month = 1 + tm.tm_mon;
- req.day = tm.tm_mday;
- req.hour = tm.tm_hour;
- req.minute = tm.tm_min;
- req.second = tm.tm_sec;
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
+ if (rc)
+ return rc;
+
+ req->year = cpu_to_le16(1900 + tm.tm_year);
+ req->month = 1 + tm.tm_mon;
+ req->day = tm.tm_mday;
+ req->hour = tm.tm_hour;
+ req->minute = tm.tm_min;
+ req->second = tm.tm_sec;
+ return hwrm_req_send(bp, req);
}
-static int bnxt_hwrm_port_qstats(struct bnxt *bp)
+static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
{
- int rc;
+ u64 sw_tmp;
+
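+	/* Fold the narrower hardware counter into the 64-bit software
+	 * counter; a hardware value below the previous masked value means
+	 * the counter wrapped, so add one full wrap period.
+	 */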
+ hw &= mask;
+ sw_tmp = (*sw & ~mask) | hw;
+ if (hw < (*sw & mask))
+ sw_tmp += mask + 1;
+ WRITE_ONCE(*sw, sw_tmp);
+}
+
+static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
+ int count, bool ignore_zero)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
+
+ if (ignore_zero && !hw)
+ continue;
+
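+		/* An all-ones mask marks a full-width counter; copy it as-is */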
+ if (masks[i] == -1ULL)
+ sw_stats[i] = hw;
+ else
+ bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
+ }
+}
+
+static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
+{
+ if (!stats->hw_stats)
+ return;
+
+ __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
+ stats->hw_masks, stats->len / 8, false);
+}
+
+static void bnxt_accumulate_all_stats(struct bnxt *bp)
+{
+ struct bnxt_stats_mem *ring0_stats;
+ bool ignore_zero = false;
+ int i;
+
+ /* Chip bug. Counter intermittently becomes 0. */
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ ignore_zero = true;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_stats_mem *stats;
+
+ cpr = &bnapi->cp_ring;
+ stats = &cpr->stats;
+ if (!i)
+ ring0_stats = stats;
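+		/* All rings use the counter masks and length of ring 0 */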
+ __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
+ ring0_stats->hw_masks,
+ ring0_stats->len / 8, ignore_zero);
+ }
+ if (bp->flags & BNXT_FLAG_PORT_STATS) {
+ struct bnxt_stats_mem *stats = &bp->port_stats;
+ __le64 *hw_stats = stats->hw_stats;
+ u64 *sw_stats = stats->sw_stats;
+ u64 *masks = stats->hw_masks;
+ int cnt;
+
+ cnt = sizeof(struct rx_port_stats) / 8;
+ __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
+
+ hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ cnt = sizeof(struct tx_port_stats) / 8;
+ __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
+ }
+ if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
+ bnxt_accumulate_stats(&bp->rx_port_stats_ext);
+ bnxt_accumulate_stats(&bp->tx_port_stats_ext);
+ }
+}
+
+static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
+{
+ struct hwrm_port_qstats_input *req;
struct bnxt_pf_info *pf = &bp->pf;
- struct hwrm_port_qstats_input req = {0};
+ int rc;
if (!(bp->flags & BNXT_FLAG_PORT_STATS))
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
- req.port_id = cpu_to_le16(pf->port_id);
- req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
- req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
+ return -EOPNOTSUPP;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
+ if (rc)
+ return rc;
+
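+	/* RX counters are at the start of the shared stats buffer; TX
+	 * counters follow at BNXT_TX_PORT_STATS_BYTE_OFFSET.
+	 */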
+ req->flags = flags;
+ req->port_id = cpu_to_le16(pf->port_id);
+ req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
+ BNXT_TX_PORT_STATS_BYTE_OFFSET);
+ req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
+ return hwrm_req_send(bp, req);
}
-static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
+static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
{
- struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
- struct hwrm_port_qstats_ext_input req = {0};
+ struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
+ struct hwrm_queue_pri2cos_qcfg_input *req_qc;
+ struct hwrm_port_qstats_ext_output *resp_qs;
+ struct hwrm_port_qstats_ext_input *req_qs;
struct bnxt_pf_info *pf = &bp->pf;
u32 tx_stat_size;
int rc;
@@ -7368,81 +8408,89 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
- req.port_id = cpu_to_le16(pf->port_id);
- req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
- req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
- tx_stat_size = bp->hw_tx_port_stats_ext ?
- sizeof(*bp->hw_tx_port_stats_ext) : 0;
- req.tx_stat_size = cpu_to_le16(tx_stat_size);
- req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
+ return -EOPNOTSUPP;
+
+ rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
+ if (rc)
+ return rc;
+
+ req_qs->flags = flags;
+ req_qs->port_id = cpu_to_le16(pf->port_id);
+ req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
+ req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
+ tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
+ sizeof(struct tx_port_stats_ext) : 0;
+ req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
+ req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
+ resp_qs = hwrm_req_hold(bp, req_qs);
+ rc = hwrm_req_send(bp, req_qs);
if (!rc) {
- bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
+ bp->fw_rx_stats_ext_size =
+ le16_to_cpu(resp_qs->rx_stat_size) / 8;
+ if (BNXT_FW_MAJ(bp) < 220 &&
+ bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
+ bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
+
bp->fw_tx_stats_ext_size = tx_stat_size ?
- le16_to_cpu(resp->tx_stat_size) / 8 : 0;
+ le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
} else {
bp->fw_rx_stats_ext_size = 0;
bp->fw_tx_stats_ext_size = 0;
}
+ hwrm_req_drop(bp, req_qs);
+
+ if (flags)
+ return rc;
+
if (bp->fw_tx_stats_ext_size <=
offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
- mutex_unlock(&bp->hwrm_cmd_lock);
bp->pri2cos_valid = 0;
return rc;
}
- bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
- req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
+ rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
+ if (rc)
+ return rc;
- rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
+ req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
+
+ resp_qc = hwrm_req_hold(bp, req_qc);
+ rc = hwrm_req_send(bp, req_qc);
if (!rc) {
- struct hwrm_queue_pri2cos_qcfg_output *resp2;
u8 *pri2cos;
int i, j;
- resp2 = bp->hwrm_cmd_resp_addr;
- pri2cos = &resp2->pri0_cos_queue_id;
+ pri2cos = &resp_qc->pri0_cos_queue_id;
for (i = 0; i < 8; i++) {
u8 queue_id = pri2cos[i];
+ u8 queue_idx;
+ /* Per port queue IDs start from 0, 10, 20, etc */
+ queue_idx = queue_id % 10;
+ if (queue_idx > BNXT_MAX_QUEUE) {
+ bp->pri2cos_valid = false;
+ hwrm_req_drop(bp, req_qc);
+ return rc;
+ }
for (j = 0; j < bp->max_q; j++) {
if (bp->q_ids[j] == queue_id)
- bp->pri2cos[i] = j;
+ bp->pri2cos_idx[i] = queue_idx;
}
}
- bp->pri2cos_valid = 1;
+ bp->pri2cos_valid = true;
}
- mutex_unlock(&bp->hwrm_cmd_lock);
- return rc;
-}
-
-static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
-{
- struct hwrm_pcie_qstats_input req = {0};
+ hwrm_req_drop(bp, req_qc);
- if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
- return 0;
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
- req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
- req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return rc;
}
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
- if (bp->vxlan_port_cnt) {
- bnxt_hwrm_tunnel_dst_port_free(
- bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
- }
- bp->vxlan_port_cnt = 0;
- if (bp->nge_port_cnt) {
- bnxt_hwrm_tunnel_dst_port_free(
- bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
- }
- bp->nge_port_cnt = 0;
+ bnxt_hwrm_tunnel_dst_port_free(bp,
+ TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+ bnxt_hwrm_tunnel_dst_port_free(bp,
+ TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
}
static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
@@ -7452,7 +8500,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
if (set_tpa)
tpa_flags = bp->flags & BNXT_FLAG_TPA;
- else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ else if (BNXT_NO_FW_ACCESS(bp))
return 0;
for (i = 0; i < bp->nr_vnics; i++) {
rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
@@ -7506,39 +8554,46 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
{
- struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_input *req;
+ u8 evb_mode;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.fid = cpu_to_le16(0xffff);
- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
if (br_mode == BRIDGE_MODE_VEB)
- req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
+ evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
else if (br_mode == BRIDGE_MODE_VEPA)
- req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
+ evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
else
return -EINVAL;
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
+ req->evb_mode = evb_mode;
+ return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
{
- struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_input *req;
int rc;
if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.fid = cpu_to_le16(0xffff);
- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
- req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
+ req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
if (size == 128)
- req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
+ req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_req_send(bp, req);
}
static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
@@ -7601,7 +8656,7 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
{
int rc, i, nr_ctxs;
- nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
+ nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
for (i = 0; i < nr_ctxs; i++) {
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
if (rc) {
@@ -7680,11 +8735,11 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
#endif
}
-/* Allow PF and VF with default VLAN to be in promiscuous mode */
+/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
static bool bnxt_promisc_ok(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
- if (BNXT_VF(bp) && !bp->vf.vlan)
+ if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
return false;
#endif
return true;
@@ -7772,22 +8827,28 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
/* Filter for default vnic 0 */
rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
if (rc) {
- netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+ if (BNXT_VF(bp) && rc == -ENODEV)
+ netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
+ else
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
goto err_out;
}
vnic->uc_filter_count = 1;
vnic->rx_mask = 0;
+ if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
+ goto skip_rx_mask;
+
if (bp->dev->flags & IFF_BROADCAST)
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
- if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
+ if (bp->dev->flags & IFF_PROMISC)
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
if (bp->dev->flags & IFF_ALLMULTI) {
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
- } else {
+ } else if (bp->dev->flags & IFF_MULTICAST) {
u32 mask = 0;
bnxt_mc_list_updated(bp, &mask);
@@ -7798,6 +8859,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (rc)
goto err_out;
+skip_rx_mask:
rc = bnxt_hwrm_set_coal(bp);
if (rc)
netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
@@ -7932,10 +8994,18 @@ static void bnxt_setup_inta(struct bnxt *bp)
bp->irq_tbl[0].handler = bnxt_inta;
}
+static int bnxt_init_int_mode(struct bnxt *bp);
+
static int bnxt_setup_int_mode(struct bnxt *bp)
{
int rc;
+ if (!bp->irq_tbl) {
+ rc = bnxt_init_int_mode(bp);
+ if (rc || !bp->irq_tbl)
+ return rc ?: -ENODEV;
+ }
+
if (bp->flags & BNXT_FLAG_USING_MSIX)
bnxt_setup_msix(bp);
else
@@ -8105,7 +9175,7 @@ msix_setup_exit:
static int bnxt_init_inta(struct bnxt *bp)
{
- bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
+ bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
if (!bp->irq_tbl)
return -ENOMEM;
@@ -8120,7 +9190,7 @@ static int bnxt_init_inta(struct bnxt *bp)
static int bnxt_init_int_mode(struct bnxt *bp)
{
- int rc = 0;
+ int rc = -ENODEV;
if (bp->flags & BNXT_FLAG_MSIX_CAP)
rc = bnxt_init_msix(bp);
@@ -8273,10 +9343,9 @@ static void bnxt_del_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
- napi_hash_del(&bnapi->napi);
- netif_napi_del(&bnapi->napi);
+ __netif_napi_del(&bnapi->napi);
}
- /* We called napi_hash_del() before netif_napi_del(), we need
+	/* We called __netif_napi_del(), so we need
* to respect an RCU grace period before freeing napi structures.
*/
synchronize_net();
@@ -8297,16 +9366,16 @@ static void bnxt_init_napi(struct bnxt *bp)
cp_nr_rings--;
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
- netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
+ netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnapi = bp->bnapi[cp_nr_rings];
netif_napi_add(bp->dev, &bnapi->napi,
- bnxt_poll_nitroa0, 64);
+ bnxt_poll_nitroa0);
}
} else {
bnapi = bp->bnapi[0];
- netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
+ netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
}
}
@@ -8314,16 +9383,16 @@ static void bnxt_disable_napi(struct bnxt *bp)
{
int i;
- if (!bp->bnapi)
+ if (!bp->bnapi ||
+ test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+ napi_disable(&bp->bnapi[i]->napi);
if (bp->bnapi[i]->rx_ring)
cancel_work_sync(&cpr->dim.work);
-
- napi_disable(&bp->bnapi[i]->napi);
}
}
@@ -8331,15 +9400,21 @@ static void bnxt_enable_napi(struct bnxt *bp)
{
int i;
+ clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
- bp->bnapi[i]->in_reset = false;
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+
+ cpr = &bnapi->cp_ring;
+ if (bnapi->in_reset)
+ cpr->sw_stats.rx.rx_resets++;
+ bnapi->in_reset = false;
- if (bp->bnapi[i]->rx_ring) {
+ if (bnapi->rx_ring) {
INIT_WORK(&cpr->dim.work, bnxt_dim_work);
cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
- napi_enable(&bp->bnapi[i]->napi);
+ napi_enable(&bnapi->napi);
}
}
@@ -8351,12 +9426,15 @@ void bnxt_tx_disable(struct bnxt *bp)
if (bp->tx_ring) {
for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
- txr->dev_state = BNXT_DEV_STATE_CLOSING;
+ WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
}
}
+ /* Make sure napi polls see @dev_state change */
+ synchronize_net();
+ /* Drop carrier first to prevent TX timeout */
+ netif_carrier_off(bp->dev);
/* Stop all TX queues */
netif_tx_disable(bp->dev);
- netif_carrier_off(bp->dev);
}
void bnxt_tx_enable(struct bnxt *bp)
@@ -8366,22 +9444,54 @@ void bnxt_tx_enable(struct bnxt *bp)
for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
- txr->dev_state = 0;
+ WRITE_ONCE(txr->dev_state, 0);
}
+ /* Make sure napi polls see @dev_state change */
+ synchronize_net();
netif_tx_wake_all_queues(bp->dev);
- if (bp->link_info.link_up)
+ if (BNXT_LINK_IS_UP(bp))
netif_carrier_on(bp->dev);
}
-static void bnxt_report_link(struct bnxt *bp)
+static char *bnxt_report_fec(struct bnxt_link_info *link_info)
{
- if (bp->link_info.link_up) {
- const char *duplex;
+ u8 active_fec = link_info->active_fec_sig_mode &
+ PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
+
+ switch (active_fec) {
+ default:
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
+ return "None";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
+ return "Clause 74 BaseR";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
+ return "Clause 91 RS(528,514)";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
+ return "Clause 91 RS544_1XN";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
+ return "Clause 91 RS(544,514)";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
+ return "Clause 91 RS272_1XN";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
+ return "Clause 91 RS(272,257)";
+ }
+}
+
+void bnxt_report_link(struct bnxt *bp)
+{
+ if (BNXT_LINK_IS_UP(bp)) {
+ const char *signal = "";
const char *flow_ctrl;
+ const char *duplex;
u32 speed;
u16 fec;
netif_carrier_on(bp->dev);
+ speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+ if (speed == SPEED_UNKNOWN) {
+ netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
+ return;
+ }
if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
duplex = "full";
else
@@ -8394,95 +9504,133 @@ static void bnxt_report_link(struct bnxt *bp)
flow_ctrl = "ON - receive";
else
flow_ctrl = "none";
- speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
- netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
- speed, duplex, flow_ctrl);
- if (bp->flags & BNXT_FLAG_EEE_CAP)
+ if (bp->link_info.phy_qcfg_resp.option_flags &
+ PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
+ u8 sig_mode = bp->link_info.active_fec_sig_mode &
+ PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
+ switch (sig_mode) {
+ case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
+ signal = "(NRZ) ";
+ break;
+ case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
+ signal = "(PAM4) ";
+ break;
+ default:
+ break;
+ }
+ }
+ netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
+ speed, signal, duplex, flow_ctrl);
+ if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
netdev_info(bp->dev, "EEE is %s\n",
bp->eee.eee_active ? "active" :
"not active");
fec = bp->link_info.fec_cfg;
if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
- netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
+ netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
(fec & BNXT_FEC_AUTONEG) ? "on" : "off",
- (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
- (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
+ bnxt_report_fec(&bp->link_info));
} else {
netif_carrier_off(bp->dev);
netdev_err(bp->dev, "NIC Link is Down\n");
}
}
+static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
+{
+ if (!resp->supported_speeds_auto_mode &&
+ !resp->supported_speeds_force_mode &&
+ !resp->supported_pam4_speeds_auto_mode &&
+ !resp->supported_pam4_speeds_force_mode)
+ return true;
+ return false;
+}
+
static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
{
- int rc = 0;
- struct hwrm_port_phy_qcaps_input req = {0};
- struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_link_info *link_info = &bp->link_info;
+ struct hwrm_port_phy_qcaps_output *resp;
+ struct hwrm_port_phy_qcaps_input *req;
+ int rc = 0;
- bp->flags &= ~BNXT_FLAG_EEE_CAP;
- if (bp->test_info)
- bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
- BNXT_TEST_FL_AN_PHY_LPBK);
if (bp->hwrm_spec_code < 0x10201)
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
+ if (rc)
+ return rc;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (rc)
goto hwrm_phy_qcaps_exit;
+ bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
struct ethtool_eee *eee = &bp->eee;
u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
- bp->flags |= BNXT_FLAG_EEE_CAP;
eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
}
- if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
- if (bp->test_info)
- bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
- }
- if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) {
- if (bp->test_info)
- bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
- }
- if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) {
- if (BNXT_PF(bp))
- bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
+
+ if (bp->hwrm_spec_code >= 0x10a01) {
+ if (bnxt_phy_qcaps_no_speed(resp)) {
+ link_info->phy_state = BNXT_PHY_STATE_DISABLED;
+ netdev_warn(bp->dev, "Ethernet link disabled\n");
+ } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
+ link_info->phy_state = BNXT_PHY_STATE_ENABLED;
+ netdev_info(bp->dev, "Ethernet link enabled\n");
+ /* Phy re-enabled, reprobe the speeds */
+ link_info->support_auto_speeds = 0;
+ link_info->support_pam4_auto_speeds = 0;
+ }
}
if (resp->supported_speeds_auto_mode)
link_info->support_auto_speeds =
le16_to_cpu(resp->supported_speeds_auto_mode);
+ if (resp->supported_pam4_speeds_auto_mode)
+ link_info->support_pam4_auto_speeds =
+ le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
bp->port_count = resp->port_cnt;
hwrm_phy_qcaps_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
-static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
+static bool bnxt_support_dropped(u16 advertising, u16 supported)
+{
+ u16 diff = advertising ^ supported;
+
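+	/* True if any advertised bit falls outside the supported mask */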
+ return ((supported | diff) != supported);
+}
+
+int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
{
- int rc = 0;
struct bnxt_link_info *link_info = &bp->link_info;
- struct hwrm_port_phy_qcfg_input req = {0};
- struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
- u8 link_up = link_info->link_up;
- u16 diff;
+ struct hwrm_port_phy_qcfg_output *resp;
+ struct hwrm_port_phy_qcfg_input *req;
+ u8 link_state = link_info->link_state;
+ bool support_changed = false;
+ int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
+ if (rc)
+ return rc;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (rc) {
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
+ if (BNXT_VF(bp) && rc == -ENODEV) {
+ netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
+ rc = 0;
+ }
return rc;
}
@@ -8502,10 +9650,17 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
else
link_info->link_speed = 0;
link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
+ link_info->force_pam4_link_speed =
+ le16_to_cpu(resp->force_pam4_link_speed);
link_info->support_speeds = le16_to_cpu(resp->support_speeds);
+ link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
+ link_info->auto_pam4_link_speeds =
+ le16_to_cpu(resp->auto_pam4_link_speed_mask);
link_info->lp_auto_link_speeds =
le16_to_cpu(resp->link_partner_adv_speeds);
+ link_info->lp_auto_pam4_link_speeds =
+ resp->link_partner_pam4_adv_speeds;
link_info->preemphasis = le32_to_cpu(resp->preemphasis);
link_info->phy_ver[0] = resp->phy_maj;
link_info->phy_ver[1] = resp->phy_min;
@@ -8517,7 +9672,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
link_info->module_status = resp->module_status;
- if (bp->flags & BNXT_FLAG_EEE_CAP) {
+ if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
struct ethtool_eee *eee = &bp->eee;
u16 fw_speeds;
@@ -8554,37 +9709,42 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
}
link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
- if (bp->hwrm_spec_code >= 0x10504)
+ if (bp->hwrm_spec_code >= 0x10504) {
link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
-
+ link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
+ }
/* TODO: need to add more logic to report VF link */
if (chng_link_state) {
if (link_info->phy_link_status == BNXT_LINK_LINK)
- link_info->link_up = 1;
+ link_info->link_state = BNXT_LINK_STATE_UP;
else
- link_info->link_up = 0;
- if (link_up != link_info->link_up)
+ link_info->link_state = BNXT_LINK_STATE_DOWN;
+ if (link_state != link_info->link_state)
bnxt_report_link(bp);
} else {
- /* alwasy link down if not require to update link state */
- link_info->link_up = 0;
+		/* always link down if not required to update link state */
+ link_info->link_state = BNXT_LINK_STATE_DOWN;
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
if (!BNXT_PHY_CFG_ABLE(bp))
return 0;
- diff = link_info->support_auto_speeds ^ link_info->advertising;
- if ((link_info->support_auto_speeds | diff) !=
- link_info->support_auto_speeds) {
- /* An advertised speed is no longer supported, so we need to
- * update the advertisement settings. Caller holds RTNL
- * so we can modify link settings.
- */
+ /* Check if any advertised speeds are no longer supported. The caller
+ * holds the link_lock mutex, so we can modify link_info settings.
+ */
+ if (bnxt_support_dropped(link_info->advertising,
+ link_info->support_auto_speeds)) {
link_info->advertising = link_info->support_auto_speeds;
- if (link_info->autoneg & BNXT_AUTONEG_SPEED)
- bnxt_hwrm_set_link_setting(bp, true, false);
+ support_changed = true;
+ }
+ if (bnxt_support_dropped(link_info->advertising_pam4,
+ link_info->support_pam4_auto_speeds)) {
+ link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
+ support_changed = true;
}
+ if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
+ bnxt_hwrm_set_link_setting(bp, true, false);
return 0;
}
@@ -8643,27 +9803,30 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
}
}
-static void bnxt_hwrm_set_link_common(struct bnxt *bp,
- struct hwrm_port_phy_cfg_input *req)
+static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
{
- u8 autoneg = bp->link_info.autoneg;
- u16 fw_link_speed = bp->link_info.req_link_speed;
- u16 advertising = bp->link_info.advertising;
-
- if (autoneg & BNXT_AUTONEG_SPEED) {
- req->auto_mode |=
- PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
-
- req->enables |= cpu_to_le32(
- PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
- req->auto_link_speed_mask = cpu_to_le16(advertising);
-
+ if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
+ req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
+ if (bp->link_info.advertising) {
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
+ req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
+ }
+ if (bp->link_info.advertising_pam4) {
+ req->enables |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
+ req->auto_link_pam4_speed_mask =
+ cpu_to_le16(bp->link_info.advertising_pam4);
+ }
req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
- req->flags |=
- cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
+ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
} else {
- req->force_link_speed = cpu_to_le16(fw_link_speed);
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
+ if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
+ req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
+ } else {
+ req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
+ }
}
/* tell chimp that the setting takes effect immediately */
@@ -8672,18 +9835,20 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp,
int bnxt_hwrm_set_pause(struct bnxt *bp)
{
- struct hwrm_port_phy_cfg_input req = {0};
+ struct hwrm_port_phy_cfg_input *req;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
- bnxt_hwrm_set_pause_common(bp, &req);
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
+ if (rc)
+ return rc;
+
+ bnxt_hwrm_set_pause_common(bp, req);
if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
bp->link_info.force_link_chng)
- bnxt_hwrm_set_link_common(bp, &req);
+ bnxt_hwrm_set_link_common(bp, req);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_send(bp, req);
if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
/* since changing of pause setting doesn't trigger any link
* change event, the driver needs to update the current pause
@@ -8696,7 +9861,6 @@ int bnxt_hwrm_set_pause(struct bnxt *bp)
bnxt_report_link(bp);
}
bp->link_info.force_link_chng = false;
- mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -8725,114 +9889,229 @@ static void bnxt_hwrm_set_eee(struct bnxt *bp,
int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
{
- struct hwrm_port_phy_cfg_input req = {0};
+ struct hwrm_port_phy_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
+ if (rc)
+ return rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
if (set_pause)
- bnxt_hwrm_set_pause_common(bp, &req);
+ bnxt_hwrm_set_pause_common(bp, req);
- bnxt_hwrm_set_link_common(bp, &req);
+ bnxt_hwrm_set_link_common(bp, req);
if (set_eee)
- bnxt_hwrm_set_eee(bp, &req);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ bnxt_hwrm_set_eee(bp, req);
+ return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
{
- struct hwrm_port_phy_cfg_input req = {0};
+ struct hwrm_port_phy_cfg_input *req;
+ int rc;
if (!BNXT_SINGLE_PF(bp))
return 0;
- if (pci_num_vf(bp->pdev))
+ if (pci_num_vf(bp->pdev) &&
+ !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
- req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ mutex_lock(&bp->link_lock);
+		/* Device is not obliged to link down in certain scenarios, even
+ * when forced. Setting the state unknown is consistent with
+ * driver startup and will force link state to be reported
+ * during subsequent open based on PORT_PHY_QCFG.
+ */
+ bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
+ mutex_unlock(&bp->link_lock);
+ }
+ return rc;
+}
+
+static int bnxt_fw_reset_via_optee(struct bnxt *bp)
+{
+#ifdef CONFIG_TEE_BNXT_FW
+ int rc = tee_bnxt_fw_load();
+
+ if (rc)
+ netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
+
+ return rc;
+#else
+ netdev_err(bp->dev, "OP-TEE not supported\n");
+ return -ENODEV;
+#endif
+}
+
+static int bnxt_try_recover_fw(struct bnxt *bp)
+{
+ if (bp->fw_health && bp->fw_health->status_reliable) {
+ int retry = 0, rc;
+ u32 sts;
+
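+		/* Poll the firmware status register until firmware is no
+		 * longer booting or recovering, or the retries run out.
+		 */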
+ do {
+ sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ rc = bnxt_hwrm_poll(bp);
+ if (!BNXT_FW_IS_BOOTING(sts) &&
+ !BNXT_FW_IS_RECOVERING(sts))
+ break;
+ retry++;
+ } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
+
+ if (!BNXT_FW_IS_HEALTHY(sts)) {
+ netdev_err(bp->dev,
+ "Firmware not responding, status: 0x%x\n",
+ sts);
+ rc = -ENODEV;
+ }
+ if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
+ netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
+ return bnxt_fw_reset_via_optee(bp);
+ }
+ return rc;
+ }
+
+ return -ENODEV;
+}
+
+static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
+{
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ if (!BNXT_NEW_RM(bp))
+ return; /* no resource reservations required */
+
+ hw_resc->resv_cp_rings = 0;
+ hw_resc->resv_stat_ctxs = 0;
+ hw_resc->resv_irqs = 0;
+ hw_resc->resv_tx_rings = 0;
+ hw_resc->resv_rx_rings = 0;
+ hw_resc->resv_hw_ring_grps = 0;
+ hw_resc->resv_vnics = 0;
+ if (!fw_reset) {
+ bp->tx_nr_rings = 0;
+ bp->rx_nr_rings = 0;
+ }
}
-static int bnxt_fw_init_one(struct bnxt *bp);
+int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
+{
+ int rc;
+
+ if (!BNXT_NEW_RM(bp))
+ return 0; /* no resource reservations required */
+
+ rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ if (rc)
+ netdev_err(bp->dev, "resc_qcaps failed\n");
+
+ bnxt_clear_reservations(bp, fw_reset);
+
+ return rc;
+}
static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
- struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_func_drv_if_change_input req = {0};
- bool resc_reinit = false, fw_reset = false;
+ struct hwrm_func_drv_if_change_output *resp;
+ struct hwrm_func_drv_if_change_input *req;
+ bool fw_reset = !bp->irq_tbl;
+ bool resc_reinit = false;
+ int rc, retry = 0;
u32 flags = 0;
- int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
+ if (rc)
+ return rc;
+
if (up)
- req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc)
+ req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
+ resp = hwrm_req_hold(bp, req);
+
+ hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
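+	/* Keep retrying while firmware returns -EAGAIN */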
+ while (retry < BNXT_FW_IF_RETRY) {
+ rc = hwrm_req_send(bp, req);
+ if (rc != -EAGAIN)
+ break;
+
+ msleep(50);
+ retry++;
+ }
+
+ if (rc == -EAGAIN) {
+ hwrm_req_drop(bp, req);
+ return rc;
+ } else if (!rc) {
flags = le32_to_cpu(resp->flags);
- mutex_unlock(&bp->hwrm_cmd_lock);
+ } else if (up) {
+ rc = bnxt_try_recover_fw(bp);
+ fw_reset = true;
+ }
+ hwrm_req_drop(bp, req);
if (rc)
return rc;
- if (!up)
+ if (!up) {
+ bnxt_inv_fw_health_reg(bp);
return 0;
+ }
if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
resc_reinit = true;
- if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
+ test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
fw_reset = true;
+ else
+ bnxt_remap_fw_health_regs(bp);
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
+ set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return -ENODEV;
}
if (resc_reinit || fw_reset) {
if (fw_reset) {
+ set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_ulp_stop(bp);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
+ bnxt_dcb_free(bp);
rc = bnxt_fw_init_one(bp);
if (rc) {
+ clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return rc;
}
bnxt_clear_int_mode(bp);
rc = bnxt_init_int_mode(bp);
if (rc) {
+ clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
netdev_err(bp->dev, "init int mode failed\n");
return rc;
}
- set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
- }
- if (BNXT_NEW_RM(bp)) {
- struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-
- rc = bnxt_hwrm_func_resc_qcaps(bp, true);
- hw_resc->resv_cp_rings = 0;
- hw_resc->resv_stat_ctxs = 0;
- hw_resc->resv_irqs = 0;
- hw_resc->resv_tx_rings = 0;
- hw_resc->resv_rx_rings = 0;
- hw_resc->resv_hw_ring_grps = 0;
- hw_resc->resv_vnics = 0;
- if (!fw_reset) {
- bp->tx_nr_rings = 0;
- bp->rx_nr_rings = 0;
- }
}
+ rc = bnxt_cancel_reservations(bp, fw_reset);
}
- return 0;
+ return rc;
}
static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
- struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_port_led_qcaps_input req = {0};
+ struct hwrm_port_led_qcaps_output *resp;
+ struct hwrm_port_led_qcaps_input *req;
struct bnxt_pf_info *pf = &bp->pf;
int rc;
@@ -8840,12 +10119,15 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
- req.port_id = cpu_to_le16(pf->port_id);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
+ if (rc)
+ return rc;
+
+ req->port_id = cpu_to_le16(pf->port_id);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (rc) {
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
@@ -8865,54 +10147,64 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
}
}
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return 0;
}
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
{
- struct hwrm_wol_filter_alloc_input req = {0};
- struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_wol_filter_alloc_output *resp;
+ struct hwrm_wol_filter_alloc_input *req;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
- req.port_id = cpu_to_le16(bp->pf.port_id);
- req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
- req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
- memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
+ if (rc)
+ return rc;
+
+ req->port_id = cpu_to_le16(bp->pf.port_id);
+ req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
+ req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
+ memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (!rc)
bp->wol_filter_id = resp->wol_filter_id;
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
{
- struct hwrm_wol_filter_free_input req = {0};
+ struct hwrm_wol_filter_free_input *req;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
- req.port_id = cpu_to_le16(bp->pf.port_id);
- req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
- req.wol_filter_id = bp->wol_filter_id;
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
+ if (rc)
+ return rc;
+
+ req->port_id = cpu_to_le16(bp->pf.port_id);
+ req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
+ req->wol_filter_id = bp->wol_filter_id;
+
+ return hwrm_req_send(bp, req);
}
static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
{
- struct hwrm_wol_filter_qcfg_input req = {0};
- struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_wol_filter_qcfg_output *resp;
+ struct hwrm_wol_filter_qcfg_input *req;
u16 next_handle = 0;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
- req.port_id = cpu_to_le16(bp->pf.port_id);
- req.handle = cpu_to_le16(handle);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
+ if (rc)
+ return rc;
+
+ req->port_id = cpu_to_le16(bp->pf.port_id);
+ req->handle = cpu_to_le16(handle);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (!rc) {
next_handle = le16_to_cpu(resp->next_handle);
if (next_handle != 0) {
@@ -8923,7 +10215,7 @@ static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
}
}
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return next_handle;
}
@@ -8944,19 +10236,23 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
static ssize_t bnxt_show_temp(struct device *dev,
struct device_attribute *devattr, char *buf)
{
- struct hwrm_temp_monitor_query_input req = {0};
struct hwrm_temp_monitor_query_output *resp;
+ struct hwrm_temp_monitor_query_input *req;
struct bnxt *bp = dev_get_drvdata(dev);
- u32 temp = 0;
-
- resp = bp->hwrm_cmd_resp_addr;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
- mutex_lock(&bp->hwrm_cmd_lock);
- if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
- temp = resp->temp * 1000; /* display millidegree */
- mutex_unlock(&bp->hwrm_cmd_lock);
+ u32 len = 0;
+ int rc;
- return sprintf(buf, "%u\n", temp);
+ rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
+ if (rc)
+ return rc;
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc)
+ len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
+ hwrm_req_drop(bp, req);
+ if (rc)
+ return rc;
+ return len;
}
static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
@@ -8976,7 +10272,17 @@ static void bnxt_hwmon_close(struct bnxt *bp)
static void bnxt_hwmon_open(struct bnxt *bp)
{
+ struct hwrm_temp_monitor_query_input *req;
struct pci_dev *pdev = bp->pdev;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
+ if (!rc)
+ rc = hwrm_req_send_silent(bp, req);
+ if (rc == -EACCES || rc == -EOPNOTSUPP) {
+ bnxt_hwmon_close(bp);
+ return;
+ }
if (bp->hwmon_dev)
return;
@@ -9004,7 +10310,7 @@ static bool bnxt_eee_config_ok(struct bnxt *bp)
struct ethtool_eee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
- if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+ if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
return true;
if (eee->eee_enabled) {
@@ -9050,21 +10356,26 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
if (BNXT_AUTO_MODE(link_info->auto_mode))
update_link = true;
- if (link_info->req_link_speed != link_info->force_link_speed)
+ if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
+ link_info->req_link_speed != link_info->force_link_speed)
+ update_link = true;
+ else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
+ link_info->req_link_speed != link_info->force_pam4_link_speed)
update_link = true;
if (link_info->req_duplex != link_info->duplex_setting)
update_link = true;
} else {
if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
update_link = true;
- if (link_info->advertising != link_info->auto_link_speeds)
+ if (link_info->advertising != link_info->auto_link_speeds ||
+ link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
update_link = true;
}
/* The last close may have shutdown the link, so need to call
* PHY_CFG to bring it back up.
*/
- if (!bp->link_info.link_up)
+ if (!BNXT_LINK_IS_UP(bp))
update_link = true;
if (!bnxt_eee_config_ok(bp))
@@ -9099,6 +10410,28 @@ static void bnxt_preset_reg_win(struct bnxt *bp)
static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
+static int bnxt_reinit_after_abort(struct bnxt *bp)
+{
+ int rc;
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return -EBUSY;
+
+ if (bp->dev->reg_state == NETREG_UNREGISTERED)
+ return -ENODEV;
+
+ rc = bnxt_fw_init_one(bp);
+ if (!rc) {
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
+ if (!rc) {
+ clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
+ set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
+ }
+ }
+ return rc;
+}
+
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -9138,15 +10471,15 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
}
- bnxt_enable_napi(bp);
- bnxt_debug_dev_init(bp);
-
rc = bnxt_init_nic(bp, irq_re_init);
if (rc) {
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
- goto open_err;
+ goto open_err_irq;
}
+ bnxt_enable_napi(bp);
+ bnxt_debug_dev_init(bp);
+
if (link_re_init) {
mutex_lock(&bp->link_lock);
rc = bnxt_update_phy_setting(bp);
@@ -9162,25 +10495,31 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
if (irq_re_init)
- udp_tunnel_get_rx_info(bp->dev);
+ udp_tunnel_nic_reset_ntf(bp->dev);
+ if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
+ if (!static_key_enabled(&bnxt_xdp_locking_key))
+ static_branch_enable(&bnxt_xdp_locking_key);
+ } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
+ static_branch_disable(&bnxt_xdp_locking_key);
+ }
set_bit(BNXT_STATE_OPEN, &bp->state);
bnxt_enable_int(bp);
/* Enable TX queues */
bnxt_tx_enable(bp);
mod_timer(&bp->timer, jiffies + bp->current_interval);
/* Poll link status and check for SFP+ module status */
+ mutex_lock(&bp->link_lock);
bnxt_get_port_module_status(bp);
+ mutex_unlock(&bp->link_lock);
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
+ bnxt_ptp_init_rtc(bp, true);
+ bnxt_ptp_cfg_tstamp_filters(bp);
return 0;
-open_err:
- bnxt_debug_dev_exit(bp);
- bnxt_disable_napi(bp);
-
open_err_irq:
bnxt_del_napi(bp);
@@ -9196,7 +10535,10 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
- rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
+ rc = -EIO;
+ if (!rc)
+ rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
if (rc) {
netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
dev_close(bp->dev);
@@ -9212,13 +10554,21 @@ int bnxt_half_open_nic(struct bnxt *bp)
{
int rc = 0;
- rc = bnxt_alloc_mem(bp, false);
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
+ netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
+ rc = -ENODEV;
+ goto half_open_err;
+ }
+
+ rc = bnxt_alloc_mem(bp, true);
if (rc) {
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
goto half_open_err;
}
- rc = bnxt_init_nic(bp, false);
+ set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+ rc = bnxt_init_nic(bp, true);
if (rc) {
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
goto half_open_err;
}
@@ -9226,7 +10576,7 @@ int bnxt_half_open_nic(struct bnxt *bp)
half_open_err:
bnxt_free_skbs(bp);
- bnxt_free_mem(bp, false);
+ bnxt_free_mem(bp, true);
dev_close(bp->dev);
return rc;
}
@@ -9236,12 +10586,13 @@ half_open_err:
*/
void bnxt_half_close_nic(struct bnxt *bp)
{
- bnxt_hwrm_resource_free(bp, false, false);
+ bnxt_hwrm_resource_free(bp, false, true);
bnxt_free_skbs(bp);
- bnxt_free_mem(bp, false);
+ bnxt_free_mem(bp, true);
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
}
-static void bnxt_reenable_sriov(struct bnxt *bp)
+void bnxt_reenable_sriov(struct bnxt *bp)
{
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -9258,13 +10609,20 @@ static int bnxt_open(struct net_device *dev)
int rc;
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
- netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
- return -ENODEV;
+ rc = bnxt_reinit_after_abort(bp);
+ if (rc) {
+ if (rc == -EBUSY)
+ netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
+ else
+ netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
+ return -ENODEV;
+ }
}
rc = bnxt_hwrm_if_change(bp, true);
if (rc)
return rc;
+
rc = __bnxt_open_nic(bp, true, true);
if (rc) {
bnxt_hwrm_if_change(bp, false);
@@ -9305,7 +10663,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
while (bnxt_drv_busy(bp))
msleep(20);
- /* Flush rings and and disable interrupts */
+ /* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
@@ -9316,7 +10674,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bnxt_free_skbs(bp);
/* Save ring stats before shutdown */
- if (bp->bnapi)
+ if (bp->bnapi && irq_re_init)
bnxt_get_ring_stats(bp, &bp->net_stats_prev);
if (irq_re_init) {
bnxt_free_irq(bp);
@@ -9368,53 +10726,60 @@ static int bnxt_close(struct net_device *dev)
static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
u16 *val)
{
- struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_port_phy_mdio_read_input req = {0};
+ struct hwrm_port_phy_mdio_read_output *resp;
+ struct hwrm_port_phy_mdio_read_input *req;
int rc;
if (bp->hwrm_spec_code < 0x10a00)
return -EOPNOTSUPP;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
- req.port_id = cpu_to_le16(bp->pf.port_id);
- req.phy_addr = phy_addr;
- req.reg_addr = cpu_to_le16(reg & 0x1f);
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
+ if (rc)
+ return rc;
+
+ req->port_id = cpu_to_le16(bp->pf.port_id);
+ req->phy_addr = phy_addr;
+ req->reg_addr = cpu_to_le16(reg & 0x1f);
if (mdio_phy_id_is_c45(phy_addr)) {
- req.cl45_mdio = 1;
- req.phy_addr = mdio_phy_id_prtad(phy_addr);
- req.dev_addr = mdio_phy_id_devad(phy_addr);
- req.reg_addr = cpu_to_le16(reg);
+ req->cl45_mdio = 1;
+ req->phy_addr = mdio_phy_id_prtad(phy_addr);
+ req->dev_addr = mdio_phy_id_devad(phy_addr);
+ req->reg_addr = cpu_to_le16(reg);
}
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (!rc)
*val = le16_to_cpu(resp->reg_data);
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
u16 val)
{
- struct hwrm_port_phy_mdio_write_input req = {0};
+ struct hwrm_port_phy_mdio_write_input *req;
+ int rc;
if (bp->hwrm_spec_code < 0x10a00)
return -EOPNOTSUPP;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
- req.port_id = cpu_to_le16(bp->pf.port_id);
- req.phy_addr = phy_addr;
- req.reg_addr = cpu_to_le16(reg & 0x1f);
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
+ if (rc)
+ return rc;
+
+ req->port_id = cpu_to_le16(bp->pf.port_id);
+ req->phy_addr = phy_addr;
+ req->reg_addr = cpu_to_le16(reg & 0x1f);
if (mdio_phy_id_is_c45(phy_addr)) {
- req.cl45_mdio = 1;
- req.phy_addr = mdio_phy_id_prtad(phy_addr);
- req.dev_addr = mdio_phy_id_devad(phy_addr);
- req.reg_addr = cpu_to_le16(reg);
+ req->cl45_mdio = 1;
+ req->phy_addr = mdio_phy_id_prtad(phy_addr);
+ req->dev_addr = mdio_phy_id_devad(phy_addr);
+ req->reg_addr = cpu_to_le16(reg);
}
- req.reg_data = cpu_to_le16(val);
+ req->reg_data = cpu_to_le16(val);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return hwrm_req_send(bp, req);
}
/* rtnl_lock held */
@@ -9428,7 +10793,7 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCGMIIPHY:
mdio->phy_id = bp->link_info.phy_addr;
- /* fallthru */
+ fallthrough;
case SIOCGMIIREG: {
u16 mii_regval = 0;
@@ -9448,6 +10813,12 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
mdio->val_in);
+ case SIOCSHWTSTAMP:
+ return bnxt_hwtstamp_set(dev, ifr);
+
+ case SIOCGHWTSTAMP:
+ return bnxt_hwtstamp_get(dev, ifr);
+
default:
/* do nothing */
break;
@@ -9460,34 +10831,37 @@ static void bnxt_get_ring_stats(struct bnxt *bp,
{
int i;
-
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- struct ctx_hw_stats *hw_stats = cpr->hw_stats;
+ u64 *sw = cpr->stats.sw_stats;
- stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
- stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
- stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
+ stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
+ stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
+ stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
- stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
- stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
- stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
+ stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
+ stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
+ stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
- stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
- stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
- stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
+ stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
+ stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
+ stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
- stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
- stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
- stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
+ stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
+ stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
+ stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
stats->rx_missed_errors +=
- le64_to_cpu(hw_stats->rx_discard_pkts);
+ BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
+
+ stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
- stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
+ stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
- stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
+ stats->rx_dropped +=
+ cpr->sw_stats.rx.rx_netpoll_discards +
+ cpr->sw_stats.rx.rx_oom_discards;
}
}
@@ -9502,6 +10876,7 @@ static void bnxt_add_prev_stats(struct bnxt *bp,
stats->tx_bytes += prev_stats->tx_bytes;
stats->rx_missed_errors += prev_stats->rx_missed_errors;
stats->multicast += prev_stats->multicast;
+ stats->rx_dropped += prev_stats->rx_dropped;
stats->tx_dropped += prev_stats->tx_dropped;
}
@@ -9525,19 +10900,26 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
bnxt_add_prev_stats(bp, stats);
if (bp->flags & BNXT_FLAG_PORT_STATS) {
- struct rx_port_stats *rx = bp->hw_rx_port_stats;
- struct tx_port_stats *tx = bp->hw_tx_port_stats;
-
- stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
- stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
- stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
- le64_to_cpu(rx->rx_ovrsz_frames) +
- le64_to_cpu(rx->rx_runt_frames);
- stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
- le64_to_cpu(rx->rx_jbr_frames);
- stats->collisions = le64_to_cpu(tx->tx_total_collisions);
- stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
- stats->tx_errors = le64_to_cpu(tx->tx_err);
+ u64 *rx = bp->port_stats.sw_stats;
+ u64 *tx = bp->port_stats.sw_stats +
+ BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+
+ stats->rx_crc_errors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
+ stats->rx_frame_errors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
+ stats->rx_length_errors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
+ BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
+ BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
+ stats->rx_errors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
+ BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
+ stats->collisions =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
+ stats->tx_fifo_errors =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
+ stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
}
clear_bit(BNXT_STATE_READ_STATS, &bp->state);
}
@@ -9613,7 +10995,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
- if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
+ if (dev->flags & IFF_PROMISC)
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
uc_update = bnxt_uc_list_updated(bp);
@@ -9623,7 +11005,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
- } else {
+ } else if (dev->flags & IFF_MULTICAST) {
mc_update = bnxt_mc_list_updated(bp, &mask);
}
@@ -9639,6 +11021,7 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
{
struct net_device *dev = bp->dev;
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct hwrm_cfa_l2_filter_free_input *req;
struct netdev_hw_addr *ha;
int i, off = 0, rc;
bool uc_update;
@@ -9650,19 +11033,16 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
if (!uc_update)
goto skip_uc;
- mutex_lock(&bp->hwrm_cmd_lock);
+ rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
+ if (rc)
+ return rc;
+ hwrm_req_hold(bp, req);
for (i = 1; i < vnic->uc_filter_count; i++) {
- struct hwrm_cfa_l2_filter_free_input req = {0};
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
- -1);
-
- req.l2_filter_id = vnic->fw_l2_filter_id[i];
+ req->l2_filter_id = vnic->fw_l2_filter_id[i];
- rc = _hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_send(bp, req);
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
vnic->uc_filter_count = 1;
@@ -9681,18 +11061,31 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
if (rc) {
- netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
- rc);
+ if (BNXT_VF(bp) && rc == -ENODEV) {
+ if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
+ netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
+ else
+ netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
+ rc = 0;
+ } else {
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+ }
vnic->uc_filter_count = i;
return rc;
}
}
+ if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
+ netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
skip_uc:
+ if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
+ !bnxt_promisc_ok(bp))
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
- if (rc && vnic->mc_list_count) {
+ if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
rc);
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
@@ -9731,6 +11124,9 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
return true;
return false;
}
+ /* 212 firmware is broken for aRFS */
+ if (BNXT_FW_MAJ(bp) == 212)
+ return false;
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true;
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
@@ -9746,7 +11142,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_CHIP_P5)
return bnxt_rfs_supported(bp);
- if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
+ if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
vnics = 1 + bp->rx_nr_rings;
@@ -9786,11 +11182,12 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct bnxt *bp = netdev_priv(dev);
+ netdev_features_t vlan_features;
if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
- if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+ if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
if (!(features & NETIF_F_GRO))
@@ -9802,22 +11199,16 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
* turned on or off together.
*/
- if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
- (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
- if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
- features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX);
- else
- features |= NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX;
+ vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
+ if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
+ if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
+ features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
+ else if (vlan_features)
+ features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
}
#ifdef CONFIG_BNXT_SRIOV
- if (BNXT_VF(bp)) {
- if (bp->vf.vlan) {
- features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX);
- }
- }
+ if (BNXT_VF(bp) && bp->vf.vlan)
+ features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
#endif
return features;
}
@@ -9840,7 +11231,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
flags &= ~BNXT_FLAG_TPA;
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
flags |= BNXT_FLAG_STRIP_VLAN;
if (features & NETIF_F_NTUPLE)
@@ -9888,23 +11279,187 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
+static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
+ u8 **nextp)
+{
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
+ int hdr_count = 0;
+ u8 *nexthdr;
+ int start;
+
+ /* Check that there are at most 2 IPv6 extension headers, no
+ * fragment header, and each is <= 64 bytes.
+ */
+ start = nw_off + sizeof(*ip6h);
+ nexthdr = &ip6h->nexthdr;
+ while (ipv6_ext_hdr(*nexthdr)) {
+ struct ipv6_opt_hdr *hp;
+ int hdrlen;
+
+ if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
+ *nexthdr == NEXTHDR_FRAGMENT)
+ return false;
+ hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
+ skb_headlen(skb), NULL);
+ if (!hp)
+ return false;
+ if (*nexthdr == NEXTHDR_AUTH)
+ hdrlen = ipv6_authlen(hp);
+ else
+ hdrlen = ipv6_optlen(hp);
+
+ if (hdrlen > 64)
+ return false;
+ nexthdr = &hp->nexthdr;
+ start += hdrlen;
+ hdr_count++;
+ }
+ if (nextp) {
+ /* Caller will check inner protocol */
+ if (skb->encapsulation) {
+ *nextp = nexthdr;
+ return true;
+ }
+ *nextp = NULL;
+ }
+ /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
+ return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
+}
+
+/* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
+static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_hdr(skb);
+ __be16 udp_port = uh->dest;
+
+ if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
+ return false;
+ if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
+ struct ethhdr *eh = inner_eth_hdr(skb);
+
+ switch (eh->h_proto) {
+ case htons(ETH_P_IP):
+ return true;
+ case htons(ETH_P_IPV6):
+ return bnxt_exthdr_check(bp, skb,
+ skb_inner_network_offset(skb),
+ NULL);
+ }
+ }
+ return false;
+}
+
+static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
+{
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ return bnxt_udp_tunl_check(bp, skb);
+ case IPPROTO_IPIP:
+ return true;
+ case IPPROTO_GRE: {
+ switch (skb->inner_protocol) {
+ default:
+ return false;
+ case htons(ETH_P_IP):
+ return true;
+ case htons(ETH_P_IPV6):
+ fallthrough;
+ }
+ }
+ case IPPROTO_IPV6:
+ /* Check ext headers of inner ipv6 */
+ return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
+ NULL);
+ }
+ return false;
+}
+
+static netdev_features_t bnxt_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u8 *l4_proto;
+
+ features = vlan_features_check(skb, features);
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ if (!skb->encapsulation)
+ return features;
+ l4_proto = &ip_hdr(skb)->protocol;
+ if (bnxt_tunl_check(bp, skb, *l4_proto))
+ return features;
+ break;
+ case htons(ETH_P_IPV6):
+ if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
+ &l4_proto))
+ break;
+ if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
+ return features;
+ break;
+ }
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
+int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
+ u32 *reg_buf)
+{
+ struct hwrm_dbg_read_direct_output *resp;
+ struct hwrm_dbg_read_direct_input *req;
+ __le32 *dbg_reg_buf;
+ dma_addr_t mapping;
+ int rc, i;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
+ if (rc)
+ return rc;
+
+ dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
+ &mapping);
+ if (!dbg_reg_buf) {
+ rc = -ENOMEM;
+ goto dbg_rd_reg_exit;
+ }
+
+ req->host_dest_addr = cpu_to_le64(mapping);
+
+ resp = hwrm_req_hold(bp, req);
+ req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
+ req->read_len32 = cpu_to_le32(num_words);
+
+ rc = hwrm_req_send(bp, req);
+ if (rc || resp->error_code) {
+ rc = -EIO;
+ goto dbg_rd_reg_exit;
+ }
+ for (i = 0; i < num_words; i++)
+ reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
+
+dbg_rd_reg_exit:
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
u32 ring_id, u32 *prod, u32 *cons)
{
- struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_dbg_ring_info_get_input req = {0};
+ struct hwrm_dbg_ring_info_get_output *resp;
+ struct hwrm_dbg_ring_info_get_input *req;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
- req.ring_type = ring_type;
- req.fw_ring_id = cpu_to_le32(ring_id);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
+ if (rc)
+ return rc;
+
+ req->ring_type = ring_type;
+ req->fw_ring_id = cpu_to_le32(ring_id);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (!rc) {
*prod = le32_to_cpu(resp->producer_index);
*cons = le32_to_cpu(resp->consumer_index);
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
@@ -9959,6 +11514,27 @@ static void bnxt_dbg_dump_states(struct bnxt *bp)
}
}
+static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
+ struct hwrm_ring_reset_input *req;
+ struct bnxt_napi *bnapi = rxr->bnapi;
+ struct bnxt_cp_ring_info *cpr;
+ u16 cp_ring_id;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
+ if (rc)
+ return rc;
+
+ cpr = &bnapi->cp_ring;
+ cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
+ req->cmpl_ring = cpu_to_le16(cp_ring_id);
+ req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
+ req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
+ return hwrm_req_send_silent(bp, req);
+}
+
static void bnxt_reset_task(struct bnxt *bp, bool silent)
{
if (!silent)
@@ -9995,20 +11571,26 @@ static void bnxt_fw_health_check(struct bnxt *bp)
if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return;
+ /* Make sure it is enabled before checking the tmr_counter. */
+ smp_rmb();
if (fw_health->tmr_counter) {
fw_health->tmr_counter--;
return;
}
val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
- if (val == fw_health->last_fw_heartbeat)
+ if (val == fw_health->last_fw_heartbeat) {
+ fw_health->arrests++;
goto fw_reset;
+ }
fw_health->last_fw_heartbeat = val;
val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
- if (val != fw_health->last_fw_reset_cnt)
+ if (val != fw_health->last_fw_reset_cnt) {
+ fw_health->discoveries++;
goto fw_reset;
+ }
fw_health->tmr_counter = fw_health->tmr_multiplier;
return;
@@ -10023,7 +11605,7 @@ static void bnxt_timer(struct timer_list *t)
struct bnxt *bp = from_timer(bp, t, timer);
struct net_device *dev = bp->dev;
- if (!netif_running(dev))
+ if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
return;
if (atomic_read(&bp->intr_sem) != 0)
@@ -10032,8 +11614,7 @@ static void bnxt_timer(struct timer_list *t)
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
bnxt_fw_health_check(bp);
- if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
- bp->stats_coal_ticks) {
+ if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) {
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
@@ -10060,6 +11641,11 @@ static void bnxt_timer(struct timer_list *t)
}
}
+ if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) {
+ set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
+ bnxt_queue_sp_work(bp);
+ }
+
if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
netif_carrier_ok(dev)) {
set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
@@ -10095,15 +11681,77 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
bnxt_rtnl_unlock_sp(bp);
}
+/* Only called from bnxt_sp_task() */
+static void bnxt_rx_ring_reset(struct bnxt *bp)
+{
+ int i;
+
+ bnxt_rtnl_lock_sp(bp);
+ if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+ bnxt_rtnl_unlock_sp(bp);
+ return;
+ }
+ /* Disable and flush TPA before resetting the RX ring */
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, false);
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_cp_ring_info *cpr;
+ int rc;
+
+ if (!rxr->bnapi->in_reset)
+ continue;
+
+ rc = bnxt_hwrm_rx_ring_reset(bp, i);
+ if (rc) {
+ if (rc == -EINVAL || rc == -EOPNOTSUPP)
+ netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
+ else
+ netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
+ rc);
+ bnxt_reset_task(bp, true);
+ break;
+ }
+ bnxt_free_one_rx_ring_skbs(bp, i);
+ rxr->rx_prod = 0;
+ rxr->rx_agg_prod = 0;
+ rxr->rx_sw_agg_prod = 0;
+ rxr->rx_next_cons = 0;
+ rxr->bnapi->in_reset = false;
+ bnxt_alloc_one_rx_ring(bp, i);
+ cpr = &rxr->bnapi->cp_ring;
+ cpr->sw_stats.rx.rx_resets++;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ }
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, true);
+ bnxt_rtnl_unlock_sp(bp);
+}
+
static void bnxt_fw_reset_close(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
- /* When firmware is fatal state, disable PCI device to prevent
- * any potential bad DMAs before freeing kernel memory.
+ /* When firmware is in fatal state, quiesce device and disable
+ * bus master to prevent any potential bad DMAs before freeing
+ * kernel memory.
*/
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
+ u16 val = 0;
+
+ pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
+ if (val == 0xffff)
+ bp->fw_reset_min_dsecs = 0;
+ bnxt_tx_disable(bp);
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ bnxt_free_irq(bp);
+ bnxt_clear_int_mode(bp);
pci_disable_device(bp->pdev);
+ }
__bnxt_close_nic(bp, true, false);
+ bnxt_vf_reps_free(bp);
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
if (pci_is_enabled(bp->pdev))
@@ -10137,16 +11785,23 @@ static bool is_bnxt_fw_ok(struct bnxt *bp)
static void bnxt_force_fw_reset(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u32 wait_dsecs;
if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return;
- set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ if (ptp) {
+ spin_lock_bh(&ptp->ptp_lock);
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ spin_unlock_bh(&ptp->ptp_lock);
+ } else {
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ }
bnxt_fw_reset_close(bp);
wait_dsecs = fw_health->master_func_wait_dsecs;
- if (fw_health->master) {
+ if (fw_health->primary) {
if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
wait_dsecs = 0;
bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
@@ -10199,9 +11854,16 @@ void bnxt_fw_reset(struct bnxt *bp)
bnxt_rtnl_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
int n = 0, tmo;
- set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ if (ptp) {
+ spin_lock_bh(&ptp->ptp_lock);
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ spin_unlock_bh(&ptp->ptp_lock);
+ } else {
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ }
if (bp->pf.active_vfs &&
!test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
n = bnxt_get_registered_vfs(bp);
@@ -10268,7 +11930,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
bnxt_dbg_hwrm_ring_info_get(bp,
DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
fw_ring_id, &val[0], &val[1]);
- cpr->missed_irqs++;
+ cpr->sw_stats.cmn.missed_irqs++;
}
}
}
@@ -10289,8 +11951,15 @@ static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
}
link_info->advertising = link_info->auto_link_speeds;
+ link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
} else {
link_info->req_link_speed = link_info->force_link_speed;
+ link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
+ if (link_info->force_pam4_link_speed) {
+ link_info->req_link_speed =
+ link_info->force_pam4_link_speed;
+ link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
+ }
link_info->req_duplex = link_info->duplex_setting;
}
if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
@@ -10300,6 +11969,20 @@ static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
link_info->req_flow_ctrl = link_info->force_pause_setting;
}
+static void bnxt_fw_echo_reply(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ struct hwrm_func_echo_response_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
+ if (rc)
+ return;
+ req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
+ req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
+ hwrm_req_send(bp, req);
+}
+
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
@@ -10318,28 +12001,10 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_cfg_ntp_filters(bp);
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
bnxt_hwrm_exec_fwd_req(bp);
- if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
- bnxt_hwrm_tunnel_dst_port_alloc(
- bp, bp->vxlan_port,
- TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
- }
- if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
- bnxt_hwrm_tunnel_dst_port_free(
- bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
- }
- if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
- bnxt_hwrm_tunnel_dst_port_alloc(
- bp, bp->nge_port,
- TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
- }
- if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
- bnxt_hwrm_tunnel_dst_port_free(
- bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
- }
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
- bnxt_hwrm_port_qstats(bp);
- bnxt_hwrm_port_qstats_ext(bp);
- bnxt_hwrm_pcie_qstats(bp);
+ bnxt_hwrm_port_qstats(bp, 0);
+ bnxt_hwrm_port_qstats_ext(bp, 0);
+ bnxt_accumulate_all_stats(bp);
}
if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
@@ -10350,15 +12015,15 @@ static void bnxt_sp_task(struct work_struct *work)
&bp->sp_event))
bnxt_hwrm_phy_qcaps(bp);
- if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
- &bp->sp_event))
- bnxt_init_ethtool_link_settings(bp);
-
rc = bnxt_update_link(bp, true);
- mutex_unlock(&bp->link_lock);
if (rc)
netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
rc);
+
+ if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
+ &bp->sp_event))
+ bnxt_init_ethtool_link_settings(bp);
+ mutex_unlock(&bp->link_lock);
}
if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
int rc;
@@ -10385,6 +12050,9 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
bnxt_chk_missed_irq(bp);
+ if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
+ bnxt_fw_echo_reply(bp);
+
/* These functions below will clear BNXT_STATE_IN_SP_TASK. They
* must be the last functions to be called before exiting.
*/
@@ -10394,13 +12062,20 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, true);
- if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
- bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
+ if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
+ bnxt_rx_ring_reset(bp);
+
+ if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
+ test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
+ bnxt_devlink_health_fw_report(bp);
+ else
+ bnxt_fw_reset(bp);
+ }
if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
if (!is_bnxt_fw_ok(bp))
- bnxt_devlink_health_report(bp,
- BNXT_FW_EXCEPTION_SP_EVENT);
+ bnxt_devlink_health_fw_report(bp);
}
smp_mb__before_atomic();
@@ -10474,7 +12149,13 @@ static void bnxt_cleanup_pci(struct bnxt *bp)
static void bnxt_init_dflt_coal(struct bnxt *bp)
{
+ struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
struct bnxt_coal *coal;
+ u16 flags = 0;
+
+ if (coal_cap->cmpl_params &
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
+ flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
/* Tick values in micro seconds.
* 1 coal_buf x bufs_per_record = 1 completion record.
@@ -10487,6 +12168,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
coal->idle_thresh = 50;
coal->bufs_per_record = 2;
coal->budget = 64; /* NAPI budget */
+ coal->flags = flags;
coal = &bp->tx_coal;
coal->coal_ticks = 28;
@@ -10494,48 +12176,29 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
coal->coal_ticks_irq = 2;
coal->coal_bufs_irq = 2;
coal->bufs_per_record = 1;
+ coal->flags = flags;
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
-static void bnxt_alloc_fw_health(struct bnxt *bp)
-{
- if (bp->fw_health)
- return;
-
- if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
- !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
- return;
-
- bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
- if (!bp->fw_health) {
- netdev_warn(bp->dev, "Failed to allocate fw_health\n");
- bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
- bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
- }
-}
-
static int bnxt_fw_init_one_p1(struct bnxt *bp)
{
int rc;
bp->fw_cap = 0;
rc = bnxt_hwrm_ver_get(bp);
- if (rc)
- return rc;
-
- if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
- rc = bnxt_alloc_kong_hwrm_resources(bp);
+ bnxt_try_map_fw_health_reg(bp);
+ if (rc) {
+ rc = bnxt_try_recover_fw(bp);
if (rc)
- bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
- }
-
- if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
- bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
- rc = bnxt_alloc_hwrm_short_cmd_req(bp);
+ return rc;
+ rc = bnxt_hwrm_ver_get(bp);
if (rc)
return rc;
}
+
+ bnxt_nvm_cfg_ver_get(bp);
+
rc = bnxt_hwrm_func_reset(bp);
if (rc)
return -ENODEV;
@@ -10561,11 +12224,14 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
rc);
- bnxt_alloc_fw_health(bp);
- rc = bnxt_hwrm_error_recovery_qcfg(bp);
- if (rc)
- netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
- rc);
+ if (bnxt_alloc_fw_health(bp)) {
+ netdev_warn(bp->dev, "no memory for firmware error recovery\n");
+ } else {
+ rc = bnxt_hwrm_error_recovery_qcfg(bp);
+ if (rc)
+ netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
+ rc);
+ }
rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
if (rc)
@@ -10626,7 +12292,9 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
bnxt_hwrm_coal_params_qcaps(bp);
}
-static int bnxt_fw_init_one(struct bnxt *bp)
+static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
+
+int bnxt_fw_init_one(struct bnxt *bp)
{
int rc;
@@ -10640,15 +12308,13 @@ static int bnxt_fw_init_one(struct bnxt *bp)
netdev_err(bp->dev, "Firmware init phase 2 failed\n");
return rc;
}
+ rc = bnxt_probe_phy(bp, false);
+ if (rc)
+ return rc;
rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
if (rc)
return rc;
- /* In case fw capabilities have changed, destroy the unneeded
- * reporters and create newly capable ones.
- */
- bnxt_dl_fw_reporters_destroy(bp, false);
- bnxt_dl_fw_reporters_create(bp);
bnxt_fw_init_one_p3(bp);
return 0;
}
@@ -10671,7 +12337,7 @@ static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
writel(reg_off & BNXT_GRC_BASE_MASK,
bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
- /* fall through */
+ fallthrough;
case BNXT_FW_HEALTH_REG_TYPE_BAR0:
writel(val, bp->bar0 + reg_off);
break;
@@ -10685,18 +12351,35 @@ static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
}
}
+bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
+ bool result = true; /* firmware will enforce if unknown */
+
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ return result;
+
+ if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
+ return result;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = hwrm_req_hold(bp, req);
+ if (!hwrm_req_send(bp, req))
+ result = !!(le16_to_cpu(resp->flags) &
+ FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
+ hwrm_req_drop(bp, req);
+ return result;
+}
+
static void bnxt_reset_all(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
int i, rc;
if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
-#ifdef CONFIG_TEE_BNXT_FW
- rc = tee_bnxt_fw_load();
- if (rc)
- netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc);
+ bnxt_fw_reset_via_optee(bp);
bp->fw_reset_timestamp = jiffies;
-#endif
return;
}
@@ -10704,24 +12387,43 @@ static void bnxt_reset_all(struct bnxt *bp)
for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
bnxt_fw_reset_writel(bp, i);
} else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
- struct hwrm_fw_reset_input req = {0};
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
- req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
- req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
- req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
+ struct hwrm_fw_reset_input *req;
+
+ rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
+ if (!rc) {
+ req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
+ req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
+ req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
+ req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
+ rc = hwrm_req_send(bp, req);
+ }
+ if (rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
}
bp->fw_reset_timestamp = jiffies;
}
+static bool bnxt_fw_reset_timeout(struct bnxt *bp)
+{
+ return time_after(jiffies, bp->fw_reset_timestamp +
+ (bp->fw_reset_max_dsecs * HZ / 10));
+}
+
+static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
+{
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
+ bnxt_ulp_start(bp, rc);
+ bnxt_dl_health_fw_status_update(bp, false);
+ }
+ bp->fw_reset_state = 0;
+ dev_close(bp->dev);
+}
+
static void bnxt_fw_reset_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
- int rc;
+ int rc = 0;
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
@@ -10739,8 +12441,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_timestamp));
goto fw_reset_abort;
} else if (n > 0) {
- if (time_after(jiffies, bp->fw_reset_timestamp +
- (bp->fw_reset_max_dsecs * HZ / 10))) {
+ if (bnxt_fw_reset_timeout(bp)) {
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bp->fw_reset_state = 0;
netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
@@ -10752,6 +12453,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
}
bp->fw_reset_timestamp = jiffies;
rtnl_lock();
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
+ bnxt_fw_reset_abort(bp, rc);
+ rtnl_unlock();
+ return;
+ }
bnxt_fw_reset_close(bp);
if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
@@ -10769,13 +12475,12 @@ static void bnxt_fw_reset_task(struct work_struct *work)
val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
- !time_after(jiffies, bp->fw_reset_timestamp +
- (bp->fw_reset_max_dsecs * HZ / 10))) {
+ !bnxt_fw_reset_timeout(bp)) {
bnxt_queue_fw_reset_work(bp, HZ / 5);
return;
}
- if (!bp->fw_health->master) {
+ if (!bp->fw_health->primary) {
u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
@@ -10784,45 +12489,56 @@ static void bnxt_fw_reset_task(struct work_struct *work)
}
bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
}
- /* fall through */
+ fallthrough;
case BNXT_FW_RESET_STATE_RESET_FW:
bnxt_reset_all(bp);
bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
return;
case BNXT_FW_RESET_STATE_ENABLE_DEV:
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
- u32 val;
-
- val = bnxt_fw_health_readl(bp,
- BNXT_FW_RESET_INPROG_REG);
- if (val)
- netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
- val);
+ bnxt_inv_fw_health_reg(bp);
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
+ !bp->fw_reset_min_dsecs) {
+ u16 val;
+
+ pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
+ if (val == 0xffff) {
+ if (bnxt_fw_reset_timeout(bp)) {
+ netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
+ rc = -ETIMEDOUT;
+ goto fw_reset_abort;
+ }
+ bnxt_queue_fw_reset_work(bp, HZ / 1000);
+ return;
+ }
}
clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
+ if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
+ !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
+ bnxt_dl_remote_reload(bp);
if (pci_enable_device(bp->pdev)) {
netdev_err(bp->dev, "Cannot re-enable PCI device\n");
+ rc = -ENODEV;
goto fw_reset_abort;
}
pci_set_master(bp->pdev);
bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
- /* fall through */
+ fallthrough;
case BNXT_FW_RESET_STATE_POLL_FW:
bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
- rc = __bnxt_hwrm_ver_get(bp, true);
+ rc = bnxt_hwrm_poll(bp);
if (rc) {
- if (time_after(jiffies, bp->fw_reset_timestamp +
- (bp->fw_reset_max_dsecs * HZ / 10))) {
+ if (bnxt_fw_reset_timeout(bp)) {
netdev_err(bp->dev, "Firmware reset aborted\n");
- goto fw_reset_abort;
+ goto fw_reset_abort_status;
}
bnxt_queue_fw_reset_work(bp, HZ / 5);
return;
}
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
- /* fall through */
+ fallthrough;
case BNXT_FW_RESET_STATE_OPENING:
while (!rtnl_trylock()) {
bnxt_queue_fw_reset_work(bp, HZ / 10);
@@ -10830,32 +12546,46 @@ static void bnxt_fw_reset_task(struct work_struct *work)
}
rc = bnxt_open(bp->dev);
if (rc) {
- netdev_err(bp->dev, "bnxt_open_nic() failed\n");
- clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- dev_close(bp->dev);
+ netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
+ bnxt_fw_reset_abort(bp, rc);
+ rtnl_unlock();
+ return;
}
+ if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
+ bp->fw_health->enabled) {
+ bp->fw_health->last_fw_reset_cnt =
+ bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ }
bp->fw_reset_state = 0;
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- bnxt_ulp_start(bp, rc);
- if (!rc)
- bnxt_reenable_sriov(bp);
- bnxt_dl_health_recovery_done(bp);
- bnxt_dl_health_status_update(bp, true);
+ bnxt_ulp_start(bp, 0);
+ bnxt_reenable_sriov(bp);
+ bnxt_vf_reps_alloc(bp);
+ bnxt_vf_reps_open(bp);
+ bnxt_ptp_reapply_pps(bp);
+ clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
+ bnxt_dl_health_fw_recovery_done(bp);
+ bnxt_dl_health_fw_status_update(bp, true);
+ }
rtnl_unlock();
break;
}
return;
+fw_reset_abort_status:
+ if (bp->fw_health->status_reliable ||
+ (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
+ u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+
+ netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
+ }
fw_reset_abort:
- clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
- bnxt_dl_health_status_update(bp, false);
- bp->fw_reset_state = 0;
rtnl_lock();
- dev_close(bp->dev);
+ bnxt_fw_reset_abort(bp, rc);
rtnl_unlock();
}
@@ -10889,7 +12619,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
dev_err(&pdev->dev, "System does not support DMA, aborting\n");
- goto init_err_disable;
+ rc = -EIO;
+ goto init_err_release;
}
pci_set_master(pdev);
@@ -10897,6 +12628,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->dev = dev;
bp->pdev = pdev;
+ /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
+ * determines the BAR size.
+ */
bp->bar0 = pci_ioremap_bar(pdev, 0);
if (!bp->bar0) {
dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
@@ -10904,13 +12638,6 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
goto init_err_release;
}
- bp->bar1 = pci_ioremap_bar(pdev, 2);
- if (!bp->bar1) {
- dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
- rc = -ENOMEM;
- goto init_err_release;
- }
-
bp->bar2 = pci_ioremap_bar(pdev, 4);
if (!bp->bar2) {
dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
@@ -10931,11 +12658,12 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
- bnxt_init_dflt_coal(bp);
-
timer_setup(&bp->timer, bnxt_timer, 0);
bp->current_interval = BNXT_TIMER_INTERVAL;
+ bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
+ bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
+
clear_bit(BNXT_STATE_OPEN, &bp->state);
return 0;
@@ -10967,7 +12695,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
if (rc)
return rc;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (netif_running(dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -11175,8 +12903,8 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rcu_read_lock();
hlist_for_each_entry_rcu(fltr, head, hash) {
if (bnxt_fltr_match(fltr, new_fltr)) {
+ rc = fltr->sw_id;
rcu_read_unlock();
- rc = 0;
goto err_free;
}
}
@@ -11263,84 +12991,33 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
#endif /* CONFIG_RFS_ACCEL */
-static void bnxt_udp_tunnel_add(struct net_device *dev,
- struct udp_tunnel_info *ti)
+static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
{
- struct bnxt *bp = netdev_priv(dev);
-
- if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
- return;
-
- if (!netif_running(dev))
- return;
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
- return;
+ struct bnxt *bp = netdev_priv(netdev);
+ struct udp_tunnel_info ti;
+ unsigned int cmd;
- bp->vxlan_port_cnt++;
- if (bp->vxlan_port_cnt == 1) {
- bp->vxlan_port = ti->port;
- set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (bp->nge_port_cnt && bp->nge_port != ti->port)
- return;
+ udp_tunnel_nic_get_port(netdev, table, 0, &ti);
+ if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
+ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
+ else
+ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
- bp->nge_port_cnt++;
- if (bp->nge_port_cnt == 1) {
- bp->nge_port = ti->port;
- set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
- }
- break;
- default:
- return;
- }
+ if (ti.port)
+ return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
- bnxt_queue_sp_work(bp);
+ return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
}
-static void bnxt_udp_tunnel_del(struct net_device *dev,
- struct udp_tunnel_info *ti)
-{
- struct bnxt *bp = netdev_priv(dev);
-
- if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
- return;
-
- if (!netif_running(dev))
- return;
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
- return;
- bp->vxlan_port_cnt--;
-
- if (bp->vxlan_port_cnt != 0)
- return;
-
- set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (!bp->nge_port_cnt || bp->nge_port != ti->port)
- return;
- bp->nge_port_cnt--;
-
- if (bp->nge_port_cnt != 0)
- return;
-
- set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
- break;
- default:
- return;
- }
-
- bnxt_queue_sp_work(bp);
-}
+static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
+ .sync_table = bnxt_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+ UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+};
static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u32 filter_mask,
@@ -11418,12 +13095,13 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_stop = bnxt_close,
.ndo_get_stats64 = bnxt_get_stats64,
.ndo_set_rx_mode = bnxt_set_rx_mode,
- .ndo_do_ioctl = bnxt_ioctl,
+ .ndo_eth_ioctl = bnxt_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = bnxt_change_mac_addr,
.ndo_change_mtu = bnxt_change_mtu,
.ndo_fix_features = bnxt_fix_features,
.ndo_set_features = bnxt_set_features,
+ .ndo_features_check = bnxt_features_check,
.ndo_tx_timeout = bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
.ndo_get_vf_config = bnxt_get_vf_config,
@@ -11438,8 +13116,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = bnxt_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
- .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
.ndo_bpf = bnxt_xdp,
.ndo_xdp_xmit = bnxt_xdp_xmit,
.ndo_bridge_getlink = bnxt_bridge_getlink,
@@ -11455,28 +13131,39 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
- bnxt_dl_fw_reporters_destroy(bp, true);
+ if (BNXT_PF(bp))
+ devlink_port_type_clear(&bp->dl_port);
+
+ bnxt_ptp_clear(bp);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ /* Flush any pending tasks */
+ cancel_work_sync(&bp->sp_task);
+ cancel_delayed_work_sync(&bp->fw_reset_task);
+ bp->sp_event = 0;
+
+ bnxt_dl_fw_reporters_destroy(bp);
bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
- bnxt_cancel_sp_work(bp);
- bp->sp_event = 0;
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
- bnxt_free_hwrm_short_cmd_req(bp);
bnxt_ethtool_free(bp);
bnxt_dcb_free(bp);
kfree(bp->edev);
bp->edev = NULL;
+ kfree(bp->ptp_cfg);
+ bp->ptp_cfg = NULL;
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
+ kfree(bp->rss_indir_tbl);
+ bp->rss_indir_tbl = NULL;
bnxt_free_port_stats(bp);
free_netdev(dev);
}
@@ -11486,17 +13173,24 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
int rc = 0;
struct bnxt_link_info *link_info = &bp->link_info;
+ bp->phy_flags = 0;
rc = bnxt_hwrm_phy_qcaps(bp);
if (rc) {
netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
rc);
return rc;
}
+ if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
+ bp->dev->priv_flags |= IFF_SUPP_NOFCS;
+ else
+ bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
if (!fw_dflt)
return 0;
+ mutex_lock(&bp->link_lock);
rc = bnxt_update_link(bp, false);
if (rc) {
+ mutex_unlock(&bp->link_lock);
netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
rc);
return rc;
@@ -11509,6 +13203,7 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
link_info->support_auto_speeds = link_info->support_speeds;
bnxt_init_ethtool_link_settings(bp);
+ mutex_unlock(&bp->link_lock);
return 0;
}
@@ -11652,7 +13347,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
rc = __bnxt_reserve_rings(bp);
- if (rc)
+ if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
if (sh)
@@ -11661,7 +13356,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
/* Rings may have been trimmed, re-reserve the trimmed rings. */
if (bnxt_need_reserve_rings(bp)) {
rc = __bnxt_reserve_rings(bp);
- if (rc)
+ if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "2nd rings reservation failed.\n");
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
}
@@ -11669,6 +13364,10 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bp->rx_nr_rings++;
bp->cp_nr_rings++;
}
+ if (rc) {
+ bp->tx_nr_rings = 0;
+ bp->rx_nr_rings = 0;
+ }
return rc;
}
@@ -11683,7 +13382,10 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
bnxt_clear_int_mode(bp);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
- netdev_err(bp->dev, "Not enough rings available.\n");
+ if (BNXT_VF(bp) && rc == -ENODEV)
+ netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
+ else
+ netdev_err(bp->dev, "Not enough rings available.\n");
goto init_dflt_ring_err;
}
rc = bnxt_init_int_mode(bp);
@@ -11691,10 +13393,9 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
goto init_dflt_ring_err;
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
- bp->flags |= BNXT_FLAG_RFS;
- bp->dev->features |= NETIF_F_NTUPLE;
- }
+
+ bnxt_set_dflt_rfs(bp);
+
init_dflt_ring_err:
bnxt_ulp_irq_restart(bp, rc);
return rc;
@@ -11730,7 +13431,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
int rc = 0;
if (BNXT_PF(bp)) {
- memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
@@ -11738,7 +13439,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
if (is_valid_ether_addr(vf->mac_addr)) {
/* overwrite netdev dev_addr with admin VF MAC */
- memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, vf->mac_addr);
/* Older PF driver or firmware may not approve this
* correctly.
*/
@@ -11752,30 +13453,78 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
return rc;
}
+static void bnxt_vpd_read_info(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+ unsigned int vpd_size, kw_len;
+ int pos, size;
+ u8 *vpd_data;
+
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(pdev, "Unable to read VPD\n");
+ return;
+ }
+
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (pos < 0)
+ goto read_sn;
+
+ size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
+ memcpy(bp->board_partno, &vpd_data[pos], size);
+
+read_sn:
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO,
+ &kw_len);
+ if (pos < 0)
+ goto exit;
+
+ size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
+ memcpy(bp->board_serialno, &vpd_data[pos], size);
+exit:
+ kfree(vpd_data);
+}
+
static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
struct pci_dev *pdev = bp->pdev;
- int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- u32 dw;
+ u64 qword;
- if (!pos) {
- netdev_info(bp->dev, "Unable do read adapter's DSN\n");
+ qword = pci_get_dsn(pdev);
+ if (!qword) {
+ netdev_info(bp->dev, "Unable to read adapter's DSN\n");
return -EOPNOTSUPP;
}
- /* DSN (two dw) is at an offset of 4 from the cap pos */
- pos += 4;
- pci_read_config_dword(pdev, pos, &dw);
- put_unaligned_le32(dw, &dsn[0]);
- pci_read_config_dword(pdev, pos + 4, &dw);
- put_unaligned_le32(dw, &dsn[4]);
+ put_unaligned_le64(qword, dsn);
+
bp->flags |= BNXT_FLAG_DSN_VALID;
return 0;
}
+static int bnxt_map_db_bar(struct bnxt *bp)
+{
+ if (!bp->db_size)
+ return -ENODEV;
+ bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
+ if (!bp->bar1)
+ return -ENOMEM;
+ return 0;
+}
+
+void bnxt_print_device_info(struct bnxt *bp)
+{
+ netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
+ board_info[bp->board_idx].name,
+ (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
+
+ pcie_print_link_status(bp->pdev);
+}
+
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int version_printed;
struct net_device *dev;
struct bnxt *bp;
int rc, max_irqs;
@@ -11783,9 +13532,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_is_bridge(pdev))
return -ENODEV;
- if (version_printed++ == 0)
- pr_info("%s", version);
-
/* Clear any pending DMA transactions from crash kernel
* while loading driver in capture kernel.
*/
@@ -11800,9 +13546,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
bp = netdev_priv(dev);
+ bp->board_idx = ent->driver_data;
+ bp->msg_enable = BNXT_DEF_MSG_ENABLE;
bnxt_set_max_func_irqs(bp, max_irqs);
- if (bnxt_vf_pciid(ent->driver_data))
+ if (bnxt_vf_pciid(bp->board_idx))
bp->flags |= BNXT_FLAG_VF;
if (pdev->msix_cap)
@@ -11828,13 +13576,30 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
- if (BNXT_CHIP_P5(bp))
+ if (BNXT_PF(bp))
+ bnxt_vpd_read_info(bp);
+
+ if (BNXT_CHIP_P5(bp)) {
bp->flags |= BNXT_FLAG_CHIP_P5;
+ if (BNXT_CHIP_SR2(bp))
+ bp->flags |= BNXT_FLAG_CHIP_SR2;
+ }
+
+ rc = bnxt_alloc_rss_indir_tbl(bp);
+ if (rc)
+ goto init_err_pci_clean;
rc = bnxt_fw_init_one_p2(bp);
if (rc)
goto init_err_pci_clean;
+ rc = bnxt_map_db_bar(bp);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
+ rc);
+ goto init_err_pci_clean;
+ }
+
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
@@ -11852,11 +13617,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
+ dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
+
dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_GRE_CSUM;
dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+ if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
+ dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
+ if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
+ dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
if (BNXT_SUPPORTS_TPA(bp))
dev->hw_features |= NETIF_F_GRO_HW;
dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
@@ -11866,7 +13635,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
- mutex_init(&bp->sriov_lock);
#endif
if (BNXT_SUPPORTS_TPA(bp)) {
bp->gro_func = bnxt_gro_func_5730x;
@@ -11878,8 +13646,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!BNXT_CHIP_P4_PLUS(bp))
bp->flags |= BNXT_FLAG_DOUBLE_DB;
- bp->ulp_probe = bnxt_ulp_probe;
-
rc = bnxt_init_mac_addr(bp);
if (rc) {
dev_err(&pdev->dev, "Unable to initialize mac address.\n");
@@ -11905,14 +13671,20 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_set_ring_params(bp);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
- netdev_err(bp->dev, "Not enough rings available.\n");
- rc = -ENOMEM;
+ if (BNXT_VF(bp) && rc == -ENODEV) {
+ netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
+ } else {
+ netdev_err(bp->dev, "Not enough rings available.\n");
+ rc = -ENOMEM;
+ }
goto init_err_pci_clean;
}
bnxt_fw_init_one_p3(bp);
- if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
+ bnxt_init_dflt_coal(bp);
+
+ if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
bp->flags |= BNXT_FLAG_STRIP_VLAN;
rc = bnxt_init_int_mode(bp);
@@ -11930,13 +13702,20 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
create_singlethread_workqueue("bnxt_pf_wq");
if (!bnxt_pf_wq) {
dev_err(&pdev->dev, "Unable to create workqueue.\n");
+ rc = -ENOMEM;
goto init_err_pci_clean;
}
}
- bnxt_init_tc(bp);
+ rc = bnxt_init_tc(bp);
+ if (rc)
+ netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
+ rc);
}
- bnxt_dl_register(bp);
+ bnxt_inv_fw_health_reg(bp);
+ rc = bnxt_dl_register(bp);
+ if (rc)
+ goto init_err_dl;
rc = register_netdev(dev);
if (rc)
@@ -11946,28 +13725,32 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
devlink_port_type_eth_set(&bp->dl_port, bp->dev);
bnxt_dl_fw_reporters_create(bp);
- netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
- board_info[ent->driver_data].name,
- (long)pci_resource_start(pdev, 0), dev->dev_addr);
- pcie_print_link_status(pdev);
+ bnxt_print_device_info(bp);
+ pci_save_state(pdev);
return 0;
init_err_cleanup:
bnxt_dl_unregister(bp);
+init_err_dl:
bnxt_shutdown_tc(bp);
bnxt_clear_int_mode(bp);
init_err_pci_clean:
bnxt_hwrm_func_drv_unrgtr(bp);
- bnxt_free_hwrm_short_cmd_req(bp);
bnxt_free_hwrm_resources(bp);
- bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
+ bnxt_ethtool_free(bp);
+ bnxt_ptp_clear(bp);
+ kfree(bp->ptp_cfg);
+ bp->ptp_cfg = NULL;
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
+ kfree(bp->rss_indir_tbl);
+ bp->rss_indir_tbl = NULL;
init_err_free:
free_netdev(dev);
@@ -12049,19 +13832,9 @@ static int bnxt_resume(struct device *device)
goto resume_exit;
}
- if (bnxt_hwrm_queue_qportcfg(bp)) {
- rc = -ENODEV;
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc)
goto resume_exit;
- }
-
- if (bp->hwrm_spec_code >= 0x10803) {
- if (bnxt_alloc_ctx_mem(bp)) {
- rc = -ENODEV;
- goto resume_exit;
- }
- }
- if (BNXT_NEW_RM(bp))
- bnxt_hwrm_func_resc_qcaps(bp, false);
if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
rc = -ENODEV;
@@ -12077,6 +13850,8 @@ static int bnxt_resume(struct device *device)
resume_exit:
bnxt_ulp_start(bp, rc);
+ if (!rc)
+ bnxt_reenable_sriov(bp);
rtnl_unlock();
return rc;
}
@@ -12116,10 +13891,17 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_DISCONNECT;
}
+ if (state == pci_channel_io_frozen)
+ set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
+
if (netif_running(netdev))
bnxt_close(netdev);
- pci_disable_device(pdev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
rtnl_unlock();
/* Request a slot reset. */
@@ -12137,10 +13919,12 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
+ pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
+ int retry = 0;
int err = 0;
- pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
+ int off;
netdev_info(bp->dev, "PCI Slot Reset\n");
@@ -12151,22 +13935,56 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
"Cannot re-enable PCI device after reset.\n");
} else {
pci_set_master(pdev);
+ /* Upon a fatal error, the device's internal logic that latches
+ * the BAR values is reset and is restored only by rewriting the
+ * BARs.
+ *
+ * Since pci_restore_state() does not rewrite a BAR whose current
+ * value matches the previously saved value, the driver writes
+ * the BARs to 0 first to force the restore after a fatal error.
+ */
+ if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
+ &bp->state)) {
+ for (off = PCI_BASE_ADDRESS_0;
+ off <= PCI_BASE_ADDRESS_5; off += 4)
+ pci_write_config_dword(bp->pdev, off, 0);
+ }
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
- err = bnxt_hwrm_func_reset(bp);
- if (!err && netif_running(netdev))
- err = bnxt_open(netdev);
+ bnxt_inv_fw_health_reg(bp);
+ bnxt_try_map_fw_health_reg(bp);
+ /* In some PCIe AER scenarios, firmware may take up to
+ * 10 seconds to become ready in the worst case.
+ */
+ do {
+ err = bnxt_try_recover_fw(bp);
+ if (!err)
+ break;
+ retry++;
+ } while (retry < BNXT_FW_SLOT_RESET_RETRY);
+
+ if (err) {
+ dev_err(&pdev->dev, "Firmware not ready\n");
+ goto reset_exit;
+ }
+
+ err = bnxt_hwrm_func_reset(bp);
if (!err)
result = PCI_ERS_RESULT_RECOVERED;
- bnxt_ulp_start(bp, err);
- }
- if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
- dev_close(netdev);
+ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
+ err = bnxt_init_int_mode(bp);
+ bnxt_ulp_irq_restart(bp, err);
+ }
+reset_exit:
+ bnxt_clear_reservations(bp, true);
rtnl_unlock();
- return PCI_ERS_RESULT_RECOVERED;
+ return result;
}
/**
@@ -12179,10 +13997,21 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
static void bnxt_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(netdev);
+ int err;
+ netdev_info(bp->dev, "PCI Slot Resume\n");
rtnl_lock();
- netif_device_attach(netdev);
+ err = bnxt_hwrm_func_qcaps(bp);
+ if (!err && netif_running(netdev))
+ err = bnxt_open(netdev);
+
+ bnxt_ulp_start(bp, err);
+ if (!err) {
+ bnxt_reenable_sriov(bp);
+ netif_device_attach(netdev);
+ }
rtnl_unlock();
}
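For context, error_detected, slot_reset and resume are the standard PCI AER recovery callbacks; the driver plugs them into its pci_driver through a pci_error_handlers table elsewhere in this file. A minimal sketch of that wiring, with the table and driver structure names assumed rather than taken from this hunk:

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume,
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.err_handler	= &bnxt_err_handler,
	/* shutdown/suspend/sriov_configure hooks omitted from this sketch */
};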
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index cabef0b4f5fb..d5fa43cfe524 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,12 +12,15 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.10.1"
+/* DO NOT CHANGE DRV_VER_* defines
+ * FIXME: Delete them
+ */
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 10
-#define DRV_VER_UPD 1
+#define DRV_VER_UPD 2
+#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
#include <linux/crash_dump.h>
@@ -25,6 +28,7 @@
#include <net/dst_metadata.h>
#include <net/xdp.h>
#include <linux/dim.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#ifdef CONFIG_TEE_BNXT_FW
#include <linux/firmware/broadcom/tee_bnxt_fw.h>
#endif
@@ -86,6 +90,8 @@ struct tx_bd_ext {
#define TX_BD_CFA_META_KEY_VLAN (1 << 28)
};
+#define BNXT_TX_PTP_IS_SET(lflags) ((lflags) & cpu_to_le32(TX_BD_FLAGS_STAMP))
+
struct rx_bd {
__le32 rx_bd_len_flags_type;
#define RX_BD_TYPE (0x3f << 0)
@@ -156,6 +162,7 @@ struct rx_cmp {
#define RX_CMP_FLAGS_RSS_VALID (1 << 10)
#define RX_CMP_FLAGS_UNUSED (1 << 11)
#define RX_CMP_FLAGS_ITYPES_SHIFT 12
+ #define RX_CMP_FLAGS_ITYPES_MASK 0xf000
#define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12)
#define RX_CMP_FLAGS_ITYPE_IP (1 << 12)
#define RX_CMP_FLAGS_ITYPE_TCP (2 << 12)
@@ -237,7 +244,7 @@ struct rx_cmp_ext {
#define RX_CMPL_CFA_CODE_MASK (0xffff << 16)
#define RX_CMPL_CFA_CODE_SFT 16
- __le32 rx_cmp_unused3;
+ __le32 rx_cmp_timestamp;
};
#define RX_CMP_L2_ERRORS \
@@ -482,6 +489,15 @@ struct rx_tpa_end_cmp_ext {
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL)
+#define EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION)
+
+#define EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2) \
+ ((data2) & \
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK)
+
#define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1) \
!!((data1) & \
ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC)
@@ -490,6 +506,16 @@ struct rx_tpa_end_cmp_ext {
!!((data1) & \
ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED)
+#define BNXT_EVENT_ERROR_REPORT_TYPE(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK) >>\
+ ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT)
+
+#define BNXT_EVENT_INVALID_SIGNAL_DATA(data2) \
+ (((data2) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK) >>\
+ ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT)
+
struct nqe_cn {
__le16 type;
#define NQ_CN_TYPE_MASK 0x3fUL
@@ -535,6 +561,9 @@ struct nqe_cn {
#define DBR_TYPE_NQ_ARM (0xbULL << 60)
#define DBR_TYPE_NULL (0xfULL << 60)
+#define DB_PF_OFFSET_P5 0x10000
+#define DB_VF_OFFSET_P5 0x4000
+
#define INVALID_HW_RING_ID ((u16)-1)
/* The hardware supports certain page sizes. Use the supported page sizes
@@ -562,9 +591,12 @@ struct nqe_cn {
#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
#define BNXT_MAX_MTU 9500
-#define BNXT_MAX_PAGE_MODE_MTU \
+#define BNXT_PAGE_MODE_BUF_SIZE \
((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \
XDP_PACKET_HEADROOM)
+#define BNXT_MAX_PAGE_MODE_MTU \
+ BNXT_PAGE_MODE_BUF_SIZE - \
+ SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info))
#define BNXT_MIN_PKT_SIZE 52
@@ -577,15 +609,17 @@ struct nqe_cn {
#define MAX_TPA_SEGS_P5 0x3f
#if (BNXT_PAGE_SHIFT == 16)
-#define MAX_RX_PAGES 1
+#define MAX_RX_PAGES_AGG_ENA 1
+#define MAX_RX_PAGES 4
#define MAX_RX_AGG_PAGES 4
#define MAX_TX_PAGES 1
-#define MAX_CP_PAGES 8
+#define MAX_CP_PAGES 16
#else
-#define MAX_RX_PAGES 8
+#define MAX_RX_PAGES_AGG_ENA 8
+#define MAX_RX_PAGES 32
#define MAX_RX_AGG_PAGES 32
#define MAX_TX_PAGES 8
-#define MAX_CP_PAGES 64
+#define MAX_CP_PAGES 128
#endif
#define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
@@ -603,9 +637,15 @@ struct nqe_cn {
#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
#define BNXT_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1)
+#define BNXT_MAX_RX_DESC_CNT_JUM_ENA (RX_DESC_CNT * MAX_RX_PAGES_AGG_ENA - 1)
#define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
#define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
+/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra
+ * BD because the first TX BD is always a long BD.
+ */
+#define BNXT_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
+
#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
@@ -647,37 +687,7 @@ struct nqe_cn {
#define RING_CMP(idx) ((idx) & bp->cp_ring_mask)
#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
-#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
-#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
#define DFLT_HWRM_CMD_TIMEOUT 500
-#define SHORT_HWRM_CMD_TIMEOUT 20
-#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
-#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
-#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12)
-#define HWRM_RESP_ERR_CODE_MASK 0xffff
-#define HWRM_RESP_LEN_OFFSET 4
-#define HWRM_RESP_LEN_MASK 0xffff0000
-#define HWRM_RESP_LEN_SFT 16
-#define HWRM_RESP_VALID_MASK 0xff000000
-#define BNXT_HWRM_REQ_MAX_SIZE 128
-#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
- BNXT_HWRM_REQ_MAX_SIZE)
-#define HWRM_SHORT_MIN_TIMEOUT 3
-#define HWRM_SHORT_MAX_TIMEOUT 10
-#define HWRM_SHORT_TIMEOUT_COUNTER 5
-
-#define HWRM_MIN_TIMEOUT 25
-#define HWRM_MAX_TIMEOUT 40
-
-#define HWRM_TOTAL_TIMEOUT(n) (((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ? \
- ((n) * HWRM_SHORT_MIN_TIMEOUT) : \
- (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \
- ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT))
-
-#define HWRM_VALID_BIT_DELAY_USEC 150
-
-#define BNXT_HWRM_CHNL_CHIMP 0
-#define BNXT_HWRM_CHNL_KONG 1
#define BNXT_RX_EVENT 1
#define BNXT_AGG_EVENT 2
@@ -691,13 +701,12 @@ struct bnxt_sw_tx_bd {
};
DEFINE_DMA_UNMAP_ADDR(mapping);
DEFINE_DMA_UNMAP_LEN(len);
+ struct page *page;
u8 is_gso;
u8 is_push;
u8 action;
- union {
- unsigned short nr_frags;
- u16 rx_prod;
- };
+ unsigned short nr_frags;
+ u16 rx_prod;
};
struct bnxt_sw_rx_bd {
@@ -712,6 +721,13 @@ struct bnxt_sw_rx_agg_bd {
dma_addr_t mapping;
};
+struct bnxt_mem_init {
+ u8 init_val;
+ u16 offset;
+#define BNXT_MEM_INVALID_OFFSET 0xffff
+ u16 size;
+};
+
struct bnxt_ring_mem_info {
int nr_pages;
int page_size;
@@ -721,7 +737,7 @@ struct bnxt_ring_mem_info {
#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
u16 depth;
- u8 init_val;
+ struct bnxt_mem_init *mem_init;
void **pg_arr;
dma_addr_t *dma_arr;
@@ -770,6 +786,7 @@ struct bnxt_tx_ring_info {
u16 tx_prod;
u16 tx_cons;
u16 txq_index;
+ u8 kick_pending;
struct bnxt_db_info tx_db;
struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
@@ -785,6 +802,8 @@ struct bnxt_tx_ring_info {
u32 dev_state;
struct bnxt_ring_struct tx_ring_struct;
+ /* Synchronize simultaneous xdp_xmit on same ring */
+ spinlock_t xdp_tx_lock;
};
#define BNXT_LEGACY_COAL_CMPL_PARAMS \
@@ -832,6 +851,7 @@ struct bnxt_coal {
u16 idle_thresh;
u8 bufs_per_record;
u8 budget;
+ u16 flags;
};
struct bnxt_tpa_info {
@@ -905,6 +925,31 @@ struct bnxt_rx_ring_info {
struct page_pool *page_pool;
};
+struct bnxt_rx_sw_stats {
+ u64 rx_l4_csum_errors;
+ u64 rx_resets;
+ u64 rx_buf_errors;
+ u64 rx_oom_discards;
+ u64 rx_netpoll_discards;
+};
+
+struct bnxt_cmn_sw_stats {
+ u64 missed_irqs;
+};
+
+struct bnxt_sw_stats {
+ struct bnxt_rx_sw_stats rx;
+ struct bnxt_cmn_sw_stats cmn;
+};
+
+struct bnxt_stats_mem {
+ u64 *sw_stats;
+ u64 *hw_masks;
+ void *hw_stats;
+ dma_addr_t hw_stats_map;
+ int len;
+};
+
struct bnxt_cp_ring_info {
struct bnxt_napi *bnapi;
u32 cp_raw_cons;
@@ -923,18 +968,16 @@ struct bnxt_cp_ring_info {
struct dim dim;
union {
- struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
- struct nqe_cn *nq_desc_ring[MAX_CP_PAGES];
+ struct tx_cmp **cp_desc_ring;
+ struct nqe_cn **nq_desc_ring;
};
- dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
+ dma_addr_t *cp_desc_mapping;
- struct ctx_hw_stats *hw_stats;
- dma_addr_t hw_stats_map;
+ struct bnxt_stats_mem stats;
u32 hw_stats_ctx_id;
- u64 rx_l4_csum_errors;
- u64 rx_buf_errors;
- u64 missed_irqs;
+
+ struct bnxt_sw_stats sw_stats;
struct bnxt_ring_struct cp_ring_struct;
@@ -1004,6 +1047,15 @@ struct bnxt_vnic_info {
__le16 *rss_table;
dma_addr_t rss_hash_key_dma_addr;
u64 *rss_hash_key;
+ int rss_table_size;
+#define BNXT_RSS_TABLE_ENTRIES_P5 64
+#define BNXT_RSS_TABLE_SIZE_P5 (BNXT_RSS_TABLE_ENTRIES_P5 * 4)
+#define BNXT_RSS_TABLE_MAX_TBL_P5 8
+#define BNXT_MAX_RSS_TABLE_SIZE_P5 \
+ (BNXT_RSS_TABLE_SIZE_P5 * BNXT_RSS_TABLE_MAX_TBL_P5)
+#define BNXT_MAX_RSS_TABLE_ENTRIES_P5 \
+ (BNXT_RSS_TABLE_ENTRIES_P5 * BNXT_RSS_TABLE_MAX_TBL_P5)
+
u32 rx_mask;
u8 *mc_list;
@@ -1064,7 +1116,6 @@ struct bnxt_vf_info {
#define BNXT_VF_LINK_FORCED 0x4
#define BNXT_VF_LINK_UP 0x8
#define BNXT_VF_TRUST 0x10
- u32 func_flags; /* func cfg flags */
u32 min_tx_rate;
u32 max_tx_rate;
void *hwrm_cmd_req_addr;
@@ -1124,8 +1175,15 @@ struct bnxt_link_info {
#define BNXT_LINK_SIGNAL PORT_PHY_QCFG_RESP_LINK_SIGNAL
#define BNXT_LINK_LINK PORT_PHY_QCFG_RESP_LINK_LINK
u8 wire_speed;
- u8 loop_back;
- u8 link_up;
+ u8 phy_state;
+#define BNXT_PHY_STATE_ENABLED 0
+#define BNXT_PHY_STATE_DISABLED 1
+
+ u8 link_state;
+#define BNXT_LINK_STATE_UNKNOWN 0
+#define BNXT_LINK_STATE_DOWN 1
+#define BNXT_LINK_STATE_UP 2
+#define BNXT_LINK_IS_UP(bp) ((bp)->link_info.link_state == BNXT_LINK_STATE_UP)
u8 duplex;
#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
@@ -1160,6 +1218,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
#define BNXT_LINK_SPEED_100GB PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
u16 support_speeds;
+ u16 support_pam4_speeds;
u16 auto_link_speeds; /* fw adv setting */
#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
@@ -1171,24 +1230,51 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
#define BNXT_LINK_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB
+ u16 auto_pam4_link_speeds;
+#define BNXT_LINK_PAM4_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G
+#define BNXT_LINK_PAM4_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G
+#define BNXT_LINK_PAM4_SPEED_MSK_200GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G
u16 support_auto_speeds;
+ u16 support_pam4_auto_speeds;
u16 lp_auto_link_speeds;
+ u16 lp_auto_pam4_link_speeds;
u16 force_link_speed;
+ u16 force_pam4_link_speed;
u32 preemphasis;
u8 module_status;
+ u8 active_fec_sig_mode;
u16 fec_cfg;
+#define BNXT_FEC_NONE PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED
+#define BNXT_FEC_AUTONEG_CAP PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED
#define BNXT_FEC_AUTONEG PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED
+#define BNXT_FEC_ENC_BASE_R_CAP \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED
#define BNXT_FEC_ENC_BASE_R PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED
-#define BNXT_FEC_ENC_RS PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED
+#define BNXT_FEC_ENC_RS_CAP \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED
+#define BNXT_FEC_ENC_LLRS_CAP \
+ (PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED | \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED)
+#define BNXT_FEC_ENC_RS \
+ (PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED | \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED | \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED)
+#define BNXT_FEC_ENC_LLRS \
+ (PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED | \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED)
/* copy of requested setting from ethtool cmd */
u8 autoneg;
#define BNXT_AUTONEG_SPEED 1
#define BNXT_AUTONEG_FLOW_CTRL 2
+ u8 req_signal_mode;
+#define BNXT_SIG_MODE_NRZ PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ
+#define BNXT_SIG_MODE_PAM4 PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4
u8 req_duplex;
u8 req_flow_ctrl;
u16 req_link_speed;
u16 advertising; /* user adv setting */
+ u16 advertising_pam4;
bool force_link_chng;
bool phy_retry;
@@ -1200,6 +1286,49 @@ struct bnxt_link_info {
struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
};
+#define BNXT_FEC_RS544_ON \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE)
+
+#define BNXT_FEC_RS544_OFF \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE)
+
+#define BNXT_FEC_RS272_ON \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE)
+
+#define BNXT_FEC_RS272_OFF \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE)
+
+#define BNXT_PAM4_SUPPORTED(link_info) \
+ ((link_info)->support_pam4_speeds)
+
+#define BNXT_FEC_RS_ON(link_info) \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE | \
+ (BNXT_PAM4_SUPPORTED(link_info) ? \
+ (BNXT_FEC_RS544_ON | BNXT_FEC_RS272_OFF) : 0))
+
+#define BNXT_FEC_LLRS_ON \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE | \
+ BNXT_FEC_RS272_ON | BNXT_FEC_RS544_OFF)
+
+#define BNXT_FEC_RS_OFF(link_info) \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE | \
+ (BNXT_PAM4_SUPPORTED(link_info) ? \
+ (BNXT_FEC_RS544_OFF | BNXT_FEC_RS272_OFF) : 0))
+
+#define BNXT_FEC_BASE_R_ON(link_info) \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE | \
+ BNXT_FEC_RS_OFF(link_info))
+
+#define BNXT_FEC_ALL_OFF(link_info) \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE | \
+ BNXT_FEC_RS_OFF(link_info))
+
#define BNXT_MAX_QUEUE 8
struct bnxt_queue_info {
@@ -1225,22 +1354,30 @@ struct bnxt_led_info {
struct bnxt_test_info {
u8 offline_mask;
- u8 flags;
-#define BNXT_TEST_FL_EXT_LPBK 0x1
-#define BNXT_TEST_FL_AN_PHY_LPBK 0x2
u16 timeout;
char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
};
+#define CHIMP_REG_VIEW_ADDR \
+ ((bp->flags & BNXT_FLAG_CHIP_P5) ? 0x80000000 : 0xb1000000)
+
#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
+#define BNXT_GRC_REG_STATUS_P5 0x520
+
#define BNXT_GRCPF_REG_KONG_COMM 0xA00
#define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00
+#define BNXT_GRC_REG_CHIP_NUM 0x48
+#define BNXT_GRC_REG_BASE 0x260000
+
+#define BNXT_TS_REG_TIMESYNC_TS0_LOWER 0x640180c
+#define BNXT_TS_REG_TIMESYNC_TS0_UPPER 0x6401810
+
#define BNXT_GRC_BASE_MASK 0xfffff000
#define BNXT_GRC_OFFSET_MASK 0x00000ffc
@@ -1329,6 +1466,23 @@ struct bnxt_ctx_pg_info {
struct bnxt_ctx_pg_info **ctx_pg_tbl;
};
+#define BNXT_MAX_TQM_SP_RINGS 1
+#define BNXT_MAX_TQM_FP_RINGS 8
+#define BNXT_MAX_TQM_RINGS \
+ (BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
+
+#define BNXT_BACKING_STORE_CFG_LEGACY_LEN 256
+
+#define BNXT_SET_CTX_PAGE_ATTR(attr) \
+do { \
+ if (BNXT_PAGE_SIZE == 0x2000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K; \
+ else if (BNXT_PAGE_SIZE == 0x10000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K; \
+ else \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \
+} while (0)
+
struct bnxt_ctx_mem_info {
u32 qp_max_entries;
u16 qp_min_qp1_entries;
@@ -1354,7 +1508,7 @@ struct bnxt_ctx_mem_info {
u32 tim_max_entries;
u16 mrav_num_entries_units;
u8 tqm_entries_multiple;
- u8 ctx_kind_initializer;
+ u8 tqm_fp_rings_count;
u32 flags;
#define BNXT_CTX_FLAG_INITED 0x01
@@ -1366,7 +1520,31 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info stat_mem;
struct bnxt_ctx_pg_info mrav_mem;
struct bnxt_ctx_pg_info tim_mem;
- struct bnxt_ctx_pg_info *tqm_mem[9];
+ struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
+
+#define BNXT_CTX_MEM_INIT_QP 0
+#define BNXT_CTX_MEM_INIT_SRQ 1
+#define BNXT_CTX_MEM_INIT_CQ 2
+#define BNXT_CTX_MEM_INIT_VNIC 3
+#define BNXT_CTX_MEM_INIT_STAT 4
+#define BNXT_CTX_MEM_INIT_MRAV 5
+#define BNXT_CTX_MEM_INIT_MAX 6
+ struct bnxt_mem_init mem_init[BNXT_CTX_MEM_INIT_MAX];
+};
+
+enum bnxt_health_severity {
+ SEVERITY_NORMAL = 0,
+ SEVERITY_WARNING,
+ SEVERITY_RECOVERABLE,
+ SEVERITY_FATAL,
+};
+
+enum bnxt_health_remedy {
+ REMEDY_DEVLINK_RECOVER,
+ REMEDY_POWER_CYCLE_DEVICE,
+ REMEDY_POWER_CYCLE_HOST,
+ REMEDY_FW_UPDATE,
+ REMEDY_HW_REPLACE,
};
struct bnxt_fw_health {
@@ -1386,21 +1564,27 @@ struct bnxt_fw_health {
u32 last_fw_heartbeat;
u32 last_fw_reset_cnt;
u8 enabled:1;
- u8 master:1;
- u8 fatal:1;
+ u8 primary:1;
+ u8 status_reliable:1;
+ u8 resets_reliable:1;
u8 tmr_multiplier;
u8 tmr_counter;
u8 fw_reset_seq_cnt;
u32 fw_reset_seq_regs[16];
u32 fw_reset_seq_vals[16];
u32 fw_reset_seq_delay_msec[16];
+ u32 echo_req_data1;
+ u32 echo_req_data2;
struct devlink_health_reporter *fw_reporter;
- struct devlink_health_reporter *fw_reset_reporter;
- struct devlink_health_reporter *fw_fatal_reporter;
-};
-
-struct bnxt_fw_reporter_ctx {
- unsigned long sp_event;
+ /* Protects severity and remedy */
+ struct mutex lock;
+ enum bnxt_health_severity severity;
+ enum bnxt_health_remedy remedy;
+ u32 arrests;
+ u32 discoveries;
+ u32 survivals;
+ u32 fatalities;
+ u32 diagnoses;
};
#define BNXT_FW_HEALTH_REG_TYPE_MASK 3
@@ -1415,8 +1599,77 @@ struct bnxt_fw_reporter_ctx {
#define BNXT_FW_HEALTH_WIN_BASE 0x3000
#define BNXT_FW_HEALTH_WIN_MAP_OFF 8
+#define BNXT_FW_HEALTH_WIN_OFF(reg) (BNXT_FW_HEALTH_WIN_BASE + \
+ ((reg) & BNXT_GRC_OFFSET_MASK))
+
+#define BNXT_FW_STATUS_HEALTH_MSK 0xffff
#define BNXT_FW_STATUS_HEALTHY 0x8000
#define BNXT_FW_STATUS_SHUTDOWN 0x100000
+#define BNXT_FW_STATUS_RECOVERING 0x400000
+
+#define BNXT_FW_IS_HEALTHY(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) ==\
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_IS_BOOTING(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) < \
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_IS_ERR(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) > \
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_IS_RECOVERING(sts) (BNXT_FW_IS_ERR(sts) && \
+ ((sts) & BNXT_FW_STATUS_RECOVERING))
+
+#define BNXT_FW_RETRY 5
+#define BNXT_FW_IF_RETRY 10
+#define BNXT_FW_SLOT_RESET_RETRY 4
+
+enum board_idx {
+ BCM57301,
+ BCM57302,
+ BCM57304,
+ BCM57417_NPAR,
+ BCM58700,
+ BCM57311,
+ BCM57312,
+ BCM57402,
+ BCM57404,
+ BCM57406,
+ BCM57402_NPAR,
+ BCM57407,
+ BCM57412,
+ BCM57414,
+ BCM57416,
+ BCM57417,
+ BCM57412_NPAR,
+ BCM57314,
+ BCM57417_SFP,
+ BCM57416_SFP,
+ BCM57404_NPAR,
+ BCM57406_NPAR,
+ BCM57407_SFP,
+ BCM57407_NPAR,
+ BCM57414_NPAR,
+ BCM57416_NPAR,
+ BCM57452,
+ BCM57454,
+ BCM5745x_NPAR,
+ BCM57508,
+ BCM57504,
+ BCM57502,
+ BCM57508_NPAR,
+ BCM57504_NPAR,
+ BCM57502_NPAR,
+ BCM58802,
+ BCM58804,
+ BCM58808,
+ NETXTREME_E_VF,
+ NETXTREME_C_VF,
+ NETXTREME_S_VF,
+ NETXTREME_C_VF_HV,
+ NETXTREME_E_VF_HV,
+ NETXTREME_E_P5_VF,
+ NETXTREME_E_P5_VF_HV,
+};
struct bnxt {
void __iomem *bar0;
@@ -1459,6 +1712,8 @@ struct bnxt {
u8 chip_rev;
+#define CHIP_NUM_58818 0xd818
+
#define BNXT_CHIP_NUM_5730X(chip_num) \
((chip_num) >= CHIP_NUM_57301 && \
(chip_num) <= CHIP_NUM_57304)
@@ -1498,6 +1753,10 @@ struct bnxt {
(chip_num) == CHIP_NUM_58804 || \
(chip_num) == CHIP_NUM_58808)
+#define BNXT_VPD_FLD_LEN 32
+ char board_partno[BNXT_VPD_FLD_LEN];
+ char board_serialno[BNXT_VPD_FLD_LEN];
+
struct net_device *dev;
struct pci_dev *pdev;
@@ -1524,7 +1783,6 @@ struct bnxt {
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
#define BNXT_FLAG_UDP_RSS_CAP 0x800
- #define BNXT_FLAG_EEE_CAP 0x1000
#define BNXT_FLAG_NEW_RSS_CAP 0x2000
#define BNXT_FLAG_WOL_CAP 0x4000
#define BNXT_FLAG_ROCEV1_CAP 0x8000
@@ -1533,6 +1791,7 @@ struct bnxt {
BNXT_FLAG_ROCEV2_CAP)
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
+ #define BNXT_FLAG_CHIP_SR2 0x80000
#define BNXT_FLAG_MULTI_HOST 0x100000
#define BNXT_FLAG_DSN_VALID 0x200000
#define BNXT_FLAG_DOUBLE_DB 0x400000
@@ -1540,7 +1799,6 @@ struct bnxt {
#define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
- #define BNXT_FLAG_PCIE_STATS 0x40000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -1551,20 +1809,30 @@ struct bnxt {
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
-#define BNXT_PHY_CFG_ABLE(bp) (BNXT_SINGLE_PF(bp) || \
- ((bp)->fw_cap & BNXT_FW_CAP_SHARED_PORT_CFG))
+#define BNXT_SH_PORT_CFG_OK(bp) (BNXT_PF(bp) && \
+ ((bp)->phy_flags & BNXT_PHY_FL_SHARED_PORT_CFG))
+#define BNXT_PHY_CFG_ABLE(bp) ((BNXT_SINGLE_PF(bp) || \
+ BNXT_SH_PORT_CFG_OK(bp)) && \
+ (bp)->link_info.phy_state == BNXT_PHY_STATE_ENABLED)
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
(!((bp)->flags & BNXT_FLAG_CHIP_P5) || \
(bp)->max_tpa_v2) && !is_kdump_kernel())
+#define BNXT_RX_JUMBO_MODE(bp) ((bp)->flags & BNXT_FLAG_JUMBO)
-/* Chip class phase 5 */
-#define BNXT_CHIP_P5(bp) \
+#define BNXT_CHIP_SR2(bp) \
+ ((bp)->chip_num == CHIP_NUM_58818)
+
+#define BNXT_CHIP_P5_THOR(bp) \
((bp)->chip_num == CHIP_NUM_57508 || \
(bp)->chip_num == CHIP_NUM_57504 || \
(bp)->chip_num == CHIP_NUM_57502)
+/* Chip class phase 5 */
+#define BNXT_CHIP_P5(bp) \
+ (BNXT_CHIP_P5_THOR(bp) || BNXT_CHIP_SR2(bp))
+
/* Chip class phase 4.x */
#define BNXT_CHIP_P4(bp) \
(BNXT_CHIP_NUM_57X1X((bp)->chip_num) || \
@@ -1577,7 +1845,6 @@ struct bnxt {
(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
struct bnxt_en_dev *edev;
- struct bnxt_en_dev * (*ulp_probe)(struct net_device *);
struct bnxt_napi **bnapi;
@@ -1631,6 +1898,8 @@ struct bnxt {
struct bnxt_ring_grp_info *grp_info;
struct bnxt_vnic_info *vnic_info;
int nr_vnics;
+ u16 *rss_indir_tbl;
+ u16 rss_indir_tbl_entries;
u32 rss_hash_cfg;
u16 max_mtu;
@@ -1655,6 +1924,18 @@ struct bnxt {
#define BNXT_STATE_ABORT_ERR 5
#define BNXT_STATE_FW_FATAL_COND 6
#define BNXT_STATE_DRV_REGISTERED 7
+#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
+#define BNXT_STATE_NAPI_DISABLED 9
+#define BNXT_STATE_L2_FILTER_RETRY 10
+#define BNXT_STATE_FW_ACTIVATE 11
+#define BNXT_STATE_RECOVER 12
+#define BNXT_STATE_FW_NON_FATAL_COND 13
+#define BNXT_STATE_FW_ACTIVATE_RESET 14
+#define BNXT_STATE_HALF_OPEN 15 /* For offline ethtool tests */
+
+#define BNXT_NO_FW_ACCESS(bp) \
+ (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
+ pci_channel_offline((bp)->pdev))
struct bnxt_irq *irq_tbl;
int total_irqs;
@@ -1687,53 +1968,57 @@ struct bnxt {
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000
#define BNXT_FW_CAP_HOT_RESET 0x00200000
- #define BNXT_FW_CAP_SHARED_PORT_CFG 0x00400000
+ #define BNXT_FW_CAP_PTP_RTC 0x00400000
+ #define BNXT_FW_CAP_RX_ALL_PKT_TS 0x00800000
+ #define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
+ #define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
+ #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
+ #define BNXT_FW_CAP_LIVEPATCH 0x08000000
+ #define BNXT_FW_CAP_PTP_PPS 0x10000000
+ #define BNXT_FW_CAP_HOT_RESET_IF 0x20000000
+ #define BNXT_FW_CAP_RING_MONITOR 0x40000000
+ #define BNXT_FW_CAP_DBG_QCAPS 0x80000000
+
+ u32 fw_dbg_cap;
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u16 hwrm_cmd_kong_seq;
- u16 hwrm_intr_seq_id;
- void *hwrm_short_cmd_req_addr;
- dma_addr_t hwrm_short_cmd_req_dma_addr;
- void *hwrm_cmd_resp_addr;
- dma_addr_t hwrm_cmd_resp_dma_addr;
- void *hwrm_cmd_kong_resp_addr;
- dma_addr_t hwrm_cmd_kong_resp_dma_addr;
+ struct dma_pool *hwrm_dma_pool;
+ struct hlist_head hwrm_pending_list;
struct rtnl_link_stats64 net_stats_prev;
- struct rx_port_stats *hw_rx_port_stats;
- struct tx_port_stats *hw_tx_port_stats;
- struct rx_port_stats_ext *hw_rx_port_stats_ext;
- struct tx_port_stats_ext *hw_tx_port_stats_ext;
- struct pcie_ctx_hw_stats *hw_pcie_stats;
- dma_addr_t hw_rx_port_stats_map;
- dma_addr_t hw_tx_port_stats_map;
- dma_addr_t hw_rx_port_stats_ext_map;
- dma_addr_t hw_tx_port_stats_ext_map;
- dma_addr_t hw_pcie_stats_map;
- int hw_port_stats_size;
+ struct bnxt_stats_mem port_stats;
+ struct bnxt_stats_mem rx_port_stats_ext;
+ struct bnxt_stats_mem tx_port_stats_ext;
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
u16 hw_ring_stats_size;
- u8 pri2cos[8];
+ u8 pri2cos_idx[8];
u8 pri2cos_valid;
u16 hwrm_max_req_len;
u16 hwrm_max_ext_req_len;
- int hwrm_cmd_timeout;
+ unsigned int hwrm_cmd_timeout;
+ unsigned int hwrm_cmd_max_timeout;
struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
struct hwrm_ver_get_output ver_resp;
#define FW_VER_STR_LEN 32
#define BC_HWRM_STR_LEN 21
#define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
char fw_ver_str[FW_VER_STR_LEN];
+ char hwrm_ver_supp[FW_VER_STR_LEN];
+ char nvm_cfg_ver[FW_VER_STR_LEN];
+ u64 fw_ver_code;
+#define BNXT_FW_VER_CODE(maj, min, bld, rsv) \
+ ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
+#define BNXT_FW_MAJ(bp) ((bp)->fw_ver_code >> 48)
+
+ u16 vxlan_fw_dst_port_id;
+ u16 nge_fw_dst_port_id;
__be16 vxlan_port;
- u8 vxlan_port_cnt;
- __le16 vxlan_fw_dst_port_id;
__be16 nge_port;
- u8 nge_port_cnt;
- __le16 nge_fw_dst_port_id;
u8 port_partition_type;
u8 port_count;
u16 br_mode;
@@ -1753,16 +2038,12 @@ struct bnxt {
#define BNXT_RX_NTP_FLTR_SP_EVENT 1
#define BNXT_LINK_CHNG_SP_EVENT 2
#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT 3
-#define BNXT_VXLAN_ADD_PORT_SP_EVENT 4
-#define BNXT_VXLAN_DEL_PORT_SP_EVENT 5
#define BNXT_RESET_TASK_SP_EVENT 6
#define BNXT_RST_RING_SP_EVENT 7
#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8
#define BNXT_PERIODIC_STATS_SP_EVENT 9
#define BNXT_HWRM_PORT_MODULE_SP_EVENT 10
#define BNXT_RESET_TASK_SILENT_SP_EVENT 11
-#define BNXT_GENEVE_ADD_PORT_SP_EVENT 12
-#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13
#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
#define BNXT_FLOW_STATS_SP_EVENT 15
#define BNXT_UPDATE_PHY_SP_EVENT 16
@@ -1770,6 +2051,7 @@ struct bnxt {
#define BNXT_FW_RESET_NOTIFY_SP_EVENT 18
#define BNXT_FW_EXCEPTION_SP_EVENT 19
#define BNXT_LINK_CFG_CHANGE_SP_EVENT 21
+#define BNXT_FW_ECHO_REQUEST_SP_EVENT 23
struct delayed_work fw_reset_task;
int fw_reset_state;
@@ -1797,18 +2079,13 @@ struct bnxt {
wait_queue_head_t sriov_cfg_wait;
bool sriov_cfg;
#define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000)
-
- /* lock to protect VF-rep creation/cleanup via
- * multiple paths such as ->sriov_configure() and
- * devlink ->eswitch_mode_set()
- */
- struct mutex sriov_lock;
#endif
#if BITS_PER_LONG == 32
/* ensure atomic 64-bit doorbell writes on 32-bit systems. */
spinlock_t db_lock;
#endif
+ int db_size;
#define BNXT_NTP_FLTR_MAX_FLTR 4096
#define BNXT_NTP_FLTR_HASH_SIZE 512
@@ -1828,6 +2105,19 @@ struct bnxt {
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
+ /* copied from flags and flags2 in hwrm_port_phy_qcaps_output */
+ u32 phy_flags;
+#define BNXT_PHY_FL_EEE_CAP PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED
+#define BNXT_PHY_FL_EXT_LPBK PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED
+#define BNXT_PHY_FL_AN_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED
+#define BNXT_PHY_FL_SHARED_PORT_CFG PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED
+#define BNXT_PHY_FL_PORT_STATS_NO_RESET PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET
+#define BNXT_PHY_FL_NO_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
+#define BNXT_PHY_FL_FW_MANAGED_LKDN PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN
+#define BNXT_PHY_FL_NO_FCS PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS
+#define BNXT_PHY_FL_NO_PAUSE (PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED << 8)
+#define BNXT_PHY_FL_NO_PFC (PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED << 8)
+
u8 num_tests;
struct bnxt_test_info *test_info;
@@ -1841,6 +2131,10 @@ struct bnxt {
#define BNXT_DUMP_CRASH 1
struct bpf_prog *xdp_prog;
+ u8 xdp_has_frags;
+
+ struct bnxt_ptp_cfg *ptp_cfg;
+ u8 ptp_all_rx_tstamp;
/* devlink interface and vf-rep structs */
struct devlink *dl;
@@ -1851,26 +2145,60 @@ struct bnxt {
u8 dsn[8];
struct bnxt_tc_info *tc_info;
struct list_head tc_indr_block_list;
- struct notifier_block tc_netdev_nb;
struct dentry *debugfs_pdev;
struct device *hwmon_dev;
+ enum board_idx board_idx;
};
+#define BNXT_NUM_RX_RING_STATS 8
+#define BNXT_NUM_TX_RING_STATS 8
+#define BNXT_NUM_TPA_RING_STATS 4
+#define BNXT_NUM_TPA_RING_STATS_P5 5
+#define BNXT_NUM_TPA_RING_STATS_P5_SR2 6
+
+#define BNXT_RING_STATS_SIZE_P5 \
+ ((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS + \
+ BNXT_NUM_TPA_RING_STATS_P5) * 8)
+
+#define BNXT_RING_STATS_SIZE_P5_SR2 \
+ ((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS + \
+ BNXT_NUM_TPA_RING_STATS_P5_SR2) * 8)
+
+#define BNXT_GET_RING_STATS64(sw, counter) \
+ (*((sw) + offsetof(struct ctx_hw_stats, counter) / 8))
+
+#define BNXT_GET_RX_PORT_STATS64(sw, counter) \
+ (*((sw) + offsetof(struct rx_port_stats, counter) / 8))
+
+#define BNXT_GET_TX_PORT_STATS64(sw, counter) \
+ (*((sw) + offsetof(struct tx_port_stats, counter) / 8))
+
+#define BNXT_PORT_STATS_SIZE \
+ (sizeof(struct rx_port_stats) + sizeof(struct tx_port_stats) + 1024)
+
+#define BNXT_TX_PORT_STATS_BYTE_OFFSET \
+ (sizeof(struct rx_port_stats) + 512)
+
#define BNXT_RX_STATS_OFFSET(counter) \
(offsetof(struct rx_port_stats, counter) / 8)
#define BNXT_TX_STATS_OFFSET(counter) \
((offsetof(struct tx_port_stats, counter) + \
- sizeof(struct rx_port_stats) + 512) / 8)
+ BNXT_TX_PORT_STATS_BYTE_OFFSET) / 8)
#define BNXT_RX_STATS_EXT_OFFSET(counter) \
(offsetof(struct rx_port_stats_ext, counter) / 8)
+#define BNXT_RX_STATS_EXT_NUM_LEGACY \
+ BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks)
+
#define BNXT_TX_STATS_EXT_OFFSET(counter) \
(offsetof(struct tx_port_stats_ext, counter) / 8)
-#define BNXT_PCIE_STATS_OFFSET(counter) \
- (offsetof(struct pcie_ctx_hw_stats, counter) / 8)
+#define BNXT_HW_FEATURE_VLAN_ALL_RX \
+ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
+#define BNXT_HW_FEATURE_VLAN_ALL_TX \
+ (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX)
#define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2
@@ -1890,24 +2218,36 @@ static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}
+static inline void bnxt_writeq(struct bnxt *bp, u64 val,
+ volatile void __iomem *addr)
+{
#if BITS_PER_LONG == 32
-#define writeq(val64, db) \
-do { \
- spin_lock(&bp->db_lock); \
- writel((val64) & 0xffffffff, db); \
- writel((val64) >> 32, (db) + 4); \
- spin_unlock(&bp->db_lock); \
-} while (0)
+ spin_lock(&bp->db_lock);
+ lo_hi_writeq(val, addr);
+ spin_unlock(&bp->db_lock);
+#else
+ writeq(val, addr);
+#endif
+}
-#define writeq_relaxed writeq
+static inline void bnxt_writeq_relaxed(struct bnxt *bp, u64 val,
+ volatile void __iomem *addr)
+{
+#if BITS_PER_LONG == 32
+ spin_lock(&bp->db_lock);
+ lo_hi_writeq_relaxed(val, addr);
+ spin_unlock(&bp->db_lock);
+#else
+ writeq_relaxed(val, addr);
#endif
+}
/* For TX and RX ring doorbells with no ordering guarantee*/
static inline void bnxt_db_write_relaxed(struct bnxt *bp,
struct bnxt_db_info *db, u32 idx)
{
if (bp->flags & BNXT_FLAG_CHIP_P5) {
- writeq_relaxed(db->db_key64 | idx, db->doorbell);
+ bnxt_writeq_relaxed(bp, db->db_key64 | idx, db->doorbell);
} else {
u32 db_val = db->db_key32 | idx;
@@ -1922,7 +2262,7 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
u32 idx)
{
if (bp->flags & BNXT_FLAG_CHIP_P5) {
- writeq(db->db_key64 | idx, db->doorbell);
+ bnxt_writeq(bp, db->db_key64 | idx, db->doorbell);
} else {
u32 db_val = db->db_key32 | idx;
@@ -1932,61 +2272,14 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
}
}
-static inline bool bnxt_cfa_hwrm_message(u16 req_type)
+/* Must hold rtnl_lock */
+static inline bool bnxt_sriov_cfg(struct bnxt *bp)
{
- switch (req_type) {
- case HWRM_CFA_ENCAP_RECORD_ALLOC:
- case HWRM_CFA_ENCAP_RECORD_FREE:
- case HWRM_CFA_DECAP_FILTER_ALLOC:
- case HWRM_CFA_DECAP_FILTER_FREE:
- case HWRM_CFA_EM_FLOW_ALLOC:
- case HWRM_CFA_EM_FLOW_FREE:
- case HWRM_CFA_EM_FLOW_CFG:
- case HWRM_CFA_FLOW_ALLOC:
- case HWRM_CFA_FLOW_FREE:
- case HWRM_CFA_FLOW_INFO:
- case HWRM_CFA_FLOW_FLUSH:
- case HWRM_CFA_FLOW_STATS:
- case HWRM_CFA_METER_PROFILE_ALLOC:
- case HWRM_CFA_METER_PROFILE_FREE:
- case HWRM_CFA_METER_PROFILE_CFG:
- case HWRM_CFA_METER_INSTANCE_ALLOC:
- case HWRM_CFA_METER_INSTANCE_FREE:
- return true;
- default:
- return false;
- }
-}
-
-static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req)
-{
- return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL &&
- bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type)));
-}
-
-static inline bool bnxt_hwrm_kong_chnl(struct bnxt *bp, struct input *req)
-{
- return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL &&
- req->resp_addr == cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr));
-}
-
-static inline void *bnxt_get_hwrm_resp_addr(struct bnxt *bp, void *req)
-{
- if (bnxt_hwrm_kong_chnl(bp, (struct input *)req))
- return bp->hwrm_cmd_kong_resp_addr;
- else
- return bp->hwrm_cmd_resp_addr;
-}
-
-static inline u16 bnxt_get_hwrm_seq_id(struct bnxt *bp, u16 dst)
-{
- u16 seq_id;
-
- if (dst == BNXT_HWRM_CHNL_CHIMP)
- seq_id = bp->hwrm_cmd_seq++;
- else
- seq_id = bp->hwrm_cmd_kong_seq++;
- return seq_id;
+#if defined(CONFIG_BNXT_SRIOV)
+ return BNXT_PF(bp) && (bp->pf.active_vfs || bp->sriov_cfg);
+#else
+ return false;
+#endif
}
extern const u16 bnxt_lhint_arr[];
@@ -1998,17 +2291,15 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
-void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
-int _hwrm_send_message(struct bnxt *, void *, u32, int);
-int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
-int hwrm_send_message(struct bnxt *, void *, u32, int);
-int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
int bmap_size, bool async_only);
+int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
+int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
int bnxt_hwrm_set_coal(struct bnxt *);
+void bnxt_free_ctx_mem(struct bnxt *bp);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
@@ -2017,20 +2308,29 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
+void bnxt_report_link(struct bnxt *bp);
+int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
+int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
+void bnxt_reenable_sriov(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
+int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
+ u32 *reg_buf);
void bnxt_fw_exception(struct bnxt *bp);
void bnxt_fw_reset(struct bnxt *bp);
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp);
+int bnxt_fw_init_one(struct bnxt *bp);
+bool bnxt_hwrm_reset_permitted(struct bnxt *bp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
int bnxt_restore_pf_fw_resources(struct bnxt *bp);
@@ -2038,5 +2338,5 @@ int bnxt_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
void bnxt_dim_work(struct work_struct *work);
int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
-
+void bnxt_print_device_info(struct bnxt *bp);
#endif
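The statistics rework above funnels each hardware counter block through struct bnxt_stats_mem, whose sw_stats array of u64s is indexed by the BNXT_GET_RING_STATS64()/BNXT_GET_RX_PORT_STATS64() macros. A hedged illustration of reading one per-ring counter; the helper name and the rx_ucast_pkts field of struct ctx_hw_stats are assumptions, not part of this patch:

/* Illustrative only: pull one 64-bit counter out of the software
 * shadow of a completion ring's hardware statistics block.
 */
static u64 bnxt_example_ring_rx_ucast(struct bnxt_cp_ring_info *cpr)
{
	u64 *sw = cpr->stats.sw_stats;

	return BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
}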
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
new file mode 100644
index 000000000000..c06789882036
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -0,0 +1,444 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2021 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_coredump.h"
+
+static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
+ struct bnxt_hwrm_dbg_dma_info *info)
+{
+ struct hwrm_dbg_cmn_input *cmn_req = msg;
+ __le16 *seq_ptr = msg + info->seq_off;
+ struct hwrm_dbg_cmn_output *cmn_resp;
+ u16 seq = 0, len, segs_off;
+ dma_addr_t dma_handle;
+ void *dma_buf, *resp;
+ int rc, off = 0;
+
+ dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle);
+ if (!dma_buf) {
+ hwrm_req_drop(bp, msg);
+ return -ENOMEM;
+ }
+
+ hwrm_req_timeout(bp, msg, bp->hwrm_cmd_max_timeout);
+ cmn_resp = hwrm_req_hold(bp, msg);
+ resp = cmn_resp;
+
+ segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
+ total_segments);
+ cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
+ cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
+ while (1) {
+ *seq_ptr = cpu_to_le16(seq);
+ rc = hwrm_req_send(bp, msg);
+ if (rc)
+ break;
+
+ len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
+ if (!seq &&
+ cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
+ info->segs = le16_to_cpu(*((__le16 *)(resp +
+ segs_off)));
+ if (!info->segs) {
+ rc = -EIO;
+ break;
+ }
+
+ info->dest_buf_size = info->segs *
+ sizeof(struct coredump_segment_record);
+ info->dest_buf = kmalloc(info->dest_buf_size,
+ GFP_KERNEL);
+ if (!info->dest_buf) {
+ rc = -ENOMEM;
+ break;
+ }
+ }
+
+ if (info->dest_buf) {
+ if ((info->seg_start + off + len) <=
+ BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
+ memcpy(info->dest_buf + off, dma_buf, len);
+ } else {
+ rc = -ENOBUFS;
+ break;
+ }
+ }
+
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
+ info->dest_buf_size += len;
+
+ if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
+ break;
+
+ seq++;
+ off += len;
+ }
+ hwrm_req_drop(bp, msg);
+ return rc;
+}
+
+static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
+ struct bnxt_coredump *coredump)
+{
+ struct bnxt_hwrm_dbg_dma_info info = {NULL};
+ struct hwrm_dbg_coredump_list_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST);
+ if (rc)
+ return rc;
+
+ info.dma_len = COREDUMP_LIST_BUF_LEN;
+ info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
+ info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
+ data_len);
+
+ rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
+ if (!rc) {
+ coredump->data = info.dest_buf;
+ coredump->data_size = info.dest_buf_size;
+ coredump->total_segs = info.segs;
+ }
+ return rc;
+}
+
+static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
+ u16 segment_id)
+{
+ struct hwrm_dbg_coredump_initiate_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE);
+ if (rc)
+ return rc;
+
+ hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
+ req->component_id = cpu_to_le16(component_id);
+ req->segment_id = cpu_to_le16(segment_id);
+
+ return hwrm_req_send(bp, req);
+}
+
+static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
+ u16 segment_id, u32 *seg_len,
+ void *buf, u32 buf_len, u32 offset)
+{
+ struct hwrm_dbg_coredump_retrieve_input *req;
+ struct bnxt_hwrm_dbg_dma_info info = {NULL};
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE);
+ if (rc)
+ return rc;
+
+ req->component_id = cpu_to_le16(component_id);
+ req->segment_id = cpu_to_le16(segment_id);
+
+ info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
+ info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
+ seq_no);
+ info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
+ data_len);
+ if (buf) {
+ info.dest_buf = buf + offset;
+ info.buf_len = buf_len;
+ info.seg_start = offset;
+ }
+
+ rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
+ if (!rc)
+ *seg_len = info.dest_buf_size;
+
+ return rc;
+}
+
+static void
+bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
+ struct bnxt_coredump_segment_hdr *seg_hdr,
+ struct coredump_segment_record *seg_rec, u32 seg_len,
+ int status, u32 duration, u32 instance)
+{
+ memset(seg_hdr, 0, sizeof(*seg_hdr));
+ memcpy(seg_hdr->signature, "sEgM", 4);
+ if (seg_rec) {
+ seg_hdr->component_id = (__force __le32)seg_rec->component_id;
+ seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
+ seg_hdr->low_version = seg_rec->version_low;
+ seg_hdr->high_version = seg_rec->version_hi;
+ seg_hdr->flags = cpu_to_le32(seg_rec->compress_flags);
+ } else {
+ /* For hwrm_ver_get response Component id = 2
+ * and Segment id = 0
+ */
+ seg_hdr->component_id = cpu_to_le32(2);
+ seg_hdr->segment_id = 0;
+ }
+ seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
+ seg_hdr->length = cpu_to_le32(seg_len);
+ seg_hdr->status = cpu_to_le32(status);
+ seg_hdr->duration = cpu_to_le32(duration);
+ seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
+ seg_hdr->instance = cpu_to_le32(instance);
+}
+
+static void bnxt_fill_cmdline(struct bnxt_coredump_record *record)
+{
+ struct mm_struct *mm = current->mm;
+ int i, len, last = 0;
+
+ if (mm) {
+ len = min_t(int, mm->arg_end - mm->arg_start,
+ sizeof(record->commandline) - 1);
+ if (len && !copy_from_user(record->commandline,
+ (char __user *)mm->arg_start, len)) {
+ for (i = 0; i < len; i++) {
+ if (record->commandline[i])
+ last = i;
+ else
+ record->commandline[i] = ' ';
+ }
+ record->commandline[last + 1] = 0;
+ return;
+ }
+ }
+
+ strscpy(record->commandline, current->comm, TASK_COMM_LEN);
+}
+
+static void
+bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
+ time64_t start, s16 start_utc, u16 total_segs,
+ int status)
+{
+ time64_t end = ktime_get_real_seconds();
+ u32 os_ver_major = 0, os_ver_minor = 0;
+ struct tm tm;
+
+ time64_to_tm(start, 0, &tm);
+ memset(record, 0, sizeof(*record));
+ memcpy(record->signature, "cOrE", 4);
+ record->flags = 0;
+ record->low_version = 0;
+ record->high_version = 1;
+ record->asic_state = 0;
+ strscpy(record->system_name, utsname()->nodename,
+ sizeof(record->system_name));
+ record->year = cpu_to_le16(tm.tm_year + 1900);
+ record->month = cpu_to_le16(tm.tm_mon + 1);
+ record->day = cpu_to_le16(tm.tm_mday);
+ record->hour = cpu_to_le16(tm.tm_hour);
+ record->minute = cpu_to_le16(tm.tm_min);
+ record->second = cpu_to_le16(tm.tm_sec);
+ record->utc_bias = cpu_to_le16(start_utc);
+ bnxt_fill_cmdline(record);
+ record->total_segments = cpu_to_le32(total_segs);
+
+ if (sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor) != 2)
+ netdev_warn(bp->dev, "Unknown OS release in coredump\n");
+ record->os_ver_major = cpu_to_le32(os_ver_major);
+ record->os_ver_minor = cpu_to_le32(os_ver_minor);
+
+ strscpy(record->os_name, utsname()->sysname, sizeof(record->os_name));
+ time64_to_tm(end, 0, &tm);
+ record->end_year = cpu_to_le16(tm.tm_year + 1900);
+ record->end_month = cpu_to_le16(tm.tm_mon + 1);
+ record->end_day = cpu_to_le16(tm.tm_mday);
+ record->end_hour = cpu_to_le16(tm.tm_hour);
+ record->end_minute = cpu_to_le16(tm.tm_min);
+ record->end_second = cpu_to_le16(tm.tm_sec);
+ record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
+ record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
+ bp->ver_resp.chip_rev << 8 |
+ bp->ver_resp.chip_metal);
+ record->asic_id2 = 0;
+ record->coredump_status = cpu_to_le32(status);
+ record->ioctl_low_version = 0;
+ record->ioctl_high_version = 0;
+}
+
+static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
+{
+ u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
+ u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
+ struct coredump_segment_record *seg_record = NULL;
+ struct bnxt_coredump_segment_hdr seg_hdr;
+ struct bnxt_coredump coredump = {NULL};
+ time64_t start_time;
+ u16 start_utc;
+ int rc = 0, i;
+
+ if (buf)
+ buf_len = *dump_len;
+
+ start_time = ktime_get_real_seconds();
+ start_utc = sys_tz.tz_minuteswest * 60;
+ seg_hdr_len = sizeof(seg_hdr);
+
+ /* First segment should be hwrm_ver_get response */
+ *dump_len = seg_hdr_len + ver_get_resp_len;
+ if (buf) {
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
+ 0, 0, 0);
+ memcpy(buf + offset, &seg_hdr, seg_hdr_len);
+ offset += seg_hdr_len;
+ memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
+ offset += ver_get_resp_len;
+ }
+
+ rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
+ if (rc) {
+ netdev_err(bp->dev, "Failed to get coredump segment list\n");
+ goto err;
+ }
+
+ *dump_len += seg_hdr_len * coredump.total_segs;
+
+ seg_record = (struct coredump_segment_record *)coredump.data;
+ seg_record_len = sizeof(*seg_record);
+
+ for (i = 0; i < coredump.total_segs; i++) {
+ u16 comp_id = le16_to_cpu(seg_record->component_id);
+ u16 seg_id = le16_to_cpu(seg_record->segment_id);
+ u32 duration = 0, seg_len = 0;
+ unsigned long start, end;
+
+ if (buf && ((offset + seg_hdr_len) >
+ BNXT_COREDUMP_BUF_LEN(buf_len))) {
+ rc = -ENOBUFS;
+ goto err;
+ }
+
+ start = jiffies;
+
+ rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
+ if (rc) {
+ netdev_err(bp->dev,
+ "Failed to initiate coredump for seg = %d\n",
+ seg_record->segment_id);
+ goto next_seg;
+ }
+
+ /* Write segment data into the buffer */
+ rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
+ &seg_len, buf, buf_len,
+ offset + seg_hdr_len);
+ if (rc && rc == -ENOBUFS)
+ goto err;
+ else if (rc)
+ netdev_err(bp->dev,
+ "Failed to retrieve coredump for seg = %d\n",
+ seg_record->segment_id);
+
+next_seg:
+ end = jiffies;
+ duration = jiffies_to_msecs(end - start);
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
+ rc, duration, 0);
+
+ if (buf) {
+ /* Write segment header into the buffer */
+ memcpy(buf + offset, &seg_hdr, seg_hdr_len);
+ offset += seg_hdr_len + seg_len;
+ }
+
+ *dump_len += seg_len;
+ seg_record =
+ (struct coredump_segment_record *)((u8 *)seg_record +
+ seg_record_len);
+ }
+
+err:
+ if (buf)
+ bnxt_fill_coredump_record(bp, buf + offset, start_time,
+ start_utc, coredump.total_segs + 1,
+ rc);
+ kfree(coredump.data);
+ *dump_len += sizeof(struct bnxt_coredump_record);
+ if (rc == -ENOBUFS)
+ netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
+ return rc;
+}
+
+int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len)
+{
+ if (dump_type == BNXT_DUMP_CRASH) {
+#ifdef CONFIG_TEE_BNXT_FW
+ return tee_bnxt_copy_coredump(buf, 0, *dump_len);
+#else
+ return -EOPNOTSUPP;
+#endif
+ } else {
+ return __bnxt_get_coredump(bp, buf, dump_len);
+ }
+}
+
+static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
+{
+ struct hwrm_dbg_qcfg_output *resp;
+ struct hwrm_dbg_qcfg_input *req;
+ int rc, hdr_len = 0;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
+ return -EOPNOTSUPP;
+
+ if (dump_type == BNXT_DUMP_CRASH &&
+ !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR))
+ return -EOPNOTSUPP;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ if (dump_type == BNXT_DUMP_CRASH)
+ req->flags = cpu_to_le16(DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR);
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ goto get_dump_len_exit;
+
+ if (dump_type == BNXT_DUMP_CRASH) {
+ *dump_len = le32_to_cpu(resp->crashdump_size);
+ } else {
+		/* The driver adds the coredump header and an
+		 * "HWRM_VER_GET response" segment on top of the
+		 * firmware-reported coredump size.
+		 */
+ hdr_len = sizeof(struct bnxt_coredump_segment_hdr) +
+ sizeof(struct hwrm_ver_get_output) +
+ sizeof(struct bnxt_coredump_record);
+ *dump_len = le32_to_cpu(resp->coredump_size) + hdr_len;
+ }
+ if (*dump_len <= hdr_len)
+ rc = -EINVAL;
+
+get_dump_len_exit:
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type)
+{
+ u32 len = 0;
+
+ if (bnxt_hwrm_get_dump_len(bp, dump_type, &len)) {
+ if (dump_type == BNXT_DUMP_CRASH)
+ len = BNXT_CRASH_DUMP_LEN;
+ else
+ __bnxt_get_coredump(bp, NULL, &len);
+ }
+ return len;
+}
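
The coredump assembly above is sized in two passes: bnxt_get_coredump_length() first calls __bnxt_get_coredump() with a NULL buffer so that only *dump_len is accumulated (one header per segment, the segment payloads, and the trailing bnxt_coredump_record), and the caller then repeats the call with a buffer of that length. Below is a minimal standalone C sketch of that contract; SEG_HDR_LEN, RECORD_LEN and struct seg are illustrative placeholders, not the driver's structures.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SEG_HDR_LEN	32u	/* placeholder for sizeof(struct bnxt_coredump_segment_hdr) */
#define RECORD_LEN	64u	/* placeholder for sizeof(struct bnxt_coredump_record) */

struct seg { uint32_t len; };	/* placeholder for one firmware segment */

/* Pass 1 (buf == NULL): only accumulate the total length.
 * Pass 2 (buf != NULL): lay out header + payload per segment and make sure
 * the trailing record still fits, the way __bnxt_get_coredump() does.
 */
static int get_coredump(const struct seg *segs, int nsegs,
			void *buf, uint32_t *dump_len)
{
	uint32_t off = 0, need = 0;
	int i;

	for (i = 0; i < nsegs; i++) {
		need += SEG_HDR_LEN + segs[i].len;
		if (buf) {
			if (need + RECORD_LEN > *dump_len)
				return -1;	/* caller's buffer too small */
			memset((char *)buf + off, 0, SEG_HDR_LEN);
			memset((char *)buf + off + SEG_HDR_LEN, 0xab, segs[i].len);
			off = need;
		}
	}
	*dump_len = need + RECORD_LEN;	/* trailing coredump record */
	return 0;
}

int main(void)
{
	struct seg segs[] = { { 4096 }, { 128 } };
	uint32_t len = 0;
	void *buf;

	get_coredump(segs, 2, NULL, &len);	/* pass 1: size only */
	buf = malloc(len);
	if (!buf)
		return 1;
	if (!get_coredump(segs, 2, buf, &len))	/* pass 2: fill */
		printf("coredump is %u bytes\n", len);
	free(buf);
	return 0;
}

The ethtool dump path follows roughly the same shape: a length query via bnxt_get_coredump_length(), then the filling call via bnxt_get_coredump().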
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
index 09c22f8fe399..b1a1b2fffb19 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -10,6 +10,10 @@
#ifndef BNXT_COREDUMP_H
#define BNXT_COREDUMP_H
+#include <linux/utsname.h>
+#include <linux/time.h>
+#include <linux/rtc.h>
+
struct bnxt_coredump_segment_hdr {
__u8 signature[4];
__le32 component_id;
@@ -63,4 +67,51 @@ struct bnxt_coredump_record {
__u8 ioctl_high_version;
__le16 rsvd3[313];
};
+
+#define BNXT_CRASH_DUMP_LEN (8 << 20)
+
+#define COREDUMP_LIST_BUF_LEN 2048
+#define COREDUMP_RETRIEVE_BUF_LEN 4096
+
+struct bnxt_coredump {
+ void *data;
+ int data_size;
+ u16 total_segs;
+};
+
+#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record))
+
+struct bnxt_hwrm_dbg_dma_info {
+ void *dest_buf;
+ int dest_buf_size;
+ u16 dma_len;
+ u16 seq_off;
+ u16 data_len_off;
+ u16 segs;
+ u32 seg_start;
+ u32 buf_len;
+};
+
+struct hwrm_dbg_cmn_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+};
+
+struct hwrm_dbg_cmn_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define HWRM_DBG_CMN_FLAGS_MORE 1
+};
+
+int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len);
+u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type);
+
#endif
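
The bnxt_hwrm_dbg_dma_info descriptor above suggests a chunked transfer: the driver re-sends the same debug command with an incrementing sequence value (written at seq_off into the request), copies dma_len-sized pieces into dest_buf, and stops once the response no longer carries HWRM_DBG_CMN_FLAGS_MORE. A loose sketch of that flag-driven loop follows, with a fake fetch_chunk() standing in for the HWRM exchange; it illustrates the loop shape only, not the driver's actual implementation.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHUNK_LEN	16u	/* stand-in for info->dma_len */
#define FLAGS_MORE	1u	/* mirrors HWRM_DBG_CMN_FLAGS_MORE */

/* Fake firmware side: hands out 'total' bytes, CHUNK_LEN at a time, and
 * keeps FLAGS_MORE set while data remains. Placeholder for the real
 * HWRM_DBG_COREDUMP_RETRIEVE exchange.
 */
static uint32_t fetch_chunk(uint32_t seq, uint32_t total, uint8_t *dma_buf,
			    uint8_t *flags)
{
	uint32_t done = seq * CHUNK_LEN;
	uint32_t len = total - done < CHUNK_LEN ? total - done : CHUNK_LEN;

	memset(dma_buf, 0x5a, len);
	*flags = (done + len < total) ? FLAGS_MORE : 0;
	return len;
}

/* Flag-driven loop: resend with an incremented sequence number and append
 * each chunk until the MORE flag drops or the host buffer is exhausted.
 */
static uint32_t retrieve(uint32_t total, uint8_t *dest, uint32_t dest_len)
{
	uint8_t dma_buf[CHUNK_LEN], flags = FLAGS_MORE;
	uint32_t seq = 0, off = 0;

	while (flags & FLAGS_MORE) {
		uint32_t len = fetch_chunk(seq++, total, dma_buf, &flags);

		if (off + len > dest_len)
			break;
		memcpy(dest + off, dma_buf, len);
		off += len;
	}
	return off;
}

int main(void)
{
	uint8_t dest[128];

	printf("retrieved %u bytes\n", retrieve(50, dest, sizeof(dest)));
	return 0;
}

The chunk size and the offsets of the sequence and length fields differ per debug command, which is presumably why they are carried in bnxt_hwrm_dbg_dma_info rather than hard-coded.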
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index fb6f30d0d1d0..caab3d626a2a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -18,6 +18,7 @@
#include <rdma/ib_verbs.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
+#include "bnxt_hwrm.h"
#include "bnxt_dcb.h"
#ifdef CONFIG_BNXT_DCB
@@ -38,39 +39,43 @@ static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
{
- struct hwrm_queue_pri2cos_cfg_input req = {0};
- int rc = 0, i;
+ struct hwrm_queue_pri2cos_cfg_input *req;
u8 *pri2cos;
+ int rc, i;
+
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_CFG);
+ if (rc)
+ return rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
- req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
- QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);
+ req->flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
+ QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);
- pri2cos = &req.pri0_cos_queue_id;
+ pri2cos = &req->pri0_cos_queue_id;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
u8 qidx;
- req.enables |= cpu_to_le32(
+ req->enables |= cpu_to_le32(
QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);
qidx = bp->tc_to_qidx[ets->prio_tc[i]];
pri2cos[i] = bp->q_info[qidx].queue_id;
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
- struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_queue_pri2cos_qcfg_input req = {0};
- int rc = 0;
+ struct hwrm_queue_pri2cos_qcfg_output *resp;
+ struct hwrm_queue_pri2cos_qcfg_input *req;
+ int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
- req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_QCFG);
+ if (rc)
+ return rc;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (!rc) {
u8 *pri2cos = &resp->pri0_cos_queue_id;
int i;
@@ -84,23 +89,26 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
ets->prio_tc[i] = tc;
}
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
u8 max_tc)
{
- struct hwrm_queue_cos2bw_cfg_input req = {0};
+ struct hwrm_queue_cos2bw_cfg_input *req;
struct bnxt_cos2bw_cfg cos2bw;
- int rc = 0, i;
void *data;
+ int rc, i;
+
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_CFG);
+ if (rc)
+ return rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
for (i = 0; i < max_tc; i++) {
u8 qidx = bp->tc_to_qidx[i];
- req.enables |= cpu_to_le32(
+ req->enables |= cpu_to_le32(
QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
qidx);
@@ -121,39 +129,40 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
cpu_to_le32((ets->tc_tx_bw[i] * 100) |
BW_VALUE_UNIT_PERCENT1_100);
}
- data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4);
+ data = &req->unused_0 + qidx * (sizeof(cos2bw) - 4);
memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
if (qidx == 0) {
- req.queue_id0 = cos2bw.queue_id;
- req.unused_0 = 0;
+ req->queue_id0 = cos2bw.queue_id;
+ req->unused_0 = 0;
}
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
- struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_queue_cos2bw_qcfg_input req = {0};
+ struct hwrm_queue_cos2bw_qcfg_output *resp;
+ struct hwrm_queue_cos2bw_qcfg_input *req;
struct bnxt_cos2bw_cfg cos2bw;
void *data;
int rc, i;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_QCFG);
+ if (rc)
+ return rc;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (rc) {
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
- for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
+ for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw.cfg)) {
int tc;
- memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
+ memcpy(&cos2bw.cfg, data, sizeof(cos2bw.cfg));
if (i == 0)
cos2bw.queue_id = resp->queue_id0;
@@ -169,7 +178,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
ets->tc_tx_bw[tc] = cos2bw.bw_weight;
}
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return 0;
}
@@ -231,7 +240,7 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
- struct hwrm_queue_pfcenable_cfg_input req = {0};
+ struct hwrm_queue_pfcenable_cfg_input *req;
struct ieee_ets *my_ets = bp->ieee_ets;
unsigned int tc_mask = 0, pri_mask = 0;
u8 i, pri, lltc_count = 0;
@@ -267,44 +276,45 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
}
if (need_q_remap)
- rc = bnxt_queue_remap(bp, tc_mask);
+ bnxt_queue_remap(bp, tc_mask);
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
- req.flags = cpu_to_le32(pri_mask);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_CFG);
if (rc)
return rc;
- return rc;
+ req->flags = cpu_to_le32(pri_mask);
+ return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
- struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_queue_pfcenable_qcfg_input req = {0};
+ struct hwrm_queue_pfcenable_qcfg_output *resp;
+ struct hwrm_queue_pfcenable_qcfg_input *req;
u8 pri_mask;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_QCFG);
+ if (rc)
+ return rc;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (rc) {
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
pri_mask = le32_to_cpu(resp->flags);
pfc->pfc_en = pri_mask;
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return 0;
}
static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
bool add)
{
- struct hwrm_fw_set_structured_data_input set = {0};
- struct hwrm_fw_get_structured_data_input get = {0};
+ struct hwrm_fw_set_structured_data_input *set;
+ struct hwrm_fw_get_structured_data_input *get;
struct hwrm_struct_data_dcbx_app *fw_app;
struct hwrm_struct_hdr *data;
dma_addr_t mapping;
@@ -314,19 +324,26 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
if (bp->hwrm_spec_code < 0x10601)
return 0;
+ rc = hwrm_req_init(bp, get, HWRM_FW_GET_STRUCTURED_DATA);
+ if (rc)
+ return rc;
+
+ hwrm_req_hold(bp, get);
+ hwrm_req_alloc_flags(bp, get, GFP_KERNEL | __GFP_ZERO);
+
n = IEEE_8021QAZ_MAX_TCS;
data_len = sizeof(*data) + sizeof(*fw_app) * n;
- data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ data = hwrm_req_dma_slice(bp, get, data_len, &mapping);
+ if (!data) {
+ rc = -ENOMEM;
+ goto set_app_exit;
+ }
- bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
- get.dest_data_addr = cpu_to_le64(mapping);
- get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
- get.subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
- get.count = 0;
- rc = hwrm_send_message(bp, &get, sizeof(get), HWRM_CMD_TIMEOUT);
+ get->dest_data_addr = cpu_to_le64(mapping);
+ get->structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
+ get->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
+ get->count = 0;
+ rc = hwrm_req_send(bp, get);
if (rc)
goto set_app_exit;
@@ -372,44 +389,49 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
data->len = cpu_to_le16(sizeof(*fw_app) * n);
data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
- bnxt_hwrm_cmd_hdr_init(bp, &set, HWRM_FW_SET_STRUCTURED_DATA, -1, -1);
- set.src_data_addr = cpu_to_le64(mapping);
- set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
- set.hdr_cnt = 1;
- rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, set, HWRM_FW_SET_STRUCTURED_DATA);
+ if (rc)
+ goto set_app_exit;
+
+ set->src_data_addr = cpu_to_le64(mapping);
+ set->data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
+ set->hdr_cnt = 1;
+ rc = hwrm_req_send(bp, set);
set_app_exit:
- dma_free_coherent(&bp->pdev->dev, data_len, data, mapping);
+ hwrm_req_drop(bp, get); /* dropping get request and associated slice */
return rc;
}
static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
{
- struct hwrm_queue_dscp_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_queue_dscp_qcaps_input req = {0};
+ struct hwrm_queue_dscp_qcaps_output *resp;
+ struct hwrm_queue_dscp_qcaps_input *req;
int rc;
bp->max_dscp_value = 0;
if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP_QCAPS);
+ if (rc)
+ return rc;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
if (!rc) {
bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
if (bp->max_dscp_value < 0x3f)
bp->max_dscp_value = 0;
}
-
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
bool add)
{
- struct hwrm_queue_dscp2pri_cfg_input req = {0};
+ struct hwrm_queue_dscp2pri_cfg_input *req;
struct bnxt_dscp2pri_entry *dscp2pri;
dma_addr_t mapping;
int rc;
@@ -417,29 +439,32 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
if (bp->hwrm_spec_code < 0x10800)
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1);
- dscp2pri = dma_alloc_coherent(&bp->pdev->dev, sizeof(*dscp2pri),
- &mapping, GFP_KERNEL);
- if (!dscp2pri)
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP2PRI_CFG);
+ if (rc)
+ return rc;
+
+ dscp2pri = hwrm_req_dma_slice(bp, req, sizeof(*dscp2pri), &mapping);
+ if (!dscp2pri) {
+ hwrm_req_drop(bp, req);
return -ENOMEM;
+ }
- req.src_data_addr = cpu_to_le64(mapping);
+ req->src_data_addr = cpu_to_le64(mapping);
dscp2pri->dscp = app->protocol;
if (add)
dscp2pri->mask = 0x3f;
else
dscp2pri->mask = 0;
dscp2pri->pri = app->priority;
- req.entry_cnt = cpu_to_le16(1);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri,
- mapping);
+ req->entry_cnt = cpu_to_le16(1);
+ rc = hwrm_req_send(bp, req);
return rc;
}
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
int total_ets_bw = 0;
+ bool zero = false;
u8 max_tc = 0;
int i;
@@ -460,13 +485,20 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
break;
case IEEE_8021QAZ_TSA_ETS:
total_ets_bw += ets->tc_tx_bw[i];
+ zero = zero || !ets->tc_tx_bw[i];
break;
default:
return -ENOTSUPP;
}
}
- if (total_ets_bw > 100)
+ if (total_ets_bw > 100) {
+ netdev_warn(bp->dev, "rejecting ETS config exceeding available bandwidth\n");
return -EINVAL;
+ }
+ if (zero && total_ets_bw == 100) {
+ netdev_warn(bp->dev, "rejecting ETS config starving a TC\n");
+ return -EINVAL;
+ }
if (max_tc >= bp->max_tc)
*tc = bp->max_tc;
@@ -479,24 +511,26 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
struct bnxt *bp = netdev_priv(dev);
struct ieee_ets *my_ets = bp->ieee_ets;
+ int rc;
ets->ets_cap = bp->max_tc;
if (!my_ets) {
- int rc;
-
if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
return 0;
my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
if (!my_ets)
- return 0;
+ return -ENOMEM;
rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
if (rc)
- return 0;
+ goto error;
rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
if (rc)
- return 0;
+ goto error;
+
+ /* cache result */
+ bp->ieee_ets = my_ets;
}
ets->cbs = my_ets->cbs;
@@ -505,6 +539,9 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
return 0;
+error:
+ kfree(my_ets);
+ return rc;
}
static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
@@ -546,7 +583,7 @@ static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
struct bnxt *bp = netdev_priv(dev);
- __le64 *stats = (__le64 *)bp->hw_rx_port_stats;
+ __le64 *stats = bp->port_stats.hw_stats;
struct ieee_pfc *my_pfc = bp->ieee_pfc;
long rx_off, tx_off;
int i, rc;
@@ -590,7 +627,8 @@ static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
int rc;
if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
- !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
+ !(bp->dcbx_cap & DCB_CAP_DCBX_HOST) ||
+ (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
return -EINVAL;
if (!my_pfc) {
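
All of the bnxt_dcb.c hunks above follow the same conversion: the stack-allocated request plus the bnxt_hwrm_cmd_hdr_init()/hwrm_send_message() pair, with its explicit hwrm_cmd_lock locking, is replaced by the request API introduced in bnxt_hwrm.c. The sketch below shows the query-style shape those functions now share; it assumes the driver's struct bnxt context and HWRM message types and is a pattern summary, not a standalone compilable unit.

/* Query-style shape shared by the converted bnxt_dcb.c functions (sketch).
 * hwrm_req_hold() pins the response buffer so it stays valid after
 * hwrm_req_send(); hwrm_req_drop() releases the request and the held
 * response in one call, replacing the old mutex_lock/mutex_unlock pairing.
 */
static int bnxt_hwrm_query_example(struct bnxt *bp)
{
	struct hwrm_queue_pfcenable_qcfg_output *resp;
	struct hwrm_queue_pfcenable_qcfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_QCFG);
	if (rc)
		return rc;		/* nothing allocated, nothing to drop */

	resp = hwrm_req_hold(bp, req);	/* keep resp valid past the send */
	rc = hwrm_req_send(bp, req);
	if (!rc)
		netdev_info(bp->dev, "pfc flags: 0x%x\n",
			    le32_to_cpu(resp->flags));

	hwrm_req_drop(bp, req);
	return rc;
}

Set-style commands that never read the response (bnxt_hwrm_queue_pri2cos_cfg(), bnxt_hwrm_queue_cos2bw_cfg()) skip the hold/drop pair and simply end with return hwrm_req_send(bp, req); DMA payloads come from hwrm_req_dma_slice() and are released by the same hwrm_req_drop(), as the bnxt_hwrm_set_dcbx_app() hunk notes.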
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index 6eed231de565..716742522161 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -23,13 +23,15 @@ struct bnxt_dcb {
struct bnxt_cos2bw_cfg {
u8 pad[3];
- u8 queue_id;
- __le32 min_bw;
- __le32 max_bw;
+ struct_group_attr(cfg, __packed,
+ u8 queue_id;
+ __le32 min_bw;
+ __le32 max_bw;
#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- u8 tsa;
- u8 pri_lvl;
- u8 bw_weight;
+ u8 tsa;
+ u8 pri_lvl;
+ u8 bw_weight;
+ );
u8 unused;
};
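
The struct_group_attr() change above lets the memcpy() in bnxt_hwrm_queue_cos2bw_qcfg() name exactly the copied members (cos2bw.cfg) instead of relying on the sizeof(cos2bw) - 4 arithmetic, which also keeps fortified memcpy() checks happy. A small userspace illustration of the idea follows; the two-struct union is a simplified stand-in for the real macro in <linux/stddef.h>.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the kernel's struct_group(): an anonymous union
 * exposes the members both individually and as one named sub-struct.
 */
#define struct_group(NAME, ...)			\
	union {					\
		struct { __VA_ARGS__ };		\
		struct { __VA_ARGS__ } NAME;	\
	}

struct cos2bw_demo {
	uint8_t pad[3];
	struct_group(cfg,
		uint8_t  queue_id;
		uint32_t min_bw;
		uint32_t max_bw;
		uint8_t  tsa;
		uint8_t  pri_lvl;
		uint8_t  bw_weight;
	);
	uint8_t unused;
};

int main(void)
{
	struct cos2bw_demo a = { 0 }, b = { 0 };

	a.queue_id = 3;		/* members stay directly accessible */
	a.bw_weight = 50;
	b.unused = 0xff;

	/* copies exactly the grouped members; pad[] and unused are untouched */
	memcpy(&b.cfg, &a.cfg, sizeof(b.cfg));

	printf("queue_id=%u bw_weight=%u unused=0x%x group=%zu bytes\n",
	       b.queue_id, b.bw_weight, b.unused, sizeof(b.cfg));
	return 0;
}

With the group in place, the qcfg loop steps by sizeof(cos2bw.cfg) and copies into &cos2bw.cfg, so the copy size and the destination always describe the same region.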
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index d3c93ccee86a..8a6f788f6294 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -9,283 +9,665 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
#include <net/devlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
+#include "bnxt_hwrm.h"
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
#include "bnxt_ethtool.h"
+#include "bnxt_ulp.h"
+#include "bnxt_ptp.h"
+#include "bnxt_coredump.h"
+#include "bnxt_nvm_defs.h"
+
+static void __bnxt_fw_recover(struct bnxt *bp)
+{
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
+ test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
+ bnxt_fw_reset(bp);
+ else
+ bnxt_fw_exception(bp);
+}
static int
-bnxt_dl_flash_update(struct devlink *dl, const char *filename,
- const char *region, struct netlink_ext_ack *extack)
+bnxt_dl_flash_update(struct devlink *dl,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc;
- if (region)
- return -EOPNOTSUPP;
-
if (!BNXT_PF(bp)) {
NL_SET_ERR_MSG_MOD(extack,
"flash update not supported from a VF");
return -EPERM;
}
- devlink_flash_update_begin_notify(dl);
- devlink_flash_update_status_notify(dl, "Preparing to flash", region, 0,
- 0);
- rc = bnxt_flash_package_from_file(bp->dev, filename, 0);
+ devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
+ rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0, extack);
if (!rc)
- devlink_flash_update_status_notify(dl, "Flashing done", region,
- 0, 0);
+ devlink_flash_update_status_notify(dl, "Flashing done", NULL, 0, 0);
else
- devlink_flash_update_status_notify(dl, "Flashing failed",
- region, 0, 0);
- devlink_flash_update_end_notify(dl);
+ devlink_flash_update_status_notify(dl, "Flashing failed", NULL, 0, 0);
return rc;
}
-static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg,
- struct netlink_ext_ack *extack)
+static int bnxt_hwrm_remote_dev_reset_set(struct bnxt *bp, bool remote_reset)
+{
+ struct hwrm_func_cfg_input *req;
+ int rc;
+
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ return -EOPNOTSUPP;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT);
+ if (remote_reset)
+ req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS);
+
+ return hwrm_req_send(bp, req);
+}
+
+static char *bnxt_health_severity_str(enum bnxt_health_severity severity)
+{
+ switch (severity) {
+ case SEVERITY_NORMAL: return "normal";
+ case SEVERITY_WARNING: return "warning";
+ case SEVERITY_RECOVERABLE: return "recoverable";
+ case SEVERITY_FATAL: return "fatal";
+ default: return "unknown";
+ }
+}
+
+static char *bnxt_health_remedy_str(enum bnxt_health_remedy remedy)
+{
+ switch (remedy) {
+ case REMEDY_DEVLINK_RECOVER: return "devlink recover";
+ case REMEDY_POWER_CYCLE_DEVICE: return "device power cycle";
+ case REMEDY_POWER_CYCLE_HOST: return "host power cycle";
+ case REMEDY_FW_UPDATE: return "update firmware";
+ case REMEDY_HW_REPLACE: return "replace hardware";
+ default: return "unknown";
+ }
+}
+
+static int bnxt_fw_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
- u32 val, health_status;
+ struct bnxt_fw_health *h = bp->fw_health;
+ u32 fw_status, fw_resets;
int rc;
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
- return 0;
+ return devlink_fmsg_string_pair_put(fmsg, "Status", "recovering");
- val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
- health_status = val & 0xffff;
+ if (!h->status_reliable)
+ return devlink_fmsg_string_pair_put(fmsg, "Status", "unknown");
- if (health_status < BNXT_FW_STATUS_HEALTHY) {
- rc = devlink_fmsg_string_pair_put(fmsg, "Description",
- "Not yet completed initialization");
+ mutex_lock(&h->lock);
+ fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (BNXT_FW_IS_BOOTING(fw_status)) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "Status", "initializing");
if (rc)
- return rc;
- } else if (health_status > BNXT_FW_STATUS_HEALTHY) {
- rc = devlink_fmsg_string_pair_put(fmsg, "Description",
- "Encountered fatal error and cannot recover");
+ goto unlock;
+ } else if (h->severity || fw_status != BNXT_FW_STATUS_HEALTHY) {
+ if (!h->severity) {
+ h->severity = SEVERITY_FATAL;
+ h->remedy = REMEDY_POWER_CYCLE_DEVICE;
+ h->diagnoses++;
+ devlink_health_report(h->fw_reporter,
+ "FW error diagnosed", h);
+ }
+ rc = devlink_fmsg_string_pair_put(fmsg, "Status", "error");
if (rc)
- return rc;
+ goto unlock;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Syndrome", fw_status);
+ if (rc)
+ goto unlock;
+ } else {
+ rc = devlink_fmsg_string_pair_put(fmsg, "Status", "healthy");
+ if (rc)
+ goto unlock;
}
- if (val >> 16) {
- rc = devlink_fmsg_u32_pair_put(fmsg, "Error code", val >> 16);
+ rc = devlink_fmsg_string_pair_put(fmsg, "Severity",
+ bnxt_health_severity_str(h->severity));
+ if (rc)
+ goto unlock;
+
+ if (h->severity) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "Remedy",
+ bnxt_health_remedy_str(h->remedy));
if (rc)
- return rc;
+ goto unlock;
+ if (h->remedy == REMEDY_DEVLINK_RECOVER) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "Impact",
+ "traffic+ntuple_cfg");
+ if (rc)
+ goto unlock;
+ }
}
- val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
- rc = devlink_fmsg_u32_pair_put(fmsg, "Reset count", val);
- if (rc)
+unlock:
+ mutex_unlock(&h->lock);
+ if (rc || !h->resets_reliable)
return rc;
- return 0;
+ fw_resets = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Resets", fw_resets);
+ if (rc)
+ return rc;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Arrests", h->arrests);
+ if (rc)
+ return rc;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Survivals", h->survivals);
+ if (rc)
+ return rc;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Discoveries", h->discoveries);
+ if (rc)
+ return rc;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Fatalities", h->fatalities);
+ if (rc)
+ return rc;
+ return devlink_fmsg_u32_pair_put(fmsg, "Diagnoses", h->diagnoses);
}
-static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
- .name = "fw",
- .diagnose = bnxt_fw_reporter_diagnose,
-};
-
-static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx,
- struct netlink_ext_ack *extack)
+static int bnxt_fw_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
+ u32 dump_len;
+ void *data;
+ int rc;
- if (!priv_ctx)
+ /* TODO: no firmware dump support in devlink_health_report() context */
+ if (priv_ctx)
return -EOPNOTSUPP;
- bnxt_fw_reset(bp);
- return -EINPROGRESS;
-}
+ dump_len = bnxt_get_coredump_length(bp, BNXT_DUMP_LIVE);
+ if (!dump_len)
+ return -EIO;
-static const
-struct devlink_health_reporter_ops bnxt_dl_fw_reset_reporter_ops = {
- .name = "fw_reset",
- .recover = bnxt_fw_reset_recover,
-};
+ data = vmalloc(dump_len);
+ if (!data)
+ return -ENOMEM;
-static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx,
- struct netlink_ext_ack *extack)
+ rc = bnxt_get_coredump(bp, BNXT_DUMP_LIVE, data, &dump_len);
+ if (!rc) {
+ rc = devlink_fmsg_pair_nest_start(fmsg, "core");
+ if (rc)
+ goto exit;
+ rc = devlink_fmsg_binary_pair_put(fmsg, "data", data, dump_len);
+ if (rc)
+ goto exit;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "size", dump_len);
+ if (rc)
+ goto exit;
+ rc = devlink_fmsg_pair_nest_end(fmsg);
+ }
+
+exit:
+ vfree(data);
+ return rc;
+}
+
+static int bnxt_fw_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
- struct bnxt_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
- unsigned long event;
- if (!priv_ctx)
- return -EOPNOTSUPP;
+ if (bp->fw_health->severity == SEVERITY_FATAL)
+ return -ENODEV;
- bp->fw_health->fatal = true;
- event = fw_reporter_ctx->sp_event;
- if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT)
- bnxt_fw_reset(bp);
- else if (event == BNXT_FW_EXCEPTION_SP_EVENT)
- bnxt_fw_exception(bp);
+ set_bit(BNXT_STATE_RECOVER, &bp->state);
+ __bnxt_fw_recover(bp);
return -EINPROGRESS;
}
-static const
-struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = {
- .name = "fw_fatal",
- .recover = bnxt_fw_fatal_recover,
+static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
+ .name = "fw",
+ .diagnose = bnxt_fw_diagnose,
+ .dump = bnxt_fw_dump,
+ .recover = bnxt_fw_recover,
};
+static struct devlink_health_reporter *
+__bnxt_dl_reporter_create(struct bnxt *bp,
+ const struct devlink_health_reporter_ops *ops)
+{
+ struct devlink_health_reporter *reporter;
+
+ reporter = devlink_health_reporter_create(bp->dl, ops, 0, bp);
+ if (IS_ERR(reporter)) {
+ netdev_warn(bp->dev, "Failed to create %s health reporter, rc = %ld\n",
+ ops->name, PTR_ERR(reporter));
+ return NULL;
+ }
+
+ return reporter;
+}
+
void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
- struct bnxt_fw_health *health = bp->fw_health;
+ struct bnxt_fw_health *fw_health = bp->fw_health;
- if (!bp->dl || !health)
- return;
+ if (fw_health && !fw_health->fw_reporter)
+ fw_health->fw_reporter = __bnxt_dl_reporter_create(bp, &bnxt_dl_fw_reporter_ops);
+}
- if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter)
- goto err_recovery;
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
- health->fw_reset_reporter =
- devlink_health_reporter_create(bp->dl,
- &bnxt_dl_fw_reset_reporter_ops,
- 0, true, bp);
- if (IS_ERR(health->fw_reset_reporter)) {
- netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
- PTR_ERR(health->fw_reset_reporter));
- health->fw_reset_reporter = NULL;
- bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
+ if (fw_health && fw_health->fw_reporter) {
+ devlink_health_reporter_destroy(fw_health->fw_reporter);
+ fw_health->fw_reporter = NULL;
}
+}
-err_recovery:
- if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+void bnxt_devlink_health_fw_report(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ int rc;
+
+ if (!fw_health)
return;
- if (!health->fw_reporter) {
- health->fw_reporter =
- devlink_health_reporter_create(bp->dl,
- &bnxt_dl_fw_reporter_ops,
- 0, false, bp);
- if (IS_ERR(health->fw_reporter)) {
- netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
- PTR_ERR(health->fw_reporter));
- health->fw_reporter = NULL;
- bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
- return;
- }
+ if (!fw_health->fw_reporter) {
+ __bnxt_fw_recover(bp);
+ return;
}
- if (health->fw_fatal_reporter)
- return;
+ mutex_lock(&fw_health->lock);
+ fw_health->severity = SEVERITY_RECOVERABLE;
+ fw_health->remedy = REMEDY_DEVLINK_RECOVER;
+ mutex_unlock(&fw_health->lock);
+ rc = devlink_health_report(fw_health->fw_reporter, "FW error reported",
+ fw_health);
+ if (rc == -ECANCELED)
+ __bnxt_fw_recover(bp);
+}
+
+void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u8 state;
- health->fw_fatal_reporter =
- devlink_health_reporter_create(bp->dl,
- &bnxt_dl_fw_fatal_reporter_ops,
- 0, true, bp);
- if (IS_ERR(health->fw_fatal_reporter)) {
- netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
- PTR_ERR(health->fw_fatal_reporter));
- health->fw_fatal_reporter = NULL;
- bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+ mutex_lock(&fw_health->lock);
+ if (healthy) {
+ fw_health->severity = SEVERITY_NORMAL;
+ state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
+ } else {
+ fw_health->severity = SEVERITY_FATAL;
+ fw_health->remedy = REMEDY_POWER_CYCLE_DEVICE;
+ state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
}
+ mutex_unlock(&fw_health->lock);
+ devlink_health_reporter_state_update(fw_health->fw_reporter, state);
}
-void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
+void bnxt_dl_health_fw_recovery_done(struct bnxt *bp)
{
- struct bnxt_fw_health *health = bp->fw_health;
+ struct bnxt_dl *dl = devlink_priv(bp->dl);
- if (!bp->dl || !health)
- return;
+ devlink_health_reporter_recovery_done(bp->fw_health->fw_reporter);
+ bnxt_hwrm_remote_dev_reset_set(bp, dl->remote_reset);
+}
+
+static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+
+static void
+bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
+ struct hwrm_fw_livepatch_output *resp)
+{
+ int err = ((struct hwrm_err_output *)resp)->cmd_err;
+
+ switch (err) {
+ case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE:
+ netdev_err(bp->dev, "Illegal live patch opcode");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid opcode");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch operation not supported");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch not found");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Live patch deactivation failed. Firmware not patched.");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch not authenticated");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER:
+ NL_SET_ERR_MSG_MOD(extack, "Incompatible live patch");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch has invalid size");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch already applied");
+ break;
+ default:
+ netdev_err(bp->dev, "Unexpected live patch error: %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to activate live patch");
+ break;
+ }
+}
- if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) &&
- health->fw_reset_reporter) {
- devlink_health_reporter_destroy(health->fw_reset_reporter);
- health->fw_reset_reporter = NULL;
+/* Live patch status in NVM */
+#define BNXT_LIVEPATCH_NOT_INSTALLED 0
+#define BNXT_LIVEPATCH_INSTALLED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL
+#define BNXT_LIVEPATCH_REMOVED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE
+#define BNXT_LIVEPATCH_MASK (FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL | \
+ FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE)
+#define BNXT_LIVEPATCH_ACTIVATED BNXT_LIVEPATCH_MASK
+
+#define BNXT_LIVEPATCH_STATE(flags) ((flags) & BNXT_LIVEPATCH_MASK)
+
+static int
+bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
+{
+ struct hwrm_fw_livepatch_query_output *query_resp;
+ struct hwrm_fw_livepatch_query_input *query_req;
+ struct hwrm_fw_livepatch_output *patch_resp;
+ struct hwrm_fw_livepatch_input *patch_req;
+ u16 flags, live_patch_state;
+ bool activated = false;
+ u32 installed = 0;
+ u8 target;
+ int rc;
+
+ if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support live patch");
+ return -EOPNOTSUPP;
}
- if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all)
- return;
+ rc = hwrm_req_init(bp, query_req, HWRM_FW_LIVEPATCH_QUERY);
+ if (rc)
+ return rc;
+ query_resp = hwrm_req_hold(bp, query_req);
+
+ rc = hwrm_req_init(bp, patch_req, HWRM_FW_LIVEPATCH);
+ if (rc) {
+ hwrm_req_drop(bp, query_req);
+ return rc;
+ }
+ patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL;
+ patch_resp = hwrm_req_hold(bp, patch_req);
+
+ for (target = 1; target <= FW_LIVEPATCH_REQ_FW_TARGET_LAST; target++) {
+ query_req->fw_target = target;
+ rc = hwrm_req_send(bp, query_req);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query packages");
+ break;
+ }
+
+ flags = le16_to_cpu(query_resp->status_flags);
+ live_patch_state = BNXT_LIVEPATCH_STATE(flags);
+
+ if (live_patch_state == BNXT_LIVEPATCH_NOT_INSTALLED)
+ continue;
+
+ if (live_patch_state == BNXT_LIVEPATCH_ACTIVATED) {
+ activated = true;
+ continue;
+ }
+
+ if (live_patch_state == BNXT_LIVEPATCH_INSTALLED)
+ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
+ else if (live_patch_state == BNXT_LIVEPATCH_REMOVED)
+ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE;
- if (health->fw_reporter) {
- devlink_health_reporter_destroy(health->fw_reporter);
- health->fw_reporter = NULL;
+ patch_req->fw_target = target;
+ rc = hwrm_req_send(bp, patch_req);
+ if (rc) {
+ bnxt_dl_livepatch_report_err(bp, extack, patch_resp);
+ break;
+ }
+ installed++;
}
- if (health->fw_fatal_reporter) {
- devlink_health_reporter_destroy(health->fw_fatal_reporter);
- health->fw_fatal_reporter = NULL;
+ if (!rc && !installed) {
+ if (activated) {
+ NL_SET_ERR_MSG_MOD(extack, "Live patch already activated");
+ rc = -EEXIST;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "No live patches found");
+ rc = -ENOENT;
+ }
}
+ hwrm_req_drop(bp, query_req);
+ hwrm_req_drop(bp, patch_req);
+ return rc;
}
-void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
+static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
{
- struct bnxt_fw_health *fw_health = bp->fw_health;
- struct bnxt_fw_reporter_ctx fw_reporter_ctx;
-
- fw_reporter_ctx.sp_event = event;
- switch (event) {
- case BNXT_FW_RESET_NOTIFY_SP_EVENT:
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
- if (!fw_health->fw_fatal_reporter)
- return;
-
- devlink_health_report(fw_health->fw_fatal_reporter,
- "FW fatal async event received",
- &fw_reporter_ctx);
- return;
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc = 0;
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
+ rtnl_lock();
+ if (bnxt_sriov_cfg(bp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "reload is unsupported while VFs are allocated or being configured");
+ rtnl_unlock();
+ return -EOPNOTSUPP;
+ }
+ if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ rtnl_unlock();
+ return -ENODEV;
+ }
+ bnxt_ulp_stop(bp);
+ if (netif_running(bp->dev)) {
+ rc = bnxt_close_nic(bp, true, true);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to close");
+ dev_close(bp->dev);
+ rtnl_unlock();
+ break;
+ }
}
- if (!fw_health->fw_reset_reporter)
- return;
+ bnxt_vf_reps_free(bp);
+ rc = bnxt_hwrm_func_drv_unrgtr(bp);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to deregister");
+ if (netif_running(bp->dev))
+ dev_close(bp->dev);
+ rtnl_unlock();
+ break;
+ }
+ bnxt_cancel_reservations(bp, false);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
+ break;
+ }
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
+ if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
+ return bnxt_dl_livepatch_activate(bp, extack);
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET) {
+ NL_SET_ERR_MSG_MOD(extack, "Device not capable, requires reboot");
+ return -EOPNOTSUPP;
+ }
+ if (!bnxt_hwrm_reset_permitted(bp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Reset denied by firmware, it may be inhibited by remote driver");
+ return -EPERM;
+ }
+ rtnl_lock();
+ if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ rtnl_unlock();
+ return -ENODEV;
+ }
+ if (netif_running(bp->dev))
+ set_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ rc = bnxt_hwrm_firmware_reset(bp->dev,
+ FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
+ FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
+ FW_RESET_REQ_FLAGS_RESET_GRACEFUL |
+ FW_RESET_REQ_FLAGS_FW_ACTIVATION);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to activate firmware");
+ clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ rtnl_unlock();
+ }
+ break;
+ }
+ default:
+ rc = -EOPNOTSUPP;
+ }
- devlink_health_report(fw_health->fw_reset_reporter,
- "FW non-fatal reset event received",
- &fw_reporter_ctx);
- return;
+ return rc;
+}
- case BNXT_FW_EXCEPTION_SP_EVENT:
- if (!fw_health->fw_fatal_reporter)
- return;
+static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc = 0;
+
+ *actions_performed = 0;
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
+ bnxt_fw_init_one(bp);
+ bnxt_vf_reps_alloc(bp);
+ if (netif_running(bp->dev))
+ rc = bnxt_open_nic(bp, true, true);
+ bnxt_ulp_start(bp, rc);
+ if (!rc) {
+ bnxt_reenable_sriov(bp);
+ bnxt_ptp_reapply_pps(bp);
+ }
+ break;
+ }
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
+ unsigned long start = jiffies;
+ unsigned long timeout = start + BNXT_DFLT_FW_RST_MAX_DSECS * HZ / 10;
- devlink_health_report(fw_health->fw_fatal_reporter,
- "FW fatal error reported",
- &fw_reporter_ctx);
- return;
+ if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
+ break;
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
+ timeout = start + bp->fw_health->normal_func_wait_dsecs * HZ / 10;
+ if (!netif_running(bp->dev))
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device is closed, not waiting for reset notice that will never come");
+ rtnl_unlock();
+ while (test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) {
+ if (time_after(jiffies, timeout)) {
+ NL_SET_ERR_MSG_MOD(extack, "Activation incomplete");
+ rc = -ETIMEDOUT;
+ break;
+ }
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
+ NL_SET_ERR_MSG_MOD(extack, "Activation aborted");
+ rc = -ENODEV;
+ break;
+ }
+ msleep(50);
+ }
+ rtnl_lock();
+ if (!rc)
+ *actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+ clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
}
+
+ if (!rc) {
+ bnxt_print_device_info(bp);
+ if (netif_running(bp->dev)) {
+ mutex_lock(&bp->link_lock);
+ bnxt_report_link(bp);
+ mutex_unlock(&bp->link_lock);
+ }
+ *actions_performed |= BIT(action);
+ } else if (netif_running(bp->dev)) {
+ dev_close(bp->dev);
+ }
+ rtnl_unlock();
+ return rc;
}
-void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy)
+static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)
{
- struct bnxt_fw_health *health = bp->fw_health;
- u8 state;
+ bool rc = false;
+ u32 datalen;
+ u16 index;
+ u8 *buf;
+
+ if (bnxt_find_nvram_item(bp->dev, BNX_DIR_TYPE_VPD,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, NULL, &datalen) || !datalen) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd entry error");
+ return false;
+ }
- if (healthy)
- state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
- else
- state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
+ buf = kzalloc(datalen, GFP_KERNEL);
+ if (!buf) {
+ NL_SET_ERR_MSG_MOD(extack, "insufficient memory for nvm test");
+ return false;
+ }
- if (health->fatal)
- devlink_health_reporter_state_update(health->fw_fatal_reporter,
- state);
- else
- devlink_health_reporter_state_update(health->fw_reset_reporter,
- state);
+ if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error");
+ goto done;
+ }
+
+ if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST,
+ BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error");
+ goto done;
+ }
- health->fatal = false;
+ rc = true;
+
+done:
+ kfree(buf);
+ return rc;
}
-void bnxt_dl_health_recovery_done(struct bnxt *bp)
+static bool bnxt_dl_selftest_check(struct devlink *dl, unsigned int id,
+ struct netlink_ext_ack *extack)
{
- struct bnxt_fw_health *hlth = bp->fw_health;
-
- if (hlth->fatal)
- devlink_health_reporter_recovery_done(hlth->fw_fatal_reporter);
- else
- devlink_health_reporter_recovery_done(hlth->fw_reset_reporter);
+ return id == DEVLINK_ATTR_SELFTEST_ID_FLASH;
}
-static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
- struct netlink_ext_ack *extack);
+static enum devlink_selftest_status bnxt_dl_selftest_run(struct devlink *dl,
+ unsigned int id,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ if (id == DEVLINK_ATTR_SELFTEST_ID_FLASH)
+ return bnxt_nvm_test(bp, extack) ?
+ DEVLINK_SELFTEST_STATUS_PASS :
+ DEVLINK_SELFTEST_STATUS_FAIL;
+
+ return DEVLINK_SELFTEST_STATUS_SKIP;
+}
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
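
bnxt_dl_livepatch_activate() above derives one of four states per firmware target from the two status flags (INSTALL, ACTIVE) returned by HWRM_FW_LIVEPATCH_QUERY and picks the opcode accordingly: installed-only means activate, active-but-removed means deactivate, both bits means already activated, neither means skip. The sketch below captures just that decision; the LP_* names are hypothetical stand-ins for the firmware constants.

#include <stdio.h>

/* Stand-ins for the FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_* bits */
#define LP_FLAG_INSTALL	0x1u
#define LP_FLAG_ACTIVE	0x2u
#define LP_STATE(flags)	((flags) & (LP_FLAG_INSTALL | LP_FLAG_ACTIVE))

enum lp_action { LP_SKIP, LP_ACTIVATE, LP_DEACTIVATE, LP_ALREADY_ACTIVE };

/* Per-target decision made inside bnxt_dl_livepatch_activate() */
static enum lp_action lp_decide(unsigned int flags)
{
	switch (LP_STATE(flags)) {
	case 0:				/* not installed in NVM */
		return LP_SKIP;
	case LP_FLAG_INSTALL:		/* installed but not active */
		return LP_ACTIVATE;
	case LP_FLAG_ACTIVE:		/* active but removed from NVM */
		return LP_DEACTIVATE;
	default:			/* both bits set: already activated */
		return LP_ALREADY_ACTIVE;
	}
}

int main(void)
{
	static const char * const names[] = {
		"skip", "activate", "deactivate", "already active"
	};
	unsigned int flags;

	for (flags = 0; flags < 4; flags++)
		printf("flags=0x%x -> %s\n", flags, names[lp_decide(flags)]);
	return 0;
}

Only targets where an opcode is actually sent bump the installed counter, which is why the function can distinguish "already activated" (-EEXIST) from "no live patches found" (-ENOENT) after the loop.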
@@ -294,6 +676,13 @@ static const struct devlink_ops bnxt_dl_ops = {
#endif /* CONFIG_BNXT_SRIOV */
.info_get = bnxt_dl_info_get,
.flash_update = bnxt_dl_flash_update,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ .reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
+ .reload_down = bnxt_dl_reload_down,
+ .reload_up = bnxt_dl_reload_up,
+ .selftest_check = bnxt_dl_selftest_check,
+ .selftest_run = bnxt_dl_selftest_run,
};
static const struct devlink_ops bnxt_vf_dl_ops;
@@ -359,64 +748,155 @@ static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
dst->vu8 = (u8)val32;
}
-static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp,
- union devlink_param_value *nvm_cfg_ver)
+static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, u32 *nvm_cfg_ver)
{
- struct hwrm_nvm_get_variable_input req = {0};
+ struct hwrm_nvm_get_variable_input *req;
+ u16 bytes = BNXT_NVM_CFG_VER_BYTES;
+ u16 bits = BNXT_NVM_CFG_VER_BITS;
+ union devlink_param_value ver;
union bnxt_nvm_data *data;
dma_addr_t data_dma_addr;
+ int rc, i = 2;
+ u16 dim = 1;
+
+ rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
+ if (rc)
+ return rc;
+
+ data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
+ if (!data) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ /* earlier devices present as an array of raw bytes */
+ if (!BNXT_CHIP_P5(bp)) {
+ dim = 0;
+ i = 0;
+ bits *= 3; /* array of 3 version components */
+ bytes *= 4; /* copy whole word */
+ }
+
+ hwrm_req_hold(bp, req);
+ req->dest_data_addr = cpu_to_le64(data_dma_addr);
+ req->data_len = cpu_to_le16(bits);
+ req->option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER);
+ req->dimensions = cpu_to_le16(dim);
+
+ while (i >= 0) {
+ req->index_0 = cpu_to_le16(i--);
+ rc = hwrm_req_send_silent(bp, req);
+ if (rc)
+ goto exit;
+ bnxt_copy_from_nvm_data(&ver, data, bits, bytes);
+
+ if (BNXT_CHIP_P5(bp)) {
+ *nvm_cfg_ver <<= 8;
+ *nvm_cfg_ver |= ver.vu8;
+ } else {
+ *nvm_cfg_ver = ver.vu32;
+ }
+ }
+
+exit:
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int bnxt_dl_info_put(struct bnxt *bp, struct devlink_info_req *req,
+ enum bnxt_dl_version_type type, const char *key,
+ char *buf)
+{
+ if (!strlen(buf))
+ return 0;
+
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+ (!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI) ||
+ !strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_ROCE)))
+ return 0;
+
+ switch (type) {
+ case BNXT_VERSION_FIXED:
+ return devlink_info_version_fixed_put(req, key, buf);
+ case BNXT_VERSION_RUNNING:
+ return devlink_info_version_running_put(req, key, buf);
+ case BNXT_VERSION_STORED:
+ return devlink_info_version_stored_put(req, key, buf);
+ }
+ return 0;
+}
+
+#define BNXT_FW_SRT_PATCH "fw.srt.patch"
+#define BNXT_FW_CRT_PATCH "fw.crt.patch"
+
+static int bnxt_dl_livepatch_info_put(struct bnxt *bp,
+ struct devlink_info_req *req,
+ const char *key)
+{
+ struct hwrm_fw_livepatch_query_input *query;
+ struct hwrm_fw_livepatch_query_output *resp;
+ u16 flags;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
- data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
- &data_dma_addr, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH)
+ return 0;
- req.dest_data_addr = cpu_to_le64(data_dma_addr);
- req.data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS);
- req.option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER);
+ rc = hwrm_req_init(bp, query, HWRM_FW_LIVEPATCH_QUERY);
+ if (rc)
+ return rc;
- rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc)
- bnxt_copy_from_nvm_data(nvm_cfg_ver, data,
- BNXT_NVM_CFG_VER_BITS,
- BNXT_NVM_CFG_VER_BYTES);
+ if (!strcmp(key, BNXT_FW_SRT_PATCH))
+ query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW;
+ else if (!strcmp(key, BNXT_FW_CRT_PATCH))
+ query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW;
+ else
+ goto exit;
+
+ resp = hwrm_req_hold(bp, query);
+ rc = hwrm_req_send(bp, query);
+ if (rc)
+ goto exit;
+
+ flags = le16_to_cpu(resp->status_flags);
+ if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) {
+ resp->active_ver[sizeof(resp->active_ver) - 1] = '\0';
+ rc = devlink_info_version_running_put(req, key, resp->active_ver);
+ if (rc)
+ goto exit;
+ }
+
+ if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL) {
+ resp->install_ver[sizeof(resp->install_ver) - 1] = '\0';
+ rc = devlink_info_version_stored_put(req, key, resp->install_ver);
+ if (rc)
+ goto exit;
+ }
- dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
+exit:
+ hwrm_req_drop(bp, query);
return rc;
}
+#define HWRM_FW_VER_STR_LEN 16
+
static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
+ struct hwrm_nvm_get_dev_info_output nvm_dev_info;
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
- union devlink_param_value nvm_cfg_ver;
struct hwrm_ver_get_output *ver_resp;
char mgmt_ver[FW_VER_STR_LEN];
char roce_ver[FW_VER_STR_LEN];
- char fw_ver[FW_VER_STR_LEN];
+ char ncsi_ver[FW_VER_STR_LEN];
char buf[32];
+ u32 ver = 0;
int rc;
rc = devlink_info_driver_name_put(req, DRV_MODULE_NAME);
if (rc)
return rc;
- sprintf(buf, "%X", bp->chip_num);
- rc = devlink_info_version_fixed_put(req,
- DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
- if (rc)
- return rc;
-
- ver_resp = &bp->ver_resp;
- sprintf(buf, "%X", ver_resp->chip_rev);
- rc = devlink_info_version_fixed_put(req,
- DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
- if (rc)
- return rc;
-
- if (BNXT_PF(bp)) {
+ if (BNXT_PF(bp) && (bp->flags & BNXT_FLAG_DSN_VALID)) {
sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4],
bp->dsn[3], bp->dsn[2], bp->dsn[1], bp->dsn[0]);
@@ -425,32 +905,60 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
return rc;
}
- if (strlen(ver_resp->active_pkg_name)) {
- rc =
- devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW,
- ver_resp->active_pkg_name);
+ if (strlen(bp->board_serialno)) {
+ rc = devlink_info_board_serial_number_put(req, bp->board_serialno);
if (rc)
return rc;
}
- if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
- u32 ver = nvm_cfg_ver.vu32;
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ bp->board_partno);
+ if (rc)
+ return rc;
+
+ sprintf(buf, "%X", bp->chip_num);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
+ if (rc)
+ return rc;
+
+ ver_resp = &bp->ver_resp;
+ sprintf(buf, "%c%d", 'A' + ver_resp->chip_rev, ver_resp->chip_metal);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
+ if (rc)
+ return rc;
+
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ bp->nvm_cfg_ver);
+ if (rc)
+ return rc;
- sprintf(buf, "%X.%X.%X", (ver >> 16) & 0xF, (ver >> 8) & 0xF,
- ver & 0xF);
- rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_PSID, buf);
+ buf[0] = 0;
+ strncat(buf, ver_resp->active_pkg_name, HWRM_FW_VER_STR_LEN);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW, buf);
+ if (rc)
+ return rc;
+
+ if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &ver)) {
+ sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xff, (ver >> 8) & 0xff,
+ ver & 0xff);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ buf);
if (rc)
return rc;
}
if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) {
- snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor,
ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch);
- snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor,
ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch);
@@ -458,11 +966,11 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
ver_resp->roce_fw_major, ver_resp->roce_fw_minor,
ver_resp->roce_fw_build, ver_resp->roce_fw_patch);
} else {
- snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b,
ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b);
- snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b,
ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b);
@@ -470,37 +978,92 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b,
ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b);
}
- rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_APP, fw_ver);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
if (rc)
return rc;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
- rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
- if (rc)
- return rc;
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
+ bp->hwrm_ver_supp);
+ if (rc)
+ return rc;
+
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
+ if (rc)
+ return rc;
+
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_nvm_get_dev_info(bp, &nvm_dev_info);
+ if (rc ||
+ !(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID)) {
+ if (!bnxt_get_pkginfo(bp->dev, buf, sizeof(buf)))
+ return bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ return 0;
+ }
+
+ buf[0] = 0;
+ strncat(buf, nvm_dev_info.pkg_name, HWRM_FW_VER_STR_LEN);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW, buf);
+ if (rc)
+ return rc;
+
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.hwrm_fw_major, nvm_dev_info.hwrm_fw_minor,
+ nvm_dev_info.hwrm_fw_build, nvm_dev_info.hwrm_fw_patch);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ if (rc)
+ return rc;
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.mgmt_fw_major, nvm_dev_info.mgmt_fw_minor,
+ nvm_dev_info.mgmt_fw_build, nvm_dev_info.mgmt_fw_patch);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
+ if (rc)
+ return rc;
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.roce_fw_major, nvm_dev_info.roce_fw_minor,
+ nvm_dev_info.roce_fw_build, nvm_dev_info.roce_fw_patch);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc)
+ return rc;
- rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (BNXT_CHIP_P5(bp)) {
+ rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
if (rc)
return rc;
}
- return 0;
+ return bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_CRT_PATCH);
+
}
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
- int msg_len, union devlink_param_value *val)
+ union devlink_param_value *val)
{
struct hwrm_nvm_get_variable_input *req = msg;
struct bnxt_dl_nvm_param nvm_param;
+ struct hwrm_err_output *resp;
union bnxt_nvm_data *data;
dma_addr_t data_dma_addr;
int idx = 0, rc, i;
/* Get/Set NVM CFG parameter is supported only on PFs */
- if (BNXT_VF(bp))
+ if (BNXT_VF(bp)) {
+ hwrm_req_drop(bp, req);
return -EPERM;
+ }
for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
if (nvm_params[i].id == param_id) {
@@ -509,18 +1072,22 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
}
}
- if (i == ARRAY_SIZE(nvm_params))
+ if (i == ARRAY_SIZE(nvm_params)) {
+ hwrm_req_drop(bp, req);
return -EOPNOTSUPP;
+ }
if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
idx = bp->pf.port_id;
else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
- data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
- &data_dma_addr, GFP_KERNEL);
- if (!data)
+ data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
+
+ if (!data) {
+ hwrm_req_drop(bp, req);
return -ENOMEM;
+ }
req->dest_data_addr = cpu_to_le64(data_dma_addr);
req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
@@ -529,26 +1096,24 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
if (idx)
req->dimensions = cpu_to_le16(1);
+ resp = hwrm_req_hold(bp, req);
if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
nvm_param.dl_num_bytes);
- rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_send(bp, msg);
} else {
- rc = hwrm_send_message_silent(bp, msg, msg_len,
- HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_send_silent(bp, msg);
if (!rc) {
bnxt_copy_from_nvm_data(val, data,
nvm_param.nvm_num_bits,
nvm_param.dl_num_bytes);
} else {
- struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
-
if (resp->cmd_err ==
NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
rc = -EOPNOTSUPP;
}
}
- dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
+ hwrm_req_drop(bp, req);
if (rc == -EACCES)
netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
return rc;
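
The converted bnxt_hwrm_nvm_req() above follows the request API used throughout this diff: hwrm_req_init() allocates the request, hwrm_req_dma_slice() carves a DMA buffer out of the same allocation, hwrm_req_hold() keeps the response valid across hwrm_req_send(), and every exit path taken after a successful init must end in hwrm_req_drop(). A minimal sketch of that lifecycle, modeled only on the calls as they appear in these hunks, not on the driver's own code:

/* Illustrative sketch: init -> hold -> send -> drop; note that error
 * paths taken after hwrm_req_init() still have to drop the request.
 */
static int example_hwrm_query(struct bnxt *bp)
{
	struct hwrm_nvm_get_dev_info_output *resp;
	struct hwrm_nvm_get_dev_info_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);
	if (rc)
		return rc;		/* nothing allocated yet, no drop needed */

	resp = hwrm_req_hold(bp, req);	/* response buffer pinned until drop */
	rc = hwrm_req_send(bp, req);
	if (!rc)
		netdev_info(bp->dev, "stored fw %d.%d.%d.%d\n",
			    resp->hwrm_fw_major, resp->hwrm_fw_minor,
			    resp->hwrm_fw_build, resp->hwrm_fw_patch);
	hwrm_req_drop(bp, req);		/* releases both req and resp */
	return rc;
}
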
@@ -557,15 +1122,17 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
- struct hwrm_nvm_get_variable_input req = {0};
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ struct hwrm_nvm_get_variable_input *req;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
- rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
- if (!rc)
- if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
- ctx->val.vbool = !ctx->val.vbool;
+ rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
+ if (!rc && id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
+ ctx->val.vbool = !ctx->val.vbool;
return rc;
}
@@ -573,15 +1140,18 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
- struct hwrm_nvm_set_variable_input req = {0};
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ struct hwrm_nvm_set_variable_input *req;
+ int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_NVM_SET_VARIABLE);
+ if (rc)
+ return rc;
if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
ctx->val.vbool = !ctx->val.vbool;
- return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+ return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
}
static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
@@ -604,6 +1174,32 @@ static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
return 0;
}
+static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ return -EOPNOTSUPP;
+
+ ctx->val.vbool = bnxt_dl_get_remote_reset(dl);
+ return 0;
+}
+
+static int bnxt_remote_dev_reset_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc;
+
+ rc = bnxt_hwrm_remote_dev_reset_set(bp, ctx->val.vbool);
+ if (rc)
+ return rc;
+
+ bnxt_dl_set_remote_reset(dl, ctx->val.vbool);
+ return rc;
+}
+
static const struct devlink_param bnxt_dl_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
@@ -626,100 +1222,99 @@ static const struct devlink_param bnxt_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
NULL),
-};
-
-static const struct devlink_param bnxt_dl_port_params[] = {
+ /* keep REMOTE_DEV_RESET last, it is excluded based on caps */
+ DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ bnxt_remote_dev_reset_get,
+ bnxt_remote_dev_reset_set, NULL),
};
static int bnxt_dl_params_register(struct bnxt *bp)
{
+ int num_params = ARRAY_SIZE(bnxt_dl_params);
int rc;
if (bp->hwrm_spec_code < 0x10600)
return 0;
- rc = devlink_params_register(bp->dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
- if (rc) {
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ num_params--;
+
+ rc = devlink_params_register(bp->dl, bnxt_dl_params, num_params);
+ if (rc)
netdev_warn(bp->dev, "devlink_params_register failed. rc=%d\n",
rc);
- return rc;
- }
- rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params,
- ARRAY_SIZE(bnxt_dl_port_params));
- if (rc) {
- netdev_err(bp->dev, "devlink_port_params_register failed\n");
- devlink_params_unregister(bp->dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
- return rc;
- }
- devlink_params_publish(bp->dl);
-
- return 0;
+ return rc;
}
static void bnxt_dl_params_unregister(struct bnxt *bp)
{
+ int num_params = ARRAY_SIZE(bnxt_dl_params);
+
if (bp->hwrm_spec_code < 0x10600)
return;
- devlink_params_unregister(bp->dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
- devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
- ARRAY_SIZE(bnxt_dl_port_params));
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ num_params--;
+
+ devlink_params_unregister(bp->dl, bnxt_dl_params, num_params);
}
int bnxt_dl_register(struct bnxt *bp)
{
+ const struct devlink_ops *devlink_ops;
+ struct devlink_port_attrs attrs = {};
+ struct bnxt_dl *bp_dl;
struct devlink *dl;
int rc;
if (BNXT_PF(bp))
- dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
+ devlink_ops = &bnxt_dl_ops;
else
- dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl));
+ devlink_ops = &bnxt_vf_dl_ops;
+
+ dl = devlink_alloc(devlink_ops, sizeof(struct bnxt_dl), &bp->pdev->dev);
if (!dl) {
netdev_warn(bp->dev, "devlink_alloc failed\n");
return -ENOMEM;
}
- bnxt_link_bp_to_dl(bp, dl);
+ bp->dl = dl;
+ bp_dl = devlink_priv(dl);
+ bp_dl->bp = bp;
+ bnxt_dl_set_remote_reset(dl, true);
/* Add switchdev eswitch mode setting, if SRIOV supported */
if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) &&
bp->hwrm_spec_code > 0x10803)
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
- rc = devlink_register(dl, &bp->pdev->dev);
- if (rc) {
- netdev_warn(bp->dev, "devlink_register failed. rc=%d\n", rc);
- goto err_dl_free;
- }
-
if (!BNXT_PF(bp))
- return 0;
+ goto out;
- devlink_port_attrs_set(&bp->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
- bp->pf.port_id, false, 0, bp->dsn,
- sizeof(bp->dsn));
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = bp->pf.port_id;
+ memcpy(attrs.switch_id.id, bp->dsn, sizeof(bp->dsn));
+ attrs.switch_id.id_len = sizeof(bp->dsn);
+ devlink_port_attrs_set(&bp->dl_port, &attrs);
rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
if (rc) {
netdev_err(bp->dev, "devlink_port_register failed\n");
- goto err_dl_unreg;
+ goto err_dl_free;
}
rc = bnxt_dl_params_register(bp);
if (rc)
goto err_dl_port_unreg;
+ devlink_set_features(dl, DEVLINK_F_RELOAD);
+out:
+ devlink_register(dl);
return 0;
err_dl_port_unreg:
devlink_port_unregister(&bp->dl_port);
-err_dl_unreg:
- devlink_unregister(dl);
err_dl_free:
- bnxt_link_bp_to_dl(bp, NULL);
devlink_free(dl);
return rc;
}
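
The reworked bnxt_dl_register()/bnxt_dl_unregister() pair above also tracks the newer devlink core contract: devlink_alloc() now takes the parent device, devlink_register() takes only the devlink instance, returns void, and is called last, after ports and params are set up; teardown runs in the reverse order, unregistering the devlink instance first. A hedged sketch of just that ordering, using only the core calls visible in this hunk:

/* Illustrative ordering only; the real function also wires up the
 * private back-pointer, port attributes and devlink params shown above.
 */
static int example_dl_setup(struct bnxt *bp)
{
	struct devlink *dl;
	int rc;

	dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl),
			   &bp->pdev->dev);
	if (!dl)
		return -ENOMEM;

	rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
	if (rc) {
		devlink_free(dl);
		return rc;
	}

	devlink_register(dl);		/* last step; does not fail */
	return 0;
}

static void example_dl_teardown(struct devlink *dl, struct bnxt *bp)
{
	devlink_unregister(dl);		/* first step on teardown */
	devlink_port_unregister(&bp->dl_port);
	devlink_free(dl);
}
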
@@ -728,13 +1323,10 @@ void bnxt_dl_unregister(struct bnxt *bp)
{
struct devlink *dl = bp->dl;
- if (!dl)
- return;
-
+ devlink_unregister(dl);
if (BNXT_PF(bp)) {
bnxt_dl_params_unregister(bp);
devlink_port_unregister(&bp->dl_port);
}
- devlink_unregister(dl);
devlink_free(dl);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index 95f893f2a74d..b8105065367b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -13,6 +13,7 @@
/* Struct to hold housekeeping info needed by devlink interface */
struct bnxt_dl {
struct bnxt *bp; /* back ptr to the controlling dev */
+ bool remote_reset;
};
static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
@@ -20,17 +21,21 @@ static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
return ((struct bnxt_dl *)devlink_priv(dl))->bp;
}
-/* To clear devlink pointer from bp, pass NULL dl */
-static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
+static inline void bnxt_dl_remote_reload(struct bnxt *bp)
{
- bp->dl = dl;
+ devlink_remote_reload_actions_performed(bp->dl, 0,
+ BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
+}
- /* add a back pointer in dl to bp */
- if (dl) {
- struct bnxt_dl *bp_dl = devlink_priv(dl);
+static inline bool bnxt_dl_get_remote_reset(struct devlink *dl)
+{
+ return ((struct bnxt_dl *)devlink_priv(dl))->remote_reset;
+}
- bp_dl->bp = bp;
- }
+static inline void bnxt_dl_set_remote_reset(struct devlink *dl, bool value)
+{
+ ((struct bnxt_dl *)devlink_priv(dl))->remote_reset = value;
}
#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108
@@ -40,10 +45,10 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
#define NVM_OFF_ENABLE_SRIOV 401
#define NVM_OFF_NVM_CFG_VER 602
-#define BNXT_NVM_CFG_VER_BITS 24
-#define BNXT_NVM_CFG_VER_BYTES 4
+#define BNXT_NVM_CFG_VER_BITS 8
+#define BNXT_NVM_CFG_VER_BYTES 1
-#define BNXT_MSIX_VEC_MAX 1280
+#define BNXT_MSIX_VEC_MAX 512
#define BNXT_MSIX_VEC_MIN_MAX 128
enum bnxt_nvm_dir_type {
@@ -60,11 +65,17 @@ struct bnxt_dl_nvm_param {
u8 dl_num_bytes;
};
-void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
-void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy);
-void bnxt_dl_health_recovery_done(struct bnxt *bp);
+enum bnxt_dl_version_type {
+ BNXT_VERSION_FIXED,
+ BNXT_VERSION_RUNNING,
+ BNXT_VERSION_STORED,
+};
+
+void bnxt_devlink_health_fw_report(struct bnxt *bp);
+void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy);
+void bnxt_dl_health_fw_recovery_done(struct bnxt *bp);
void bnxt_dl_fw_reporters_create(struct bnxt *bp);
-void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all);
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp);
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 1f67e6729a2c..8cad15c458b3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -11,6 +11,8 @@
#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
+#include <linux/linkmode.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
@@ -18,16 +20,27 @@
#include <linux/firmware.h>
#include <linux/utsname.h>
#include <linux/time.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/timecounter.h>
+#include <net/netlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_ulp.h"
#include "bnxt_xdp.h"
+#include "bnxt_ptp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
#include "bnxt_coredump.h"
-#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
-#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
-#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
+
+#define BNXT_NVM_ERR_MSG(dev, extack, msg) \
+ do { \
+ if (extack) \
+ NL_SET_ERR_MSG_MOD(extack, msg); \
+ netdev_err(dev, "%s\n", msg); \
+ } while (0)
static u32 bnxt_get_msglevel(struct net_device *dev)
{
@@ -44,7 +57,9 @@ static void bnxt_set_msglevel(struct net_device *dev, u32 value)
}
static int bnxt_get_coalesce(struct net_device *dev,
- struct ethtool_coalesce *coal)
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_coal *hw_coal;
@@ -60,6 +75,9 @@ static int bnxt_get_coalesce(struct net_device *dev,
coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
+ if (hw_coal->flags &
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
+ kernel_coal->use_cqe_mode_rx = true;
hw_coal = &bp->tx_coal;
mult = hw_coal->bufs_per_record;
@@ -67,6 +85,9 @@ static int bnxt_get_coalesce(struct net_device *dev,
coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
+ if (hw_coal->flags &
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
+ kernel_coal->use_cqe_mode_tx = true;
coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
@@ -74,7 +95,9 @@ static int bnxt_get_coalesce(struct net_device *dev,
}
static int bnxt_set_coalesce(struct net_device *dev,
- struct ethtool_coalesce *coal)
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
bool update_stats = false;
@@ -91,12 +114,22 @@ static int bnxt_set_coalesce(struct net_device *dev,
}
}
+ if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
+ !(bp->coal_cap.cmpl_params &
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
+ return -EOPNOTSUPP;
+
hw_coal = &bp->rx_coal;
mult = hw_coal->bufs_per_record;
hw_coal->coal_ticks = coal->rx_coalesce_usecs;
hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
+ hw_coal->flags &=
+ ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+ if (kernel_coal->use_cqe_mode_rx)
+ hw_coal->flags |=
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
hw_coal = &bp->tx_coal;
mult = hw_coal->bufs_per_record;
@@ -104,6 +137,11 @@ static int bnxt_set_coalesce(struct net_device *dev,
hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
+ hw_coal->flags &=
+ ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+ if (kernel_coal->use_cqe_mode_tx)
+ hw_coal->flags |=
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
u32 stats_ticks = coal->stats_block_coalesce_usecs;
@@ -124,7 +162,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
}
reset_coalesce:
- if (netif_running(dev)) {
+ if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
if (update_stats) {
rc = bnxt_close_nic(bp, true, false);
if (!rc)
@@ -137,20 +175,23 @@ reset_coalesce:
return rc;
}
-static const char * const bnxt_ring_stats_str[] = {
+static const char * const bnxt_ring_rx_stats_str[] = {
"rx_ucast_packets",
"rx_mcast_packets",
"rx_bcast_packets",
"rx_discards",
- "rx_drops",
+ "rx_errors",
"rx_ucast_bytes",
"rx_mcast_bytes",
"rx_bcast_bytes",
+};
+
+static const char * const bnxt_ring_tx_stats_str[] = {
"tx_ucast_packets",
"tx_mcast_packets",
"tx_bcast_packets",
+ "tx_errors",
"tx_discards",
- "tx_drops",
"tx_ucast_bytes",
"tx_mcast_bytes",
"tx_bcast_bytes",
@@ -169,11 +210,16 @@ static const char * const bnxt_ring_tpa2_stats_str[] = {
"rx_tpa_pkt",
"rx_tpa_bytes",
"rx_tpa_errors",
+ "rx_tpa_events",
};
-static const char * const bnxt_ring_sw_stats_str[] = {
+static const char * const bnxt_rx_sw_stats_str[] = {
"rx_l4_csum_errors",
+ "rx_resets",
"rx_buf_errors",
+};
+
+static const char * const bnxt_cmn_sw_stats_str[] = {
"missed_irqs",
};
@@ -287,12 +333,10 @@ static const char * const bnxt_ring_sw_stats_str[] = {
BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
BNXT_TX_STATS_PRI_ENTRY(counter, 7)
-#define BNXT_PCIE_STATS_ENTRY(counter) \
- { BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) }
-
enum {
RX_TOTAL_DISCARDS,
TX_TOTAL_DISCARDS,
+ RX_NETPOLL_DISCARDS,
};
static struct {
@@ -301,8 +345,14 @@ static struct {
} bnxt_sw_func_stats[] = {
{0, "rx_total_discard_pkts"},
{0, "tx_total_discard_pkts"},
+ {0, "rx_total_netpoll_discards"},
};
+#define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str)
+#define NUM_RING_CMN_SW_STATS ARRAY_SIZE(bnxt_cmn_sw_stats_str)
+#define NUM_RING_RX_HW_STATS ARRAY_SIZE(bnxt_ring_rx_stats_str)
+#define NUM_RING_TX_HW_STATS ARRAY_SIZE(bnxt_ring_tx_stats_str)
+
static const struct {
long offset;
char string[ETH_GSTRING_LEN];
@@ -405,6 +455,8 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
+ BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
+ BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
};
static const struct {
@@ -443,24 +495,6 @@ static const struct {
BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};
-static const struct {
- long offset;
- char string[ETH_GSTRING_LEN];
-} bnxt_pcie_stats_arr[] = {
- BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity),
- BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity),
- BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity),
- BNXT_PCIE_STATS_ENTRY(pcie_link_integrity),
- BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate),
- BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate),
- BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics),
- BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics),
- BNXT_PCIE_STATS_ENTRY(pcie_equalization_time),
- BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]),
- BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]),
- BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram),
-};
-
#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI \
@@ -468,26 +502,30 @@ static const struct {
ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
-#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
if (BNXT_SUPPORTS_TPA(bp)) {
- if (bp->max_tpa_v2)
- return ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
- return ARRAY_SIZE(bnxt_ring_tpa_stats_str);
+ if (bp->max_tpa_v2) {
+ if (BNXT_CHIP_P5_THOR(bp))
+ return BNXT_NUM_TPA_RING_STATS_P5;
+ return BNXT_NUM_TPA_RING_STATS_P5_SR2;
+ }
+ return BNXT_NUM_TPA_RING_STATS;
}
return 0;
}
static int bnxt_get_num_ring_stats(struct bnxt *bp)
{
- int num_stats;
+ int rx, tx, cmn;
- num_stats = ARRAY_SIZE(bnxt_ring_stats_str) +
- ARRAY_SIZE(bnxt_ring_sw_stats_str) +
- bnxt_get_num_tpa_ring_stats(bp);
- return num_stats * bp->cp_nr_rings;
+ rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
+ bnxt_get_num_tpa_ring_stats(bp);
+ tx = NUM_RING_TX_HW_STATS;
+ cmn = NUM_RING_CMN_SW_STATS;
+ return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
+ cmn * bp->cp_nr_rings;
}
static int bnxt_get_num_stats(struct bnxt *bp)
@@ -506,9 +544,6 @@ static int bnxt_get_num_stats(struct bnxt *bp)
num_stats += BNXT_NUM_STATS_PRI;
}
- if (bp->flags & BNXT_FLAG_PCIE_STATS)
- num_stats += BNXT_NUM_PCIE_STATS;
-
return num_stats;
}
@@ -528,13 +563,29 @@ static int bnxt_get_sset_count(struct net_device *dev, int sset)
}
}
+static bool is_rx_ring(struct bnxt *bp, int ring_num)
+{
+ return ring_num < bp->rx_nr_rings;
+}
+
+static bool is_tx_ring(struct bnxt *bp, int ring_num)
+{
+ int tx_base = 0;
+
+ if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
+ tx_base = bp->rx_nr_rings;
+
+ if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
+ return true;
+ return false;
+}
+
static void bnxt_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *buf)
{
u32 i, j = 0;
struct bnxt *bp = netdev_priv(dev);
- u32 stat_fields = ARRAY_SIZE(bnxt_ring_stats_str) +
- bnxt_get_num_tpa_ring_stats(bp);
+ u32 tpa_stats;
if (!bp->bnapi) {
j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
@@ -544,22 +595,49 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
bnxt_sw_func_stats[i].counter = 0;
+ tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- __le64 *hw_stats = (__le64 *)cpr->hw_stats;
+ u64 *sw_stats = cpr->stats.sw_stats;
+ u64 *sw;
int k;
- for (k = 0; k < stat_fields; j++, k++)
- buf[j] = le64_to_cpu(hw_stats[k]);
- buf[j++] = cpr->rx_l4_csum_errors;
- buf[j++] = cpr->rx_buf_errors;
- buf[j++] = cpr->missed_irqs;
+ if (is_rx_ring(bp, i)) {
+ for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
+ buf[j] = sw_stats[k];
+ }
+ if (is_tx_ring(bp, i)) {
+ k = NUM_RING_RX_HW_STATS;
+ for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
+ j++, k++)
+ buf[j] = sw_stats[k];
+ }
+ if (!tpa_stats || !is_rx_ring(bp, i))
+ goto skip_tpa_ring_stats;
+
+ k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
+ for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
+ tpa_stats; j++, k++)
+ buf[j] = sw_stats[k];
+
+skip_tpa_ring_stats:
+ sw = (u64 *)&cpr->sw_stats.rx;
+ if (is_rx_ring(bp, i)) {
+ for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
+ buf[j] = sw[k];
+ }
+
+ sw = (u64 *)&cpr->sw_stats.cmn;
+ for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
+ buf[j] = sw[k];
bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
- le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
+ BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts);
bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
- le64_to_cpu(cpr->hw_stats->tx_discard_pkts);
+ BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts);
+ bnxt_sw_func_stats[RX_NETPOLL_DISCARDS].counter +=
+ cpr->sw_stats.rx.rx_netpoll_discards;
}
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
@@ -567,60 +645,50 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
skip_ring_stats:
if (bp->flags & BNXT_FLAG_PORT_STATS) {
- __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
+ u64 *port_stats = bp->port_stats.sw_stats;
- for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) {
- buf[j] = le64_to_cpu(*(port_stats +
- bnxt_port_stats_arr[i].offset));
- }
+ for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
+ buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
}
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
- __le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
- __le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext;
+ u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
+ u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
- buf[j] = le64_to_cpu(*(rx_port_stats_ext +
- bnxt_port_stats_ext_arr[i].offset));
+ buf[j] = *(rx_port_stats_ext +
+ bnxt_port_stats_ext_arr[i].offset);
}
for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
- buf[j] = le64_to_cpu(*(tx_port_stats_ext +
- bnxt_tx_port_stats_ext_arr[i].offset));
+ buf[j] = *(tx_port_stats_ext +
+ bnxt_tx_port_stats_ext_arr[i].offset);
}
if (bp->pri2cos_valid) {
for (i = 0; i < 8; i++, j++) {
long n = bnxt_rx_bytes_pri_arr[i].base_off +
- bp->pri2cos[i];
+ bp->pri2cos_idx[i];
- buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
+ buf[j] = *(rx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_rx_pkts_pri_arr[i].base_off +
- bp->pri2cos[i];
+ bp->pri2cos_idx[i];
- buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
+ buf[j] = *(rx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_tx_bytes_pri_arr[i].base_off +
- bp->pri2cos[i];
+ bp->pri2cos_idx[i];
- buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
+ buf[j] = *(tx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_tx_pkts_pri_arr[i].base_off +
- bp->pri2cos[i];
+ bp->pri2cos_idx[i];
- buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
+ buf[j] = *(tx_port_stats_ext + n);
}
}
}
- if (bp->flags & BNXT_FLAG_PCIE_STATS) {
- __le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats;
-
- for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) {
- buf[j] = le64_to_cpu(*(pcie_stats +
- bnxt_pcie_stats_arr[i].offset));
- }
- }
}
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
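
After this split, each ring type contributes a different number of counters, so the stats buffer is no longer a single per-ring constant multiplied by cp_nr_rings. A standalone illustration of the arithmetic bnxt_get_num_ring_stats() now performs; the HW/SW counts below match the string arrays earlier in this file, while the TPA count and ring counts are assumed example values:

#include <stdio.h>

int main(void)
{
	int num_rx_hw = 8, num_tx_hw = 8;	/* per-ring HW counter names */
	int num_rx_sw = 3, num_cmn_sw = 1;	/* per-ring SW counter names */
	int num_tpa = 6;			/* chip dependent, 0 without TPA */
	int rx_nr_rings = 4, tx_nr_rings = 4, cp_nr_rings = 4;

	int rx = num_rx_hw + num_rx_sw + num_tpa;
	int tx = num_tx_hw;
	int cmn = num_cmn_sw;

	printf("ring stats entries: %d\n",
	       rx * rx_nr_rings + tx * tx_nr_rings + cmn * cp_nr_rings);
	return 0;
}
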
@@ -632,31 +700,48 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < bp->cp_nr_rings; i++) {
- num_str = ARRAY_SIZE(bnxt_ring_stats_str);
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_ring_stats_str[j]);
- buf += ETH_GSTRING_LEN;
+ if (is_rx_ring(bp, i)) {
+ num_str = NUM_RING_RX_HW_STATS;
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_rx_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
}
- if (!BNXT_SUPPORTS_TPA(bp))
+ if (is_tx_ring(bp, i)) {
+ num_str = NUM_RING_TX_HW_STATS;
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_tx_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
+ num_str = bnxt_get_num_tpa_ring_stats(bp);
+ if (!num_str || !is_rx_ring(bp, i))
goto skip_tpa_stats;
- if (bp->max_tpa_v2) {
- num_str = ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
+ if (bp->max_tpa_v2)
str = bnxt_ring_tpa2_stats_str;
- } else {
- num_str = ARRAY_SIZE(bnxt_ring_tpa_stats_str);
+ else
str = bnxt_ring_tpa_stats_str;
- }
+
for (j = 0; j < num_str; j++) {
sprintf(buf, "[%d]: %s", i, str[j]);
buf += ETH_GSTRING_LEN;
}
skip_tpa_stats:
- num_str = ARRAY_SIZE(bnxt_ring_sw_stats_str);
+ if (is_rx_ring(bp, i)) {
+ num_str = NUM_RING_RX_SW_STATS;
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_rx_sw_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
+ num_str = NUM_RING_CMN_SW_STATS;
for (j = 0; j < num_str; j++) {
sprintf(buf, "[%d]: %s", i,
- bnxt_ring_sw_stats_str[j]);
+ bnxt_cmn_sw_stats_str[j]);
buf += ETH_GSTRING_LEN;
}
}
@@ -704,12 +789,6 @@ skip_tpa_stats:
}
}
}
- if (bp->flags & BNXT_FLAG_PCIE_STATS) {
- for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) {
- strcpy(buf, bnxt_pcie_stats_arr[i].string);
- buf += ETH_GSTRING_LEN;
- }
- }
break;
case ETH_SS_TEST:
if (bp->num_tests)
@@ -724,12 +803,21 @@ skip_tpa_stats:
}
static void bnxt_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
- ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
- ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
+ ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+ kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+ } else {
+ ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
+ ering->rx_jumbo_max_pending = 0;
+ kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
+ }
ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
ering->rx_pending = bp->rx_ring_size;
@@ -738,13 +826,15 @@ static void bnxt_get_ringparam(struct net_device *dev,
}
static int bnxt_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
(ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
- (ering->tx_pending <= MAX_SKB_FRAGS))
+ (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
return -EINVAL;
if (netif_running(dev))
@@ -766,16 +856,22 @@ static void bnxt_get_channels(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int max_rx_rings, max_tx_rings, tcs;
- int max_tx_sch_inputs;
+ int max_tx_sch_inputs, tx_grps;
/* Get the most up-to-date max_tx_sch_inputs. */
- if (BNXT_NEW_RM(bp))
+ if (netif_running(dev) && BNXT_NEW_RM(bp))
bnxt_hwrm_func_resc_qcaps(bp, false);
max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
if (max_tx_sch_inputs)
max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
+
+ tcs = netdev_get_num_tc(dev);
+ tx_grps = max(tcs, 1);
+ if (bp->tx_nr_rings_xdp)
+ tx_grps++;
+ max_tx_rings /= tx_grps;
channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
@@ -785,7 +881,6 @@ static void bnxt_get_channels(struct net_device *dev,
if (max_tx_sch_inputs)
max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
- tcs = netdev_get_num_tc(dev);
if (tcs > 1)
max_tx_rings /= tcs;
@@ -848,6 +943,13 @@ static int bnxt_set_channels(struct net_device *dev,
return rc;
}
+ if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
+ bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
+ netif_is_rxfh_configured(dev)) {
+ netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
+ return -EINVAL;
+ }
+
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
@@ -1029,7 +1131,7 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
+ fallthrough;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -1048,7 +1150,7 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
+ fallthrough;
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -1195,8 +1297,12 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return rc;
}
-static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
+u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
return HW_HASH_INDEX_SIZE;
}
@@ -1210,7 +1316,7 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_vnic_info *vnic;
- int i = 0;
+ u32 i, tbl_size;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
@@ -1219,9 +1325,10 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
return 0;
vnic = &bp->vnic_info[0];
- if (indir && vnic->rss_table) {
- for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
- indir[i] = le16_to_cpu(vnic->rss_table[i]);
+ if (indir && bp->rss_indir_tbl) {
+ tbl_size = bnxt_get_rxfh_indir_size(dev);
+ for (i = 0; i < tbl_size; i++)
+ indir[i] = bp->rss_indir_tbl[i];
}
if (key && vnic->rss_hash_key)
@@ -1230,15 +1337,43 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
return 0;
}
+static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ if (hfunc && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (key)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
+
+ for (i = 0; i < tbl_size; i++)
+ bp->rss_indir_tbl[i] = indir[i];
+ pad = bp->rss_indir_tbl_entries - tbl_size;
+ if (pad)
+ memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+ }
+
+ if (netif_running(bp->dev)) {
+ bnxt_close_nic(bp, false, false);
+ rc = bnxt_open_nic(bp, false, false);
+ }
+ return rc;
+}
+
static void bnxt_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct bnxt *bp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = bnxt_get_num_stats(bp);
info->testinfo_len = bp->num_tests;
/* TODO CHIMP_FW: eeprom dump details */
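
The bnxt_set_rxfh() hunk above copies the user-supplied indirection table into bp->rss_indir_tbl and zero-pads the remaining entries up to rss_indir_tbl_entries before reopening the NIC. A self-contained sketch of that copy-and-pad step; the helper name and types here are stand-ins, not the driver's:

#include <stdint.h>
#include <string.h>

static void copy_indir_tbl(uint16_t *dev_tbl, unsigned int dev_entries,
			   const uint32_t *user_tbl, unsigned int user_entries)
{
	unsigned int i;

	for (i = 0; i < user_entries; i++)
		dev_tbl[i] = user_tbl[i];	/* ethtool passes u32 entries */
	if (dev_entries > user_entries)		/* zero the padding tail */
		memset(&dev_tbl[i], 0,
		       (dev_entries - user_entries) * sizeof(dev_tbl[0]));
}
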
@@ -1247,6 +1382,63 @@ static void bnxt_get_drvinfo(struct net_device *dev,
info->regdump_len = 0;
}
+static int bnxt_get_regs_len(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int reg_len;
+
+ if (!BNXT_PF(bp))
+ return -EOPNOTSUPP;
+
+ reg_len = BNXT_PXP_REG_LEN;
+
+ if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
+ reg_len += sizeof(struct pcie_ctx_hw_stats);
+
+ return reg_len;
+}
+
+static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *_p)
+{
+ struct pcie_ctx_hw_stats *hw_pcie_stats;
+ struct hwrm_pcie_qstats_input *req;
+ struct bnxt *bp = netdev_priv(dev);
+ dma_addr_t hw_pcie_stats_addr;
+ int rc;
+
+ regs->version = 0;
+ bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
+ return;
+
+ if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
+ return;
+
+ hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
+ &hw_pcie_stats_addr);
+ if (!hw_pcie_stats) {
+ hwrm_req_drop(bp, req);
+ return;
+ }
+
+ regs->version = 1;
+ hwrm_req_hold(bp, req); /* hold on to slice */
+ req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
+ req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ __le64 *src = (__le64 *)hw_pcie_stats;
+ u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
+ int i;
+
+ for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
+ dst[i] = le64_to_cpu(src[i]);
+ }
+ hwrm_req_drop(bp, req);
+}
+
static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bnxt *bp = netdev_priv(dev);
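
The new bnxt_get_regs() above lays its dump out as the fixed-size PXP register block followed, when the firmware advertises PCIe stats support, by a pcie_ctx_hw_stats structure already converted to host order, and it bumps regs->version from 0 to 1 to signal the longer layout. A hedged sketch of how a consumer could split such a dump; the helper and struct here are hypothetical:

#include <stddef.h>
#include <stdint.h>

struct regs_view {
	const void *pxp;		/* first pxp_len bytes */
	const uint64_t *pcie_stats;	/* NULL when version == 0 */
};

static struct regs_view split_regs(const void *dump, uint32_t version,
				   size_t pxp_len)
{
	struct regs_view v = { .pxp = dump, .pcie_stats = NULL };

	if (version == 1)
		v.pcie_stats = (const void *)((const char *)dump + pxp_len);
	return v;
}
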
@@ -1377,6 +1569,53 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
(fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB; \
}
+#define BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, name) \
+{ \
+ if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_50GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 50000baseCR_Full); \
+ if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_100GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 100000baseCR2_Full);\
+ if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_200GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 200000baseCR4_Full);\
+}
+
+#define BNXT_ETHTOOL_TO_FW_PAM4_SPDS(fw_speeds, lk_ksettings, name) \
+{ \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 50000baseCR_Full)) \
+ (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_50GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 100000baseCR2_Full)) \
+ (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_100GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 200000baseCR4_Full)) \
+ (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_200GB; \
+}
+
+static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ u16 fec_cfg = link_info->fec_cfg;
+
+ if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ lk_ksettings->link_modes.advertising);
+ return;
+ }
+ if (fec_cfg & BNXT_FEC_ENC_BASE_R)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ lk_ksettings->link_modes.advertising);
+ if (fec_cfg & BNXT_FEC_ENC_RS)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ lk_ksettings->link_modes.advertising);
+ if (fec_cfg & BNXT_FEC_ENC_LLRS)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ lk_ksettings->link_modes.advertising);
+}
+
static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
struct ethtool_link_ksettings *lk_ksettings)
{
@@ -1387,6 +1626,9 @@ static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
fw_pause = link_info->auto_pause_setting;
BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
+ fw_speeds = link_info->advertising_pam4;
+ BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, advertising);
+ bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
}
static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
@@ -1400,22 +1642,53 @@ static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
lp_advertising);
+ fw_speeds = link_info->lp_auto_pam4_link_speeds;
+ BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, lp_advertising);
+}
+
+static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ u16 fec_cfg = link_info->fec_cfg;
+
+ if (fec_cfg & BNXT_FEC_NONE) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ lk_ksettings->link_modes.supported);
+ return;
+ }
+ if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ lk_ksettings->link_modes.supported);
+ if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ lk_ksettings->link_modes.supported);
+ if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ lk_ksettings->link_modes.supported);
}
static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
struct ethtool_link_ksettings *lk_ksettings)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
u16 fw_speeds = link_info->support_speeds;
BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
+ fw_speeds = link_info->support_pam4_speeds;
+ BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported);
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
- Asym_Pause);
+ if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ Pause);
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ Asym_Pause);
+ }
- if (link_info->support_auto_speeds)
+ if (link_info->support_auto_speeds ||
+ link_info->support_pam4_auto_speeds)
ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
Autoneg);
+ bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
}
u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
@@ -1506,55 +1779,86 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
return 0;
}
-static u32 bnxt_get_fw_speed(struct net_device *dev, u32 ethtool_speed)
+static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
+ u16 support_pam4_spds = link_info->support_pam4_speeds;
u16 support_spds = link_info->support_speeds;
- u32 fw_speed = 0;
+ u8 sig_mode = BNXT_SIG_MODE_NRZ;
+ u16 fw_speed = 0;
switch (ethtool_speed) {
case SPEED_100:
if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
break;
case SPEED_1000:
if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
break;
case SPEED_2500:
if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
break;
case SPEED_10000:
if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
break;
case SPEED_20000:
if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
break;
case SPEED_25000:
if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
break;
case SPEED_40000:
if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
break;
case SPEED_50000:
- if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_50GB) {
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
+ } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ }
break;
case SPEED_100000:
- if (support_spds & BNXT_LINK_SPEED_MSK_100GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_100GB) {
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
+ } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ }
break;
- default:
- netdev_err(dev, "unsupported speed!\n");
+ case SPEED_200000:
+ if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ }
break;
}
- return fw_speed;
+
+ if (!fw_speed) {
+ netdev_err(dev, "unsupported speed!\n");
+ return -EINVAL;
+ }
+
+ if (link_info->req_link_speed == fw_speed &&
+ link_info->req_signal_mode == sig_mode &&
+ link_info->autoneg == 0)
+ return -EALREADY;
+
+ link_info->req_link_speed = fw_speed;
+ link_info->req_signal_mode = sig_mode;
+ link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
+ link_info->autoneg = 0;
+ link_info->advertising = 0;
+ link_info->advertising_pam4 = 0;
+
+ return 0;
}
u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
@@ -1586,7 +1890,6 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
struct bnxt_link_info *link_info = &bp->link_info;
const struct ethtool_link_settings *base = &lk_ksettings->base;
bool set_pause = false;
- u16 fw_advertising = 0;
u32 speed;
int rc = 0;
@@ -1595,19 +1898,24 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
mutex_lock(&bp->link_lock);
if (base->autoneg == AUTONEG_ENABLE) {
- BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
+ link_info->advertising = 0;
+ link_info->advertising_pam4 = 0;
+ BNXT_ETHTOOL_TO_FW_SPDS(link_info->advertising, lk_ksettings,
advertising);
+ BNXT_ETHTOOL_TO_FW_PAM4_SPDS(link_info->advertising_pam4,
+ lk_ksettings, advertising);
link_info->autoneg |= BNXT_AUTONEG_SPEED;
- if (!fw_advertising)
+ if (!link_info->advertising && !link_info->advertising_pam4) {
link_info->advertising = link_info->support_auto_speeds;
- else
- link_info->advertising = fw_advertising;
+ link_info->advertising_pam4 =
+ link_info->support_pam4_auto_speeds;
+ }
/* any change to autoneg will cause link change, therefore the
* driver should put back the original pause setting in autoneg
*/
- set_pause = true;
+ if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
+ set_pause = true;
} else {
- u16 fw_speed;
u8 phy_type = link_info->phy_type;
if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
@@ -1623,15 +1931,12 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
goto set_setting_exit;
}
speed = base->speed;
- fw_speed = bnxt_get_fw_speed(dev, speed);
- if (!fw_speed) {
- rc = -EINVAL;
+ rc = bnxt_force_link_speed(dev, speed);
+ if (rc) {
+ if (rc == -EALREADY)
+ rc = 0;
goto set_setting_exit;
}
- link_info->req_link_speed = fw_speed;
- link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
- link_info->autoneg = 0;
- link_info->advertising = 0;
}
if (netif_running(dev))
@@ -1642,6 +1947,129 @@ set_setting_exit:
return rc;
}
+static int bnxt_get_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fec)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info;
+ u8 active_fec;
+ u16 fec_cfg;
+
+ link_info = &bp->link_info;
+ fec_cfg = link_info->fec_cfg;
+ active_fec = link_info->active_fec_sig_mode &
+ PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
+ if (fec_cfg & BNXT_FEC_NONE) {
+ fec->fec = ETHTOOL_FEC_NONE;
+ fec->active_fec = ETHTOOL_FEC_NONE;
+ return 0;
+ }
+ if (fec_cfg & BNXT_FEC_AUTONEG)
+ fec->fec |= ETHTOOL_FEC_AUTO;
+ if (fec_cfg & BNXT_FEC_ENC_BASE_R)
+ fec->fec |= ETHTOOL_FEC_BASER;
+ if (fec_cfg & BNXT_FEC_ENC_RS)
+ fec->fec |= ETHTOOL_FEC_RS;
+ if (fec_cfg & BNXT_FEC_ENC_LLRS)
+ fec->fec |= ETHTOOL_FEC_LLRS;
+
+ switch (active_fec) {
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
+ fec->active_fec |= ETHTOOL_FEC_BASER;
+ break;
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
+ fec->active_fec |= ETHTOOL_FEC_RS;
+ break;
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
+ fec->active_fec |= ETHTOOL_FEC_LLRS;
+ break;
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
+ fec->active_fec |= ETHTOOL_FEC_OFF;
+ break;
+ }
+ return 0;
+}
+
+static void bnxt_get_fec_stats(struct net_device *dev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u64 *rx;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
+ return;
+
+ rx = bp->rx_port_stats_ext.sw_stats;
+ fec_stats->corrected_bits.total =
+ *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));
+}
+
+static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
+ u32 fec)
+{
+ u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;
+
+ if (fec & ETHTOOL_FEC_BASER)
+ fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
+ else if (fec & ETHTOOL_FEC_RS)
+ fw_fec |= BNXT_FEC_RS_ON(link_info);
+ else if (fec & ETHTOOL_FEC_LLRS)
+ fw_fec |= BNXT_FEC_LLRS_ON;
+ return fw_fec;
+}
+
+static int bnxt_set_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct hwrm_port_phy_cfg_input *req;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info;
+ u32 new_cfg, fec = fecparam->fec;
+ u16 fec_cfg;
+ int rc;
+
+ link_info = &bp->link_info;
+ fec_cfg = link_info->fec_cfg;
+ if (fec_cfg & BNXT_FEC_NONE)
+ return -EOPNOTSUPP;
+
+ if (fec & ETHTOOL_FEC_OFF) {
+ new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
+ BNXT_FEC_ALL_OFF(link_info);
+ goto apply_fec;
+ }
+ if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
+ ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
+ ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
+ ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
+ return -EINVAL;
+
+ if (fec & ETHTOOL_FEC_AUTO) {
+ if (!link_info->autoneg)
+ return -EINVAL;
+ new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
+ } else {
+ new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
+ }
+
+apply_fec:
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
+ if (rc)
+ return rc;
+ req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+ rc = hwrm_req_send(bp, req);
+ /* update current settings */
+ if (!rc) {
+ mutex_lock(&bp->link_lock);
+ bnxt_update_link(bp, false);
+ mutex_unlock(&bp->link_lock);
+ }
+ return rc;
+}
+
static void bnxt_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
@@ -1655,6 +2083,22 @@ static void bnxt_get_pauseparam(struct net_device *dev,
epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
}
+static void bnxt_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *epstat)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u64 *rx, *tx;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
+ return;
+
+ rx = bp->port_stats.sw_stats;
+ tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+
+ epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
+ epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
+}
+
static int bnxt_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
@@ -1662,17 +2106,18 @@ static int bnxt_set_pauseparam(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
- if (!BNXT_PHY_CFG_ABLE(bp))
+ if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
return -EOPNOTSUPP;
+ mutex_lock(&bp->link_lock);
if (epause->autoneg) {
- if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
- return -EINVAL;
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+ rc = -EINVAL;
+ goto pause_exit;
+ }
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
- if (bp->hwrm_spec_code >= 0x10201)
- link_info->req_flow_ctrl =
- PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
+ link_info->req_flow_ctrl = 0;
} else {
/* when transition from auto pause to force pause,
* force a link change
@@ -1690,6 +2135,9 @@ static int bnxt_set_pauseparam(struct net_device *dev,
if (netif_running(dev))
rc = bnxt_hwrm_set_pause(bp);
+
+pause_exit:
+ mutex_unlock(&bp->link_lock);
return rc;
}
@@ -1698,7 +2146,29 @@ static u32 bnxt_get_link(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
/* TODO: handle MF, VF, driver close case */
- return bp->link_info.link_up;
+ return BNXT_LINK_IS_UP(bp);
+}
+
+int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
+ struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
+{
+ struct hwrm_nvm_get_dev_info_output *resp;
+ struct hwrm_nvm_get_dev_info_input *req;
+ int rc;
+
+ if (BNXT_VF(bp))
+ return -EOPNOTSUPP;
+
+ rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);
+ if (rc)
+ return rc;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc)
+ memcpy(nvm_dev_info, resp, sizeof(*resp));
+ hwrm_req_drop(bp, req);
+ return rc;
}
static void bnxt_print_admin_err(struct bnxt *bp)
@@ -1706,58 +2176,87 @@ static void bnxt_print_admin_err(struct bnxt *bp)
netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}
-static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
- u16 ext, u16 *index, u32 *item_length,
- u32 *data_length);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
-static int bnxt_flash_nvram(struct net_device *dev,
- u16 dir_type,
- u16 dir_ordinal,
- u16 dir_ext,
- u16 dir_attr,
- const u8 *data,
- size_t data_len)
+int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+ u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+ u32 dir_item_len, const u8 *data,
+ size_t data_len)
{
struct bnxt *bp = netdev_priv(dev);
+ struct hwrm_nvm_write_input *req;
int rc;
- struct hwrm_nvm_write_input req = {0};
- dma_addr_t dma_handle;
- u8 *kmem;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
+ if (rc)
+ return rc;
- req.dir_type = cpu_to_le16(dir_type);
- req.dir_ordinal = cpu_to_le16(dir_ordinal);
- req.dir_ext = cpu_to_le16(dir_ext);
- req.dir_attr = cpu_to_le16(dir_attr);
- req.dir_data_length = cpu_to_le32(data_len);
+ if (data_len && data) {
+ dma_addr_t dma_handle;
+ u8 *kmem;
- kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
- GFP_KERNEL);
- if (!kmem) {
- netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
- (unsigned)data_len);
- return -ENOMEM;
+ kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
+ if (!kmem) {
+ hwrm_req_drop(bp, req);
+ return -ENOMEM;
+ }
+
+ req->dir_data_length = cpu_to_le32(data_len);
+
+ memcpy(kmem, data, data_len);
+ req->host_src_addr = cpu_to_le64(dma_handle);
}
- memcpy(kmem, data, data_len);
- req.host_src_addr = cpu_to_le64(dma_handle);
- rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
- dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
+ hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
+ req->dir_type = cpu_to_le16(dir_type);
+ req->dir_ordinal = cpu_to_le16(dir_ordinal);
+ req->dir_ext = cpu_to_le16(dir_ext);
+ req->dir_attr = cpu_to_le16(dir_attr);
+ req->dir_item_length = cpu_to_le32(dir_item_len);
+ rc = hwrm_req_send(bp, req);
if (rc == -EACCES)
bnxt_print_admin_err(bp);
return rc;
}
-static int bnxt_firmware_reset(struct net_device *dev,
- u16 dir_type)
+int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
+ u8 self_reset, u8 flags)
{
- struct hwrm_fw_reset_input req = {0};
struct bnxt *bp = netdev_priv(dev);
+ struct hwrm_fw_reset_input *req;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
+ if (!bnxt_hwrm_reset_permitted(bp)) {
+ netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver");
+ return -EPERM;
+ }
+
+ rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
+ if (rc)
+ return rc;
+
+ req->embedded_proc_type = proc_type;
+ req->selfrst_status = self_reset;
+ req->flags = flags;
+
+ if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
+ rc = hwrm_req_send_silent(bp, req);
+ } else {
+ rc = hwrm_req_send(bp, req);
+ if (rc == -EACCES)
+ bnxt_print_admin_err(bp);
+ }
+ return rc;
+}
+
+static int bnxt_firmware_reset(struct net_device *dev,
+ enum bnxt_nvm_directory_type dir_type)
+{
+ u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
+ u8 proc_type, flags = 0;
/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
/* (e.g. when firmware isn't already running) */
@@ -1765,42 +2264,50 @@ static int bnxt_firmware_reset(struct net_device *dev,
case BNX_DIR_TYPE_CHIMP_PATCH:
case BNX_DIR_TYPE_BOOTCODE:
case BNX_DIR_TYPE_BOOTCODE_2:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
/* Self-reset ChiMP upon next PCIe reset: */
- req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
+ self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
break;
case BNX_DIR_TYPE_APE_FW:
case BNX_DIR_TYPE_APE_PATCH:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
/* Self-reset APE upon next PCIe reset: */
- req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
+ self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
break;
case BNX_DIR_TYPE_KONG_FW:
case BNX_DIR_TYPE_KONG_PATCH:
- req.embedded_proc_type =
- FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
break;
case BNX_DIR_TYPE_BONO_FW:
case BNX_DIR_TYPE_BONO_PATCH:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
- break;
- case BNXT_FW_RESET_CHIP:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
- req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
- if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
- req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
- break;
- case BNXT_FW_RESET_AP:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
break;
default:
return -EINVAL;
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc == -EACCES)
- bnxt_print_admin_err(bp);
- return rc;
+ return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
+}
+
+static int bnxt_firmware_reset_chip(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u8 flags = 0;
+
+ if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+ flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
+
+ return bnxt_hwrm_firmware_reset(dev,
+ FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
+ FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
+ flags);
+}
+
+static int bnxt_firmware_reset_ap(struct net_device *dev)
+{
+ return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
+ FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
+ 0);
}
static int bnxt_flash_firmware(struct net_device *dev,
@@ -1876,7 +2383,7 @@ static int bnxt_flash_firmware(struct net_device *dev,
return -EINVAL;
}
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
- 0, 0, fw_data, fw_size);
+ 0, 0, 0, fw_data, fw_size);
if (rc == 0) /* Firmware update successful */
rc = bnxt_firmware_reset(dev, dir_type);
@@ -1929,7 +2436,7 @@ static int bnxt_flash_microcode(struct net_device *dev,
return -EINVAL;
}
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
- 0, 0, fw_data, fw_size);
+ 0, 0, 0, fw_data, fw_size);
return rc;
}
@@ -1989,114 +2496,249 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
rc, filename);
return rc;
}
- if (bnxt_dir_type_is_ape_bin_format(dir_type) == true)
+ if (bnxt_dir_type_is_ape_bin_format(dir_type))
rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
- else if (bnxt_dir_type_is_other_exec_format(dir_type) == true)
+ else if (bnxt_dir_type_is_other_exec_format(dir_type))
rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
else
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
- 0, 0, fw->data, fw->size);
+ 0, 0, 0, fw->data, fw->size);
release_firmware(fw);
return rc;
}
-int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
- u32 install_type)
+#define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
+#define MSG_INVALID_PKG "PKG install error : Invalid package"
+#define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
+#define MSG_INVALID_DEV "PKG install error : Invalid device"
+#define MSG_INTERNAL_ERR "PKG install error : Internal error"
+#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
+#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
+#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
+#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"
+
+static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
+ struct netlink_ext_ack *extack)
+{
+ switch (result) {
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
+ return -EINVAL;
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
+ return -ENOPKG;
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
+ return -EPERM;
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
+ return -EOPNOTSUPP;
+ default:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
+ return -EIO;
+ }
+}
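
[Editor's note] BNXT_NVM_ERR_MSG() is used throughout the install path but is not defined in this hunk. A plausible definition, consistent with how it is invoked here (extack may be NULL on the legacy ethtool path, so it must be optional) — an assumption, not the verbatim macro:

	#define BNXT_NVM_ERR_MSG(dev, extack, msg)			\
		do {							\
			if (extack)					\
				NL_SET_ERR_MSG_MOD(extack, msg);	\
			netdev_err(dev, "%s\n", msg);			\
		} while (0)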
+
+#define BNXT_PKG_DMA_SIZE 0x40000
+#define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
+#define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
+
+int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
+ u32 install_type, struct netlink_ext_ack *extack)
{
+ struct hwrm_nvm_install_update_input *install;
+ struct hwrm_nvm_install_update_output *resp;
+ struct hwrm_nvm_modify_input *modify;
struct bnxt *bp = netdev_priv(dev);
- struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_nvm_install_update_input install = {0};
- const struct firmware *fw;
+ bool defrag_attempted = false;
+ dma_addr_t dma_handle;
+ u8 *kmem = NULL;
+ u32 modify_len;
u32 item_len;
- int rc = 0;
+ u8 cmd_err;
u16 index;
+ int rc;
bnxt_hwrm_fw_set_time(bp);
- if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
- BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
- &index, &item_len, NULL) != 0) {
- netdev_err(dev, "PKG update area not created in nvram\n");
- return -ENOBUFS;
+ rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
+ if (rc)
+ return rc;
+
+ /* Try allocating a large DMA buffer first. Older fw will
+ * cause excessive NVRAM erases when using small blocks.
+ */
+ modify_len = roundup_pow_of_two(fw->size);
+ modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
+ while (1) {
+ kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle);
+ if (!kmem && modify_len > PAGE_SIZE)
+ modify_len /= 2;
+ else
+ break;
+ }
+ if (!kmem) {
+ hwrm_req_drop(bp, modify);
+ return -ENOMEM;
}
- rc = request_firmware(&fw, filename, &dev->dev);
- if (rc != 0) {
- netdev_err(dev, "PKG error %d requesting file: %s\n",
- rc, filename);
+ rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE);
+ if (rc) {
+ hwrm_req_drop(bp, modify);
return rc;
}
- if (fw->size > item_len) {
- netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
- (unsigned long)fw->size);
- rc = -EFBIG;
- } else {
- dma_addr_t dma_handle;
- u8 *kmem;
- struct hwrm_nvm_modify_input modify = {0};
+ hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
+ hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);
- bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);
+ hwrm_req_hold(bp, modify);
+ modify->host_src_addr = cpu_to_le64(dma_handle);
- modify.dir_idx = cpu_to_le16(index);
- modify.len = cpu_to_le32(fw->size);
+ resp = hwrm_req_hold(bp, install);
+ if ((install_type & 0xffff) == 0)
+ install_type >>= 16;
+ install->install_type = cpu_to_le32(install_type);
- kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size,
- &dma_handle, GFP_KERNEL);
- if (!kmem) {
- netdev_err(dev,
- "dma_alloc_coherent failure, length = %u\n",
- (unsigned int)fw->size);
- rc = -ENOMEM;
- } else {
- memcpy(kmem, fw->data, fw->size);
- modify.host_src_addr = cpu_to_le64(dma_handle);
+ do {
+ u32 copied = 0, len = modify_len;
- rc = hwrm_send_message(bp, &modify, sizeof(modify),
- FLASH_PACKAGE_TIMEOUT);
- dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
- dma_handle);
+ rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
+ BNX_DIR_ORDINAL_FIRST,
+ BNX_DIR_EXT_NONE,
+ &index, &item_len, NULL);
+ if (rc) {
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
+ break;
+ }
+ if (fw->size > item_len) {
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);
+ rc = -EFBIG;
+ break;
}
- }
- release_firmware(fw);
- if (rc)
- goto err_exit;
- if ((install_type & 0xffff) == 0)
- install_type >>= 16;
- bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
- install.install_type = cpu_to_le32(install_type);
+ modify->dir_idx = cpu_to_le16(index);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &install, sizeof(install),
- INSTALL_PACKAGE_TIMEOUT);
- if (rc) {
- u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
-
- if (resp->error_code && error_code ==
- NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
- install.flags |= cpu_to_le16(
- NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
- rc = _hwrm_send_message(bp, &install, sizeof(install),
- INSTALL_PACKAGE_TIMEOUT);
+ if (fw->size > modify_len)
+ modify->flags = BNXT_NVM_MORE_FLAG;
+ while (copied < fw->size) {
+ u32 balance = fw->size - copied;
+
+ if (balance <= modify_len) {
+ len = balance;
+ if (copied)
+ modify->flags |= BNXT_NVM_LAST_FLAG;
+ }
+ memcpy(kmem, fw->data + copied, len);
+ modify->len = cpu_to_le32(len);
+ modify->offset = cpu_to_le32(copied);
+ rc = hwrm_req_send(bp, modify);
+ if (rc)
+ goto pkg_abort;
+ copied += len;
+ }
+
+ rc = hwrm_req_send_silent(bp, install);
+ if (!rc)
+ break;
+
+ if (defrag_attempted) {
+ /* We have tried to defragment already in the previous
+ * iteration. Return with the result for INSTALL_UPDATE
+ */
+ break;
}
- if (rc)
- goto flash_pkg_exit;
- }
+
+ cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
+
+ switch (cmd_err) {
+ case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
+ rc = -EALREADY;
+ break;
+ case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
+ install->flags =
+ cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
+
+ rc = hwrm_req_send_silent(bp, install);
+ if (!rc)
+ break;
+
+ cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
+
+ if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
+ /* FW has cleared NVM area, driver will create
+ * UPDATE directory and try the flash again
+ */
+ defrag_attempted = true;
+ install->flags = 0;
+ rc = bnxt_flash_nvram(bp->dev,
+ BNX_DIR_TYPE_UPDATE,
+ BNX_DIR_ORDINAL_FIRST,
+ 0, 0, item_len, NULL, 0);
+ if (!rc)
+ break;
+ }
+ fallthrough;
+ default:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
+ }
+ } while (defrag_attempted && !rc);
+
+pkg_abort:
+ hwrm_req_drop(bp, modify);
+ hwrm_req_drop(bp, install);
if (resp->result) {
netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
(s8)resp->result, (int)resp->problem_item);
- rc = -ENOPKG;
+ rc = nvm_update_err_to_stderr(dev, resp->result, extack);
}
-flash_pkg_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
-err_exit:
if (rc == -EACCES)
bnxt_print_admin_err(bp);
return rc;
}
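
[Editor's note] The install_type word swap near the top of bnxt_flash_package_from_fw_obj() handles how the flash "region" can encode the install type. A small illustration with a hypothetical value (the exact region conventions come from the NVM package tooling, not from this driver):

	/* When the low 16 bits of the requested region are zero, the
	 * install type is taken from the high 16 bits, e.g. a region of
	 * 0x00010000 is treated as install_type 0x1.
	 */
	u32 install_type = 0x00010000;

	if ((install_type & 0xffff) == 0)
		install_type >>= 16;	/* now 0x1 */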
+static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
+ u32 install_type, struct netlink_ext_ack *extack)
+{
+ const struct firmware *fw;
+ int rc;
+
+ rc = request_firmware(&fw, filename, &dev->dev);
+ if (rc != 0) {
+ netdev_err(dev, "PKG error %d requesting file: %s\n",
+ rc, filename);
+ return rc;
+ }
+
+ rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);
+
+ release_firmware(fw);
+
+ return rc;
+}
+
static int bnxt_flash_device(struct net_device *dev,
struct ethtool_flash *flash)
{
@@ -2108,27 +2750,29 @@ static int bnxt_flash_device(struct net_device *dev,
if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
flash->region > 0xffff)
return bnxt_flash_package_from_file(dev, flash->data,
- flash->region);
+ flash->region, NULL);
return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}
static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
{
+ struct hwrm_nvm_get_dir_info_output *output;
+ struct hwrm_nvm_get_dir_info_input *req;
struct bnxt *bp = netdev_priv(dev);
int rc;
- struct hwrm_nvm_get_dir_info_input req = {0};
- struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);
+ if (rc)
+ return rc;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ output = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (!rc) {
*entries = le32_to_cpu(output->entries);
*length = le32_to_cpu(output->entry_length);
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
@@ -2154,12 +2798,15 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
u8 *buf;
size_t buflen;
dma_addr_t dma_handle;
- struct hwrm_nvm_get_dir_entries_input req = {0};
+ struct hwrm_nvm_get_dir_entries_input *req;
rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
if (rc != 0)
return rc;
+ if (!dir_entries || !entry_length)
+ return -EIO;
+
/* Insert 2 bytes of directory info (count and size of entries) */
if (len < 2)
return -EINVAL;
@@ -2169,73 +2816,82 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
len -= 2;
memset(data, 0xff, len);
+ rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
+ if (rc)
+ return rc;
+
buflen = dir_entries * entry_length;
- buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
- GFP_KERNEL);
+ buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
if (!buf) {
- netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
- (unsigned)buflen);
+ hwrm_req_drop(bp, req);
return -ENOMEM;
}
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
- req.host_dest_addr = cpu_to_le64(dma_handle);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req->host_dest_addr = cpu_to_le64(dma_handle);
+
+ hwrm_req_hold(bp, req); /* hold the slice */
+ rc = hwrm_req_send(bp, req);
if (rc == 0)
memcpy(data, buf, len > buflen ? buflen : len);
- dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
+ hwrm_req_drop(bp, req);
return rc;
}
-static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
- u32 length, u8 *data)
+int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data)
{
struct bnxt *bp = netdev_priv(dev);
int rc;
u8 *buf;
dma_addr_t dma_handle;
- struct hwrm_nvm_read_input req = {0};
+ struct hwrm_nvm_read_input *req;
if (!length)
return -EINVAL;
- buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
- GFP_KERNEL);
+ rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
+ if (rc)
+ return rc;
+
+ buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
if (!buf) {
- netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
- (unsigned)length);
+ hwrm_req_drop(bp, req);
return -ENOMEM;
}
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
- req.host_dest_addr = cpu_to_le64(dma_handle);
- req.dir_idx = cpu_to_le16(index);
- req.offset = cpu_to_le32(offset);
- req.len = cpu_to_le32(length);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req->host_dest_addr = cpu_to_le64(dma_handle);
+ req->dir_idx = cpu_to_le16(index);
+ req->offset = cpu_to_le32(offset);
+ req->len = cpu_to_le32(length);
+
+ hwrm_req_hold(bp, req); /* hold the slice */
+ rc = hwrm_req_send(bp, req);
if (rc == 0)
memcpy(data, buf, length);
- dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
+ hwrm_req_drop(bp, req);
return rc;
}
-static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
- u16 ext, u16 *index, u32 *item_length,
- u32 *data_length)
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length)
{
+ struct hwrm_nvm_find_dir_entry_output *output;
+ struct hwrm_nvm_find_dir_entry_input *req;
struct bnxt *bp = netdev_priv(dev);
int rc;
- struct hwrm_nvm_find_dir_entry_input req = {0};
- struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
- req.enables = 0;
- req.dir_idx = 0;
- req.dir_type = cpu_to_le16(type);
- req.dir_ordinal = cpu_to_le16(ordinal);
- req.dir_ext = cpu_to_le16(ext);
- req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY);
+ if (rc)
+ return rc;
+
+ req->enables = 0;
+ req->dir_idx = 0;
+ req->dir_type = cpu_to_le16(type);
+ req->dir_ordinal = cpu_to_le16(ordinal);
+ req->dir_ext = cpu_to_le16(ext);
+ req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
+ output = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
if (rc == 0) {
if (index)
*index = le16_to_cpu(output->dir_idx);
@@ -2244,7 +2900,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
if (data_length)
*data_length = le32_to_cpu(output->dir_data_length);
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
@@ -2281,39 +2937,56 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
return retval;
}
-static void bnxt_get_pkgver(struct net_device *dev)
+int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
{
struct bnxt *bp = netdev_priv(dev);
u16 index = 0;
char *pkgver;
u32 pkglen;
u8 *pkgbuf;
- int len;
+ int rc;
- if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
- BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
- &index, NULL, &pkglen) != 0)
- return;
+ rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, NULL, &pkglen);
+ if (rc)
+ return rc;
pkgbuf = kzalloc(pkglen, GFP_KERNEL);
if (!pkgbuf) {
dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
pkglen);
- return;
+ return -ENOMEM;
}
- if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
+ rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
+ if (rc)
goto err;
pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
pkglen);
- if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
+ if (pkgver && *pkgver != 0 && isdigit(*pkgver))
+ strscpy(ver, pkgver, size);
+ else
+ rc = -ENOENT;
+
+err:
+ kfree(pkgbuf);
+
+ return rc;
+}
+
+static void bnxt_get_pkgver(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ char buf[FW_VER_STR_LEN];
+ int len;
+
+ if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
len = strlen(bp->fw_ver_str);
snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
- "/pkg %s", pkgver);
+ "/pkg %s", buf);
}
-err:
- kfree(pkgbuf);
}
static int bnxt_get_eeprom(struct net_device *dev,
@@ -2339,12 +3012,16 @@ static int bnxt_get_eeprom(struct net_device *dev,
static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
{
+ struct hwrm_nvm_erase_dir_entry_input *req;
struct bnxt *bp = netdev_priv(dev);
- struct hwrm_nvm_erase_dir_entry_input req = {0};
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY);
+ if (rc)
+ return rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
- req.dir_idx = cpu_to_le16(index);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req->dir_idx = cpu_to_le16(index);
+ return hwrm_req_send(bp, req);
}
static int bnxt_set_eeprom(struct net_device *dev,
@@ -2378,13 +3055,13 @@ static int bnxt_set_eeprom(struct net_device *dev,
}
/* Create or re-write an NVM item: */
- if (bnxt_dir_type_is_executable(type) == true)
+ if (bnxt_dir_type_is_executable(type))
return -EOPNOTSUPP;
ext = eeprom->magic & 0xffff;
ordinal = eeprom->offset >> 16;
attr = eeprom->offset & 0xffff;
- return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
+ return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data,
eeprom->len);
}
@@ -2393,29 +3070,32 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
struct bnxt *bp = netdev_priv(dev);
struct ethtool_eee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
- u32 advertising =
- _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ u32 advertising;
int rc = 0;
if (!BNXT_PHY_CFG_ABLE(bp))
return -EOPNOTSUPP;
- if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+ if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
return -EOPNOTSUPP;
+ mutex_lock(&bp->link_lock);
+ advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
if (!edata->eee_enabled)
goto eee_ok;
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
netdev_warn(dev, "EEE requires autoneg\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto eee_exit;
}
if (edata->tx_lpi_enabled) {
if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
bp->lpi_tmr_lo, bp->lpi_tmr_hi);
- return -EINVAL;
+ rc = -EINVAL;
+ goto eee_exit;
} else if (!bp->lpi_tmr_hi) {
edata->tx_lpi_timer = eee->tx_lpi_timer;
}
@@ -2425,7 +3105,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
} else if (edata->advertised & ~advertising) {
netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
edata->advertised, advertising);
- return -EINVAL;
+ rc = -EINVAL;
+ goto eee_exit;
}
eee->advertised = edata->advertised;
@@ -2437,6 +3118,8 @@ eee_ok:
if (netif_running(dev))
rc = bnxt_hwrm_set_link_setting(bp, false, true);
+eee_exit:
+ mutex_unlock(&bp->link_lock);
return rc;
}
@@ -2444,7 +3127,7 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
struct bnxt *bp = netdev_priv(dev);
- if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+ if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
return -EOPNOTSUPP;
*edata = bp->eee;
@@ -2466,31 +3149,33 @@ static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
u16 page_number, u16 start_addr,
u16 data_length, u8 *buf)
{
- struct hwrm_port_phy_i2c_read_input req = {0};
- struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_phy_i2c_read_output *output;
+ struct hwrm_port_phy_i2c_read_input *req;
int rc, byte_offset = 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
- req.i2c_slave_addr = i2c_addr;
- req.page_number = cpu_to_le16(page_number);
- req.port_id = cpu_to_le16(bp->pf.port_id);
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ);
+ if (rc)
+ return rc;
+
+ output = hwrm_req_hold(bp, req);
+ req->i2c_slave_addr = i2c_addr;
+ req->page_number = cpu_to_le16(page_number);
+ req->port_id = cpu_to_le16(bp->pf.port_id);
do {
u16 xfer_size;
xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
data_length -= xfer_size;
- req.page_offset = cpu_to_le16(start_addr + byte_offset);
- req.data_length = xfer_size;
- req.enables = cpu_to_le32(start_addr + byte_offset ?
+ req->page_offset = cpu_to_le16(start_addr + byte_offset);
+ req->data_length = xfer_size;
+ req->enables = cpu_to_le32(start_addr + byte_offset ?
PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_send(bp, req);
if (!rc)
memcpy(buf + byte_offset, output->data, xfer_size);
- mutex_unlock(&bp->hwrm_cmd_lock);
byte_offset += xfer_size;
} while (!rc && data_length > 0);
+ hwrm_req_drop(bp, req);
return rc;
}
@@ -2571,7 +3256,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
/* Read A2 portion of the EEPROM */
if (length) {
start -= ETH_MODULE_SFF_8436_LEN;
- rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0,
start, length, data);
}
return rc;
@@ -2599,13 +3284,13 @@ static int bnxt_nway_reset(struct net_device *dev)
static int bnxt_set_phys_id(struct net_device *dev,
enum ethtool_phys_id_state state)
{
- struct hwrm_port_led_cfg_input req = {0};
+ struct hwrm_port_led_cfg_input *req;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_pf_info *pf = &bp->pf;
struct bnxt_led_cfg *led_cfg;
u8 led_state;
__le16 duration;
- int i, rc;
+ int rc, i;
if (!bp->num_leds || BNXT_VF(bp))
return -EOPNOTSUPP;
@@ -2619,28 +3304,35 @@ static int bnxt_set_phys_id(struct net_device *dev,
} else {
return -EINVAL;
}
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
- req.port_id = cpu_to_le16(pf->port_id);
- req.num_leds = bp->num_leds;
- led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
+ rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG);
+ if (rc)
+ return rc;
+
+ req->port_id = cpu_to_le16(pf->port_id);
+ req->num_leds = bp->num_leds;
+ led_cfg = (struct bnxt_led_cfg *)&req->led0_id;
for (i = 0; i < bp->num_leds; i++, led_cfg++) {
- req.enables |= BNXT_LED_DFLT_ENABLES(i);
+ req->enables |= BNXT_LED_DFLT_ENABLES(i);
led_cfg->led_id = bp->leds[i].led_id;
led_cfg->led_state = led_state;
led_cfg->led_blink_on = duration;
led_cfg->led_blink_off = duration;
led_cfg->led_group_id = bp->leds[i].led_group_id;
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
{
- struct hwrm_selftest_irq_input req = {0};
+ struct hwrm_selftest_irq_input *req;
+ int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ);
+ if (rc)
+ return rc;
+
+ req->cmpl_ring = cpu_to_le16(cmpl_ring);
+ return hwrm_req_send(bp, req);
}
static int bnxt_test_irq(struct bnxt *bp)
@@ -2660,31 +3352,37 @@ static int bnxt_test_irq(struct bnxt *bp)
static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
{
- struct hwrm_port_mac_cfg_input req = {0};
+ struct hwrm_port_mac_cfg_input *req;
+ int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
+ if (rc)
+ return rc;
- req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
+ req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
if (enable)
- req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
+ req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
else
- req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
+ return hwrm_req_send(bp, req);
}
static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
{
- struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_port_phy_qcaps_input req = {0};
+ struct hwrm_port_phy_qcaps_output *resp;
+ struct hwrm_port_phy_qcaps_input *req;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
+ if (rc)
+ return rc;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (!rc)
*force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
@@ -2697,7 +3395,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
int rc;
if (!link_info->autoneg ||
- (bp->test_info->flags & BNXT_TEST_FL_AN_PHY_LPBK))
+ (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
return 0;
rc = bnxt_query_force_speeds(bp, &fw_advertising);
@@ -2705,7 +3403,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
return rc;
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
- if (bp->link_info.link_up)
+ if (BNXT_LINK_IS_UP(bp))
fw_speed = bp->link_info.link_speed;
else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
@@ -2719,7 +3417,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
req->force_link_speed = cpu_to_le16(fw_speed);
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
- rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_send(bp, req);
req->flags = 0;
req->force_link_speed = cpu_to_le16(0);
return rc;
@@ -2727,21 +3425,29 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
{
- struct hwrm_port_phy_cfg_input req = {0};
+ struct hwrm_port_phy_cfg_input *req;
+ int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
+ if (rc)
+ return rc;
+
+ /* prevent bnxt_disable_an_for_lpbk() from consuming the request */
+ hwrm_req_hold(bp, req);
if (enable) {
- bnxt_disable_an_for_lpbk(bp, &req);
+ bnxt_disable_an_for_lpbk(bp, req);
if (ext)
- req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
+ req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
else
- req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
+ req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
} else {
- req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
+ req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
}
- req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
+ rc = hwrm_req_send(bp, req);
+ hwrm_req_drop(bp, req);
+ return rc;
}
static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
@@ -2831,7 +3537,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
if (!skb)
return -ENOMEM;
data = skb_put(skb, pkt_size);
- eth_broadcast_addr(data);
+ ether_addr_copy(&data[i], bp->dev->dev_addr);
i += ETH_ALEN;
ether_addr_copy(&data[i], bp->dev->dev_addr);
i += ETH_ALEN;
@@ -2839,12 +3545,12 @@ static int bnxt_run_loopback(struct bnxt *bp)
data[i] = (u8)(i & 0xff);
map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (dma_mapping_error(&bp->pdev->dev, map)) {
dev_kfree_skb(skb);
return -EIO;
}
- bnxt_xmit_bd(bp, txr, map, pkt_size);
+ bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);
/* Sync BD data before updating doorbell */
wmb();
@@ -2852,24 +3558,28 @@ static int bnxt_run_loopback(struct bnxt *bp)
bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
rc = bnxt_poll_loopback(bp, cpr, pkt_size);
- dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
+ dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
dev_kfree_skb(skb);
return rc;
}
static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
{
- struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_selftest_exec_input req = {0};
+ struct hwrm_selftest_exec_output *resp;
+ struct hwrm_selftest_exec_input *req;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
- mutex_lock(&bp->hwrm_cmd_lock);
- resp->test_success = 0;
- req.flags = test_mask;
- rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
+ rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC);
+ if (rc)
+ return rc;
+
+ hwrm_req_timeout(bp, req, bp->test_info->timeout);
+ req->flags = test_mask;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
*test_results = resp->test_success;
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
@@ -2889,7 +3599,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
u8 test_mask = 0;
int rc = 0, i;
- if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
+ if (!bp->num_tests || !BNXT_PF(bp))
return;
memset(buf, 0, sizeof(u64) * bp->num_tests);
if (!netif_running(dev)) {
@@ -2898,13 +3608,13 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
}
if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
- (bp->test_info->flags & BNXT_TEST_FL_EXT_LPBK))
+ (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
do_ext_lpbk = true;
if (etest->flags & ETH_TEST_FL_OFFLINE) {
- if (bp->pf.active_vfs) {
+ if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
etest->flags |= ETH_TEST_FL_FAILED;
- netdev_warn(dev, "Offline tests cannot be run with active VFs\n");
+ netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n");
return;
}
offline = true;
@@ -2921,9 +3631,12 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!offline) {
bnxt_run_fw_tests(bp, test_mask, &test_results);
} else {
- rc = bnxt_close_nic(bp, false, false);
- if (rc)
+ bnxt_ulp_stop(bp);
+ rc = bnxt_close_nic(bp, true, false);
+ if (rc) {
+ bnxt_ulp_start(bp, rc);
return;
+ }
bnxt_run_fw_tests(bp, test_mask, &test_results);
buf[BNXT_MACLPBK_TEST_IDX] = 1;
@@ -2933,6 +3646,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (rc) {
bnxt_hwrm_mac_loopback(bp, false);
etest->flags |= ETH_TEST_FL_FAILED;
+ bnxt_ulp_start(bp, rc);
return;
}
if (bnxt_run_loopback(bp))
@@ -2958,7 +3672,8 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
}
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
- rc = bnxt_open_nic(bp, false, true);
+ rc = bnxt_open_nic(bp, true, true);
+ bnxt_ulp_start(bp, rc);
}
if (rc || bnxt_test_irq(bp)) {
buf[BNXT_IRQ_TEST_IDX] = 1;
@@ -2977,7 +3692,11 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
static int bnxt_reset(struct net_device *dev, u32 *flags)
{
struct bnxt *bp = netdev_priv(dev);
- int rc = 0;
+ bool reload = false;
+ u32 req = *flags;
+
+ if (!req)
+ return -EINVAL;
if (!BNXT_PF(bp)) {
netdev_err(dev, "Reset is not supported from a VF\n");
@@ -2991,353 +3710,37 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return -EBUSY;
}
- if (*flags == ETH_RESET_ALL) {
- /* This feature is not supported in older firmware versions */
- if (bp->hwrm_spec_code < 0x10803)
- return -EOPNOTSUPP;
-
- rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
- if (!rc) {
- netdev_info(dev, "Reset request successful.\n");
- if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
- netdev_info(dev, "Reload driver to complete reset\n");
- *flags = 0;
- }
- } else if (*flags == ETH_RESET_AP) {
+ if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
/* This feature is not supported in older firmware versions */
- if (bp->hwrm_spec_code < 0x10803)
- return -EOPNOTSUPP;
-
- rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
- if (!rc) {
- netdev_info(dev, "Reset Application Processor request successful.\n");
- *flags = 0;
- }
- } else {
- rc = -EINVAL;
- }
-
- return rc;
-}
-
-static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
- struct bnxt_hwrm_dbg_dma_info *info)
-{
- struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_dbg_cmn_input *cmn_req = msg;
- __le16 *seq_ptr = msg + info->seq_off;
- u16 seq = 0, len, segs_off;
- void *resp = cmn_resp;
- dma_addr_t dma_handle;
- int rc, off = 0;
- void *dma_buf;
-
- dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle,
- GFP_KERNEL);
- if (!dma_buf)
- return -ENOMEM;
-
- segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
- total_segments);
- cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
- cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
- mutex_lock(&bp->hwrm_cmd_lock);
- while (1) {
- *seq_ptr = cpu_to_le16(seq);
- rc = _hwrm_send_message(bp, msg, msg_len,
- HWRM_COREDUMP_TIMEOUT);
- if (rc)
- break;
-
- len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
- if (!seq &&
- cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
- info->segs = le16_to_cpu(*((__le16 *)(resp +
- segs_off)));
- if (!info->segs) {
- rc = -EIO;
- break;
- }
-
- info->dest_buf_size = info->segs *
- sizeof(struct coredump_segment_record);
- info->dest_buf = kmalloc(info->dest_buf_size,
- GFP_KERNEL);
- if (!info->dest_buf) {
- rc = -ENOMEM;
- break;
+ if (bp->hwrm_spec_code >= 0x10803) {
+ if (!bnxt_firmware_reset_chip(dev)) {
+ netdev_info(dev, "Firmware reset request successful.\n");
+ if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
+ reload = true;
+ *flags &= ~BNXT_FW_RESET_CHIP;
}
+ } else if (req == BNXT_FW_RESET_CHIP) {
+ return -EOPNOTSUPP; /* only request, fail hard */
}
+ }
- if (info->dest_buf) {
- if ((info->seg_start + off + len) <=
- BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
- memcpy(info->dest_buf + off, dma_buf, len);
- } else {
- rc = -ENOBUFS;
- break;
+ if (req & BNXT_FW_RESET_AP) {
+ /* This feature is not supported in older firmware versions */
+ if (bp->hwrm_spec_code >= 0x10803) {
+ if (!bnxt_firmware_reset_ap(dev)) {
+ netdev_info(dev, "Reset application processor successful.\n");
+ reload = true;
+ *flags &= ~BNXT_FW_RESET_AP;
}
+ } else if (req == BNXT_FW_RESET_AP) {
+ return -EOPNOTSUPP; /* only request, fail hard */
}
-
- if (cmn_req->req_type ==
- cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
- info->dest_buf_size += len;
-
- if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
- break;
-
- seq++;
- off += len;
}
- mutex_unlock(&bp->hwrm_cmd_lock);
- dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle);
- return rc;
-}
-
-static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
- struct bnxt_coredump *coredump)
-{
- struct hwrm_dbg_coredump_list_input req = {0};
- struct bnxt_hwrm_dbg_dma_info info = {NULL};
- int rc;
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1);
-
- info.dma_len = COREDUMP_LIST_BUF_LEN;
- info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
- info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
- data_len);
-
- rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
- if (!rc) {
- coredump->data = info.dest_buf;
- coredump->data_size = info.dest_buf_size;
- coredump->total_segs = info.segs;
- }
- return rc;
-}
-
-static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
- u16 segment_id)
-{
- struct hwrm_dbg_coredump_initiate_input req = {0};
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1);
- req.component_id = cpu_to_le16(component_id);
- req.segment_id = cpu_to_le16(segment_id);
-
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_COREDUMP_TIMEOUT);
-}
-
-static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
- u16 segment_id, u32 *seg_len,
- void *buf, u32 buf_len, u32 offset)
-{
- struct hwrm_dbg_coredump_retrieve_input req = {0};
- struct bnxt_hwrm_dbg_dma_info info = {NULL};
- int rc;
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1);
- req.component_id = cpu_to_le16(component_id);
- req.segment_id = cpu_to_le16(segment_id);
-
- info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
- info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
- seq_no);
- info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
- data_len);
- if (buf) {
- info.dest_buf = buf + offset;
- info.buf_len = buf_len;
- info.seg_start = offset;
- }
-
- rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
- if (!rc)
- *seg_len = info.dest_buf_size;
-
- return rc;
-}
-static void
-bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
- struct bnxt_coredump_segment_hdr *seg_hdr,
- struct coredump_segment_record *seg_rec, u32 seg_len,
- int status, u32 duration, u32 instance)
-{
- memset(seg_hdr, 0, sizeof(*seg_hdr));
- memcpy(seg_hdr->signature, "sEgM", 4);
- if (seg_rec) {
- seg_hdr->component_id = (__force __le32)seg_rec->component_id;
- seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
- seg_hdr->low_version = seg_rec->version_low;
- seg_hdr->high_version = seg_rec->version_hi;
- } else {
- /* For hwrm_ver_get response Component id = 2
- * and Segment id = 0
- */
- seg_hdr->component_id = cpu_to_le32(2);
- seg_hdr->segment_id = 0;
- }
- seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
- seg_hdr->length = cpu_to_le32(seg_len);
- seg_hdr->status = cpu_to_le32(status);
- seg_hdr->duration = cpu_to_le32(duration);
- seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
- seg_hdr->instance = cpu_to_le32(instance);
-}
-
-static void
-bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
- time64_t start, s16 start_utc, u16 total_segs,
- int status)
-{
- time64_t end = ktime_get_real_seconds();
- u32 os_ver_major = 0, os_ver_minor = 0;
- struct tm tm;
-
- time64_to_tm(start, 0, &tm);
- memset(record, 0, sizeof(*record));
- memcpy(record->signature, "cOrE", 4);
- record->flags = 0;
- record->low_version = 0;
- record->high_version = 1;
- record->asic_state = 0;
- strlcpy(record->system_name, utsname()->nodename,
- sizeof(record->system_name));
- record->year = cpu_to_le16(tm.tm_year + 1900);
- record->month = cpu_to_le16(tm.tm_mon + 1);
- record->day = cpu_to_le16(tm.tm_mday);
- record->hour = cpu_to_le16(tm.tm_hour);
- record->minute = cpu_to_le16(tm.tm_min);
- record->second = cpu_to_le16(tm.tm_sec);
- record->utc_bias = cpu_to_le16(start_utc);
- strcpy(record->commandline, "ethtool -w");
- record->total_segments = cpu_to_le32(total_segs);
-
- sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor);
- record->os_ver_major = cpu_to_le32(os_ver_major);
- record->os_ver_minor = cpu_to_le32(os_ver_minor);
-
- strlcpy(record->os_name, utsname()->sysname, 32);
- time64_to_tm(end, 0, &tm);
- record->end_year = cpu_to_le16(tm.tm_year + 1900);
- record->end_month = cpu_to_le16(tm.tm_mon + 1);
- record->end_day = cpu_to_le16(tm.tm_mday);
- record->end_hour = cpu_to_le16(tm.tm_hour);
- record->end_minute = cpu_to_le16(tm.tm_min);
- record->end_second = cpu_to_le16(tm.tm_sec);
- record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
- record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
- bp->ver_resp.chip_rev << 8 |
- bp->ver_resp.chip_metal);
- record->asic_id2 = 0;
- record->coredump_status = cpu_to_le32(status);
- record->ioctl_low_version = 0;
- record->ioctl_high_version = 0;
-}
-
-static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
-{
- u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
- u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
- struct coredump_segment_record *seg_record = NULL;
- struct bnxt_coredump_segment_hdr seg_hdr;
- struct bnxt_coredump coredump = {NULL};
- time64_t start_time;
- u16 start_utc;
- int rc = 0, i;
-
- if (buf)
- buf_len = *dump_len;
-
- start_time = ktime_get_real_seconds();
- start_utc = sys_tz.tz_minuteswest * 60;
- seg_hdr_len = sizeof(seg_hdr);
-
- /* First segment should be hwrm_ver_get response */
- *dump_len = seg_hdr_len + ver_get_resp_len;
- if (buf) {
- bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
- 0, 0, 0);
- memcpy(buf + offset, &seg_hdr, seg_hdr_len);
- offset += seg_hdr_len;
- memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
- offset += ver_get_resp_len;
- }
+ if (reload)
+ netdev_info(dev, "Reload driver to complete reset\n");
- rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
- if (rc) {
- netdev_err(bp->dev, "Failed to get coredump segment list\n");
- goto err;
- }
-
- *dump_len += seg_hdr_len * coredump.total_segs;
-
- seg_record = (struct coredump_segment_record *)coredump.data;
- seg_record_len = sizeof(*seg_record);
-
- for (i = 0; i < coredump.total_segs; i++) {
- u16 comp_id = le16_to_cpu(seg_record->component_id);
- u16 seg_id = le16_to_cpu(seg_record->segment_id);
- u32 duration = 0, seg_len = 0;
- unsigned long start, end;
-
- if (buf && ((offset + seg_hdr_len) >
- BNXT_COREDUMP_BUF_LEN(buf_len))) {
- rc = -ENOBUFS;
- goto err;
- }
-
- start = jiffies;
-
- rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
- if (rc) {
- netdev_err(bp->dev,
- "Failed to initiate coredump for seg = %d\n",
- seg_record->segment_id);
- goto next_seg;
- }
-
- /* Write segment data into the buffer */
- rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
- &seg_len, buf, buf_len,
- offset + seg_hdr_len);
- if (rc && rc == -ENOBUFS)
- goto err;
- else if (rc)
- netdev_err(bp->dev,
- "Failed to retrieve coredump for seg = %d\n",
- seg_record->segment_id);
-
-next_seg:
- end = jiffies;
- duration = jiffies_to_msecs(end - start);
- bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
- rc, duration, 0);
-
- if (buf) {
- /* Write segment header into the buffer */
- memcpy(buf + offset, &seg_hdr, seg_hdr_len);
- offset += seg_hdr_len + seg_len;
- }
-
- *dump_len += seg_len;
- seg_record =
- (struct coredump_segment_record *)((u8 *)seg_record +
- seg_record_len);
- }
-
-err:
- if (buf)
- bnxt_fill_coredump_record(bp, buf + offset, start_time,
- start_utc, coredump.total_segs + 1,
- rc);
- kfree(coredump.data);
- *dump_len += sizeof(struct bnxt_coredump_record);
- if (rc == -ENOBUFS)
- netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
- return rc;
+ return 0;
}
static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
@@ -3371,10 +3774,7 @@ static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
bp->ver_resp.hwrm_fw_rsvd_8b;
dump->flag = bp->dump_flag;
- if (bp->dump_flag == BNXT_DUMP_CRASH)
- dump->len = BNXT_CRASH_DUMP_LEN;
- else
- bnxt_get_coredump(bp, NULL, &dump->len);
+ dump->len = bnxt_get_coredump_length(bp, bp->dump_flag);
return 0;
}
@@ -3389,21 +3789,45 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
memset(buf, 0, dump->len);
dump->flag = bp->dump_flag;
- if (dump->flag == BNXT_DUMP_CRASH) {
-#ifdef CONFIG_TEE_BNXT_FW
- return tee_bnxt_copy_coredump(buf, 0, dump->len);
-#endif
- } else {
- return bnxt_get_coredump(bp, buf, &dump->len);
- }
+ return bnxt_get_coredump(bp, dump->flag, buf, &dump->len);
+}
+
+static int bnxt_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ptp_cfg *ptp;
+
+ ptp = bp->ptp_cfg;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+
+ info->phc_index = -1;
+ if (!ptp)
+ return 0;
+
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ if (ptp->ptp_clock)
+ info->phc_index = ptp_clock_index(ptp->ptp_clock);
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+
+ if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
+ info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL);
return 0;
}
void bnxt_ethtool_init(struct bnxt *bp)
{
- struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_selftest_qlist_input req = {0};
+ struct hwrm_selftest_qlist_output *resp;
+ struct hwrm_selftest_qlist_input *req;
struct bnxt_test_info *test_info;
struct net_device *dev = bp->dev;
int i, rc;
@@ -3412,22 +3836,25 @@ void bnxt_ethtool_init(struct bnxt *bp)
bnxt_get_pkgver(dev);
bp->num_tests = 0;
- if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
+ if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
return;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- goto ethtool_init_exit;
-
test_info = bp->test_info;
- if (!test_info)
+ if (!test_info) {
test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
- if (!test_info)
+ if (!test_info)
+ return;
+ bp->test_info = test_info;
+ }
+
+ if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST))
+ return;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (rc)
goto ethtool_init_exit;
- bp->test_info = test_info;
bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
if (bp->num_tests > BNXT_MAX_TEST)
bp->num_tests = BNXT_MAX_TEST;
@@ -3449,7 +3876,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
} else if (i == BNXT_IRQ_TEST_IDX) {
strcpy(str, "Interrupt_test (offline)");
} else {
- strlcpy(str, fw_str, ETH_GSTRING_LEN);
+ strscpy(str, fw_str, ETH_GSTRING_LEN);
strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
if (test_info->offline_mask & (1 << i))
strncat(str, " (offline)",
@@ -3461,7 +3888,134 @@ void bnxt_ethtool_init(struct bnxt *bp)
}
ethtool_init_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
+}
+
+static void bnxt_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u64 *rx;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
+ return;
+
+ rx = bp->rx_port_stats_ext.sw_stats;
+ phy_stats->SymbolErrorDuringCarrier =
+ *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err));
+}
+
+static void bnxt_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u64 *rx, *tx;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
+ return;
+
+ rx = bp->port_stats.sw_stats;
+ tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+
+ mac_stats->FramesReceivedOK =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames);
+ mac_stats->FramesTransmittedOK =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames);
+ mac_stats->FrameCheckSequenceErrors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
+ mac_stats->AlignmentErrors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
+ mac_stats->OutOfRangeLengthField =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames);
+}
+
+static void bnxt_get_eth_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u64 *rx;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
+ return;
+
+ rx = bp->port_stats.sw_stats;
+ ctrl_stats->MACControlFramesReceived =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames);
+}
+
+static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 2047 },
+ { 2048, 4095 },
+ { 4096, 9216 },
+ { 9217, 16383 },
+ {}
+};
+
+static void bnxt_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u64 *rx, *tx;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
+ return;
+
+ rx = bp->port_stats.sw_stats;
+ tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+
+ rmon_stats->jabbers =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
+ rmon_stats->oversize_pkts =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames);
+ rmon_stats->undersize_pkts =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames);
+
+ rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames);
+ rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames);
+ rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames);
+ rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames);
+ rmon_stats->hist[4] =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames);
+ rmon_stats->hist[5] =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames);
+ rmon_stats->hist[6] =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames);
+ rmon_stats->hist[7] =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames);
+ rmon_stats->hist[8] =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames);
+ rmon_stats->hist[9] =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames);
+
+ rmon_stats->hist_tx[0] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames);
+ rmon_stats->hist_tx[1] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames);
+ rmon_stats->hist_tx[2] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames);
+ rmon_stats->hist_tx[3] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames);
+ rmon_stats->hist_tx[4] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames);
+ rmon_stats->hist_tx[5] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames);
+ rmon_stats->hist_tx[6] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames);
+ rmon_stats->hist_tx[7] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames);
+ rmon_stats->hist_tx[8] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames);
+ rmon_stats->hist_tx[9] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames);
+
+ *ranges = bnxt_rmon_ranges;
}
void bnxt_ethtool_free(struct bnxt *bp)
@@ -3471,11 +4025,24 @@ void bnxt_ethtool_free(struct bnxt *bp)
}
const struct ethtool_ops bnxt_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS_IRQ |
+ ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_STATS_BLOCK_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
+ ETHTOOL_COALESCE_USE_CQE,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
+ .get_fec_stats = bnxt_get_fec_stats,
+ .get_fecparam = bnxt_get_fecparam,
+ .set_fecparam = bnxt_set_fecparam,
+ .get_pause_stats = bnxt_get_pause_stats,
.get_pauseparam = bnxt_get_pauseparam,
.set_pauseparam = bnxt_set_pauseparam,
.get_drvinfo = bnxt_get_drvinfo,
+ .get_regs_len = bnxt_get_regs_len,
+ .get_regs = bnxt_get_regs,
.get_wol = bnxt_get_wol,
.set_wol = bnxt_set_wol,
.get_coalesce = bnxt_get_coalesce,
@@ -3494,6 +4061,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
.get_rxfh_key_size = bnxt_get_rxfh_key_size,
.get_rxfh = bnxt_get_rxfh,
+ .set_rxfh = bnxt_set_rxfh,
.flash_device = bnxt_flash_device,
.get_eeprom_len = bnxt_get_eeprom_len,
.get_eeprom = bnxt_get_eeprom,
@@ -3506,8 +4074,13 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.nway_reset = bnxt_nway_reset,
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
+ .get_ts_info = bnxt_get_ts_info,
.reset = bnxt_reset,
.set_dump = bnxt_set_dump,
.get_dump_flag = bnxt_get_dump_flag,
.get_dump_data = bnxt_get_dump_data,
+ .get_eth_phy_stats = bnxt_get_eth_phy_stats,
+ .get_eth_mac_stats = bnxt_get_eth_mac_stats,
+ .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats,
+ .get_rmon_stats = bnxt_get_rmon_stats,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 3576d951727b..a8ecef8ab82c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -22,49 +22,6 @@ struct bnxt_led_cfg {
u8 rsvd;
};
-#define COREDUMP_LIST_BUF_LEN 2048
-#define COREDUMP_RETRIEVE_BUF_LEN 4096
-
-struct bnxt_coredump {
- void *data;
- int data_size;
- u16 total_segs;
-};
-
-#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record))
-
-struct bnxt_hwrm_dbg_dma_info {
- void *dest_buf;
- int dest_buf_size;
- u16 dma_len;
- u16 seq_off;
- u16 data_len_off;
- u16 segs;
- u32 seg_start;
- u32 buf_len;
-};
-
-struct hwrm_dbg_cmn_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 host_buf_len;
-};
-
-struct hwrm_dbg_cmn_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define HWRM_DBG_CMN_FLAGS_MORE 1
-};
-
-#define BNXT_CRASH_DUMP_LEN (8 << 20)
-
#define BNXT_LED_DFLT_ENA \
(PORT_LED_CFG_REQ_ENABLES_LED0_ID | \
PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \
@@ -77,17 +34,41 @@ struct hwrm_dbg_cmn_output {
#define BNXT_LED_DFLT_ENABLES(x) \
cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
-#define BNXT_FW_RESET_AP 0xfffe
-#define BNXT_FW_RESET_CHIP 0xffff
+#define BNXT_FW_RESET_AP (ETH_RESET_AP << ETH_RESET_SHARED_SHIFT)
+#define BNXT_FW_RESET_CHIP ((ETH_RESET_MGMT | ETH_RESET_IRQ | \
+ ETH_RESET_DMA | ETH_RESET_FILTER | \
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC | \
+ ETH_RESET_PHY | ETH_RESET_RAM) \
+ << ETH_RESET_SHARED_SHIFT)
+
+#define BNXT_PXP_REG_LEN 0x3110
extern const struct ethtool_ops bnxt_ethtool_ops;
+u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
u32 bnxt_fw_to_ethtool_speed(u16);
u16 bnxt_get_fw_auto_link_speeds(u32);
-int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
- u32 install_type);
+int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
+ struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
+int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
+ u8 self_reset, u8 flags);
+int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
+ u32 install_type, struct netlink_ext_ack *extack);
+int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size);
void bnxt_ethtool_init(struct bnxt *bp);
void bnxt_ethtool_free(struct bnxt *bp);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
+int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+ u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+ u32 dir_item_len, const u8 *data,
+ size_t data_len);
+int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 7cf27dffadb5..b753032a1047 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2019 Broadcom Inc.
+ * Copyright (c) 2018-2022 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -103,6 +103,7 @@ struct hwrm_short_input {
struct cmd_nums {
__le16 req_type;
#define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_ECHO_RESPONSE 0xbUL
#define HWRM_ERROR_RECOVERY_QCFG 0xcUL
#define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
#define HWRM_FUNC_BUF_UNRGTR 0xeUL
@@ -164,14 +165,20 @@ struct cmd_nums {
#define HWRM_VNIC_PLCMODES_CFG 0x48UL
#define HWRM_VNIC_PLCMODES_QCFG 0x49UL
#define HWRM_VNIC_QCAPS 0x4aUL
+ #define HWRM_VNIC_UPDATE 0x4bUL
#define HWRM_RING_ALLOC 0x50UL
#define HWRM_RING_FREE 0x51UL
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
#define HWRM_RING_AGGINT_QCAPS 0x54UL
+ #define HWRM_RING_SCHQ_ALLOC 0x55UL
+ #define HWRM_RING_SCHQ_CFG 0x56UL
+ #define HWRM_RING_SCHQ_FREE 0x57UL
#define HWRM_RING_RESET 0x5eUL
#define HWRM_RING_GRP_ALLOC 0x60UL
#define HWRM_RING_GRP_FREE 0x61UL
+ #define HWRM_RING_CFG 0x62UL
+ #define HWRM_RING_QCFG 0x63UL
#define HWRM_RESERVED5 0x64UL
#define HWRM_RESERVED6 0x65UL
#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
@@ -179,6 +186,11 @@ struct cmd_nums {
#define HWRM_QUEUE_MPLS_QCAPS 0x80UL
#define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
#define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
+ #define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL
+ #define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL
+ #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
+ #define HWRM_QUEUE_GLOBAL_CFG 0x86UL
+ #define HWRM_QUEUE_GLOBAL_QCFG 0x87UL
#define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
#define HWRM_CFA_L2_FILTER_FREE 0x91UL
#define HWRM_CFA_L2_FILTER_CFG 0x92UL
@@ -207,6 +219,13 @@ struct cmd_nums {
#define HWRM_PORT_PHY_MDIO_READ 0xb6UL
#define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
#define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
+ #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
+ #define HWRM_RESERVED7 0xbaUL
+ #define HWRM_PORT_TX_FIR_CFG 0xbbUL
+ #define HWRM_PORT_TX_FIR_QCFG 0xbcUL
+ #define HWRM_PORT_ECN_QSTATS 0xbdUL
+ #define HWRM_FW_LIVEPATCH_QUERY 0xbeUL
+ #define HWRM_FW_LIVEPATCH 0xbfUL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_HEALTH_CHECK 0xc2UL
@@ -220,6 +239,9 @@ struct cmd_nums {
#define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
#define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
#define HWRM_FW_IPC_MAILBOX 0xccUL
+ #define HWRM_FW_ECN_CFG 0xcdUL
+ #define HWRM_FW_ECN_QCFG 0xceUL
+ #define HWRM_FW_SECURE_CFG 0xcfUL
#define HWRM_EXEC_FWD_RESP 0xd0UL
#define HWRM_REJECT_FWD_RESP 0xd1UL
#define HWRM_FWD_RESP 0xd2UL
@@ -230,9 +252,12 @@ struct cmd_nums {
#define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL
#define HWRM_FW_STATE_UNQUIESCE 0xd8UL
#define HWRM_PORT_DSC_DUMP 0xd9UL
+ #define HWRM_PORT_EP_TX_QCFG 0xdaUL
+ #define HWRM_PORT_EP_TX_CFG 0xdbUL
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_REG_POWER_QUERY 0xe1UL
#define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
+ #define HWRM_REG_POWER_HISTOGRAM 0xe3UL
#define HWRM_WOL_FILTER_ALLOC 0xf0UL
#define HWRM_WOL_FILTER_FREE 0xf1UL
#define HWRM_WOL_FILTER_QCFG 0xf2UL
@@ -284,6 +309,10 @@ struct cmd_nums {
#define HWRM_CFA_EEM_OP 0x123UL
#define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
#define HWRM_CFA_TFLIB 0x125UL
+ #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL
+ #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL
+ #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL
+ #define HWRM_CFA_TLS_FILTER_FREE 0x129UL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
#define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
@@ -331,6 +360,25 @@ struct cmd_nums {
#define HWRM_FUNC_VF_BW_CFG 0x195UL
#define HWRM_FUNC_VF_BW_QCFG 0x196UL
#define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
+ #define HWRM_FUNC_QSTATS_EXT 0x198UL
+ #define HWRM_STAT_EXT_CTX_QUERY 0x199UL
+ #define HWRM_FUNC_SPD_CFG 0x19aUL
+ #define HWRM_FUNC_SPD_QCFG 0x19bUL
+ #define HWRM_FUNC_PTP_PIN_QCFG 0x19cUL
+ #define HWRM_FUNC_PTP_PIN_CFG 0x19dUL
+ #define HWRM_FUNC_PTP_CFG 0x19eUL
+ #define HWRM_FUNC_PTP_TS_QUERY 0x19fUL
+ #define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
+ #define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
+ #define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2 0x1a3UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2 0x1a4UL
+ #define HWRM_FUNC_DBR_PACING_CFG 0x1a5UL
+ #define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL
+ #define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL
+ #define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL
+ #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -341,6 +389,58 @@ struct cmd_nums {
#define HWRM_MFG_OTP_CFG 0x207UL
#define HWRM_MFG_OTP_QCFG 0x208UL
#define HWRM_MFG_HDMA_TEST 0x209UL
+ #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
+ #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
+ #define HWRM_MFG_SOC_IMAGE 0x20cUL
+ #define HWRM_MFG_SOC_QSTATUS 0x20dUL
+ #define HWRM_MFG_PARAM_SEEPROM_SYNC 0x20eUL
+ #define HWRM_MFG_PARAM_SEEPROM_READ 0x20fUL
+ #define HWRM_MFG_PARAM_SEEPROM_HEALTH 0x210UL
+ #define HWRM_MFG_PRVSN_EXPORT_CSR 0x211UL
+ #define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
+ #define HWRM_MFG_PRVSN_GET_STATE 0x213UL
+ #define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL
+ #define HWRM_MFG_PSOC_QSTATUS 0x215UL
+ #define HWRM_MFG_SELFTEST_QLIST 0x216UL
+ #define HWRM_MFG_SELFTEST_EXEC 0x217UL
+ #define HWRM_STAT_GENERIC_QSTATS 0x218UL
+ #define HWRM_TF 0x2bcUL
+ #define HWRM_TF_VERSION_GET 0x2bdUL
+ #define HWRM_TF_SESSION_OPEN 0x2c6UL
+ #define HWRM_TF_SESSION_ATTACH 0x2c7UL
+ #define HWRM_TF_SESSION_REGISTER 0x2c8UL
+ #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL
+ #define HWRM_TF_SESSION_CLOSE 0x2caUL
+ #define HWRM_TF_SESSION_QCFG 0x2cbUL
+ #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL
+ #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL
+ #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
+ #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
+ #define HWRM_TF_SESSION_RESC_INFO 0x2d0UL
+ #define HWRM_TF_TBL_TYPE_GET 0x2daUL
+ #define HWRM_TF_TBL_TYPE_SET 0x2dbUL
+ #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
+ #define HWRM_TF_CTXT_MEM_ALLOC 0x2e2UL
+ #define HWRM_TF_CTXT_MEM_FREE 0x2e3UL
+ #define HWRM_TF_CTXT_MEM_RGTR 0x2e4UL
+ #define HWRM_TF_CTXT_MEM_UNRGTR 0x2e5UL
+ #define HWRM_TF_EXT_EM_QCAPS 0x2e6UL
+ #define HWRM_TF_EXT_EM_OP 0x2e7UL
+ #define HWRM_TF_EXT_EM_CFG 0x2e8UL
+ #define HWRM_TF_EXT_EM_QCFG 0x2e9UL
+ #define HWRM_TF_EM_INSERT 0x2eaUL
+ #define HWRM_TF_EM_DELETE 0x2ebUL
+ #define HWRM_TF_EM_HASH_INSERT 0x2ecUL
+ #define HWRM_TF_EM_MOVE 0x2edUL
+ #define HWRM_TF_TCAM_SET 0x2f8UL
+ #define HWRM_TF_TCAM_GET 0x2f9UL
+ #define HWRM_TF_TCAM_MOVE 0x2faUL
+ #define HWRM_TF_TCAM_FREE 0x2fbUL
+ #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL
+ #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
+ #define HWRM_TF_IF_TBL_SET 0x2feUL
+ #define HWRM_TF_IF_TBL_GET 0x2ffUL
+ #define HWRM_SV 0x400UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
#define HWRM_DBG_WRITE_DIRECT 0xff12UL
@@ -356,6 +456,21 @@ struct cmd_nums {
#define HWRM_DBG_RING_INFO_GET 0xff1cUL
#define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
#define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
+ #define HWRM_DBG_DRV_TRACE 0xff1fUL
+ #define HWRM_DBG_QCAPS 0xff20UL
+ #define HWRM_DBG_QCFG 0xff21UL
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
+ #define HWRM_DBG_USEQ_ALLOC 0xff23UL
+ #define HWRM_DBG_USEQ_FREE 0xff24UL
+ #define HWRM_DBG_USEQ_FLUSH 0xff25UL
+ #define HWRM_DBG_USEQ_QCAPS 0xff26UL
+ #define HWRM_DBG_USEQ_CW_CFG 0xff27UL
+ #define HWRM_DBG_USEQ_SCHED_CFG 0xff28UL
+ #define HWRM_DBG_USEQ_RUN 0xff29UL
+ #define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
+ #define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
+ #define HWRM_NVM_DEFRAG 0xffecUL
+ #define HWRM_NVM_REQ_ARBITRATION 0xffedUL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL
@@ -398,6 +513,8 @@ struct ret_codes {
#define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
#define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
#define HWRM_ERR_CODE_BUSY 0x10UL
+ #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
+ #define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
@@ -428,9 +545,9 @@ struct hwrm_err_output {
#define HWRM_TARGET_ID_TOOLS 0xFFFD
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
-#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 12
-#define HWRM_VERSION_STR "1.10.1.12"
+#define HWRM_VERSION_UPDATE 2
+#define HWRM_VERSION_RSVD 95
+#define HWRM_VERSION_STR "1.10.2.95"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -482,6 +599,8 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -504,8 +623,9 @@ struct hwrm_ver_get_output {
__le16 max_resp_len;
__le16 def_req_timeout;
u8 flags;
- #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
- #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
+ #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY_BACKING_STORE 0x4UL
u8 unused_0[2];
u8 always_1;
__le16 hwrm_intf_major;
@@ -529,7 +649,8 @@ struct hwrm_ver_get_output {
__le16 roce_fw_build;
__le16 roce_fw_patch;
__le16 max_ext_req_len;
- u8 unused_1[5];
+ __le16 max_req_timeout;
+ u8 unused_1[3];
u8 valid;
};
@@ -625,6 +746,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
#define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
#define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
@@ -647,6 +769,15 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
#define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x49UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -754,6 +885,8 @@ struct hwrm_async_event_cmpl_reset_notify {
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
__le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_SFT 0
u8 opaque_v;
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
@@ -771,7 +904,9 @@ struct hwrm_async_event_cmpl_reset_notify {
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION (0x5UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
};
@@ -800,6 +935,32 @@ struct hwrm_async_event_cmpl_error_recovery {
#define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL
};
+/* hwrm_async_event_cmpl_ring_monitor_msg (size:128b/16B) */
+struct hwrm_async_event_cmpl_ring_monitor_msg {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG 0xaUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_TX 0x0UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL 0x2UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_V 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_vf_cfg_change {
__le16 type;
@@ -811,6 +972,8 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
__le32 event_data2;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_SFT 0
u8 opaque_v;
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
@@ -924,6 +1087,296 @@ struct hwrm_async_event_cmpl_eem_cache_flush_done {
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0
};
+/* hwrm_async_event_cmpl_deferred_response (size:128b/16B) */
+struct hwrm_async_event_cmpl_deferred_response {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */
+struct hwrm_async_event_cmpl_echo_request {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_phc_update (size:128b/16B) */
+struct hwrm_async_event_cmpl_phc_update {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_SFT 16
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK 0xfUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE 0x4UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT 4
+};
+
+/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */
+struct hwrm_async_event_cmpl_pps_timestamp {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL 0x0UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT 1
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT 4
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT 0
+};
+
+/* hwrm_async_event_cmpl_error_report (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_SFT 0
+};
+
+/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
+struct hwrm_async_event_cmpl_hwrm_error {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
+};
+
+/* hwrm_async_event_cmpl_error_report_base (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_base {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD
+};
+
+/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_pause_storm {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM
+};
+
+/* hwrm_async_event_cmpl_error_report_invalid_signal (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_invalid_signal {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL
+};
+
+/* hwrm_async_event_cmpl_error_report_nvm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_nvm {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_SFT 8
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE
+};
+
+/* hwrm_async_event_cmpl_error_report_doorbell_drop_threshold (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK 0xffffff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8
+};
+
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -1044,6 +1497,8 @@ struct hwrm_func_vf_cfg_input {
#define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
#define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
#define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_KEY_CTXS 0x1000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_KEY_CTXS 0x2000UL
__le16 mtu;
__le16 guest_vlan;
__le16 async_event_cr;
@@ -1057,6 +1512,8 @@ struct hwrm_func_vf_cfg_input {
#define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
#define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
#define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL
__le16 num_rsscos_ctxs;
__le16 num_cmpl_rings;
__le16 num_tx_rings;
@@ -1065,7 +1522,8 @@ struct hwrm_func_vf_cfg_input {
__le16 num_vnics;
__le16 num_stat_ctxs;
__le16 num_hw_ring_grps;
- u8 unused_0[4];
+ __le16 num_tx_key_ctxs;
+ __le16 num_rx_key_ctxs;
};
/* hwrm_func_vf_cfg_output (size:128b/16B) */
@@ -1089,7 +1547,7 @@ struct hwrm_func_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_qcaps_output (size:640b/80B) */
+/* hwrm_func_qcaps_output (size:768b/96B) */
struct hwrm_func_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1126,6 +1584,10 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
#define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
#define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1146,7 +1608,65 @@ struct hwrm_func_qcaps_output {
__le32 max_flow_id;
__le32 max_hw_ring_grps;
__le16 max_sp_tx_rings;
- u8 unused_0;
+ __le16 max_msix_vfs;
+ __le32 flags_ext;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HW_DBR_DROP_RECOV_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DISABLE_CQ_OVERFLOW_DETECTION_SUPPORTED 0x80000000UL
+ u8 max_schqs;
+ u8 mpc_chnls_cap;
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE 0x2UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
+ __le16 max_key_ctxs_alloc;
+ __le32 flags_ext2;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
+ __le16 tunnel_disable_flag;
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE 0x4UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE 0x8UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_GRE 0x10UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
+ u8 unused_1;
u8 valid;
};
@@ -1161,7 +1681,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:704b/88B) */
+/* hwrm_func_qcfg_output (size:896b/112B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -1180,6 +1700,12 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
#define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
#define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
+ #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
+ #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
+ #define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
+ #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
+ #define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1188,7 +1714,7 @@ struct hwrm_func_qcfg_output {
__le16 alloc_rx_rings;
__le16 alloc_l2_ctx;
__le16 alloc_vnics;
- __le16 mtu;
+ __le16 admin_mtu;
__le16 mru;
__le16 stat_ctx_id;
u8 port_partition_type;
@@ -1197,6 +1723,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2 0x5UL
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
u8 port_pf_cnt;
@@ -1267,11 +1794,53 @@ struct hwrm_func_qcfg_output {
u8 always_1;
__le32 reset_addr_poll;
__le16 legacy_l2_db_size_kb;
- u8 unused_2[1];
+ __le16 svif_info;
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL
+ u8 mpc_chnls;
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TCE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RCE_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL
+ u8 unused_2[3];
+ __le32 partition_min_bw;
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le32 partition_max_bw;
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le16 host_mtu;
+ __le16 alloc_tx_key_ctxs;
+ __le16 alloc_rx_key_ctxs;
+ u8 port_kdnet_mode;
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
+ u8 kdnet_pcie_function;
+ __le16 port_kdnet_fid;
+ u8 unused_3;
u8 valid;
};
-/* hwrm_func_cfg_input (size:704b/88B) */
+/* hwrm_func_cfg_input (size:960b/120B) */
struct hwrm_func_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1302,31 +1871,45 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
#define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
#define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
+ #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
__le32 enables;
- #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
- #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
- #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
- #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
- #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
- #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
- #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
- #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
- #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
- #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
- #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
- #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
- #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
- __le16 mtu;
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
+ #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
+ #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
+ #define FUNC_CFG_REQ_ENABLES_MPC_CHNLS 0x2000000UL
+ #define FUNC_CFG_REQ_ENABLES_PARTITION_MIN_BW 0x4000000UL
+ #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL
+ #define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL
+ #define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL
+ #define FUNC_CFG_REQ_ENABLES_TX_KEY_CTXS 0x40000000UL
+ #define FUNC_CFG_REQ_ENABLES_RX_KEY_CTXS 0x80000000UL
+ __le16 admin_mtu;
__le16 mru;
__le16 num_rsscos_ctxs;
__le16 num_cmpl_rings;
@@ -1399,6 +1982,51 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
#define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
__le16 num_mcast_filters;
+ __le16 schq_id;
+ __le16 mpc_chnls;
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_ENABLE 0x1UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_DISABLE 0x2UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_ENABLE 0x4UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_DISABLE 0x8UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_ENABLE 0x10UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_DISABLE 0x20UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_ENABLE 0x40UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_DISABLE 0x80UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_ENABLE 0x100UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_DISABLE 0x200UL
+ __le32 partition_min_bw;
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le32 partition_max_bw;
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
+ __be16 tpid;
+ __le16 host_mtu;
+ __le16 num_tx_key_ctxs;
+ __le16 num_rx_key_ctxs;
+ __le32 enables2;
+ #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
+ u8 port_kdnet_mode;
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED
+ u8 unused_0[7];
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -1411,6 +2039,18 @@ struct hwrm_func_cfg_output {
u8 valid;
};
+/* hwrm_func_cfg_cmd_err (size:64b/8B) */
+struct hwrm_func_cfg_cmd_err {
+ u8 code;
+ #define FUNC_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_RANGE 0x1UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_MORE_THAN_MAX 0x2UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_UNSUPPORTED 0x3UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT 0x4UL
+ #define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT
+ u8 unused_0[7];
+};
+
/* hwrm_func_qstats_input (size:192b/24B) */
struct hwrm_func_qstats_input {
__le16 req_type;
@@ -1420,9 +2060,10 @@ struct hwrm_func_qstats_input {
__le64 resp_addr;
__le16 fid;
u8 flags;
- #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL
- #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
- #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY
+ #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL
+ #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK
u8 unused_0[5];
};
@@ -1456,6 +2097,59 @@ struct hwrm_func_qstats_output {
u8 valid;
};
+/* hwrm_func_qstats_ext_input (size:256b/32B) */
+struct hwrm_func_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 flags;
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_LAST FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0[1];
+ __le32 enables;
+ #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL
+ __le16 schq_id;
+ __le16 traffic_class;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_qstats_ext_output (size:1536b/192B) */
+struct hwrm_func_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_func_clr_stats_input (size:192b/24B) */
struct hwrm_func_clr_stats_input {
__le16 req_type;
@@ -1506,13 +2200,16 @@ struct hwrm_func_drv_rgtr_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -1682,7 +2379,7 @@ struct hwrm_func_resource_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_resource_qcaps_output (size:448b/56B) */
+/* hwrm_func_resource_qcaps_output (size:512b/64B) */
struct hwrm_func_resource_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1714,11 +2411,15 @@ struct hwrm_func_resource_qcaps_output {
__le16 max_tx_scheduler_inputs;
__le16 flags;
#define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_tx_key_ctxs;
+ __le16 max_tx_key_ctxs;
+ __le16 min_rx_key_ctxs;
+ __le16 max_rx_key_ctxs;
u8 unused_0[5];
u8 valid;
};
-/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
+/* hwrm_func_vf_resource_cfg_input (size:512b/64B) */
struct hwrm_func_vf_resource_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1745,6 +2446,10 @@ struct hwrm_func_vf_resource_cfg_input {
__le16 max_hw_ring_grps;
__le16 flags;
#define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_tx_key_ctxs;
+ __le16 max_tx_key_ctxs;
+ __le16 min_rx_key_ctxs;
+ __le16 max_rx_key_ctxs;
u8 unused_0[2];
};
@@ -1762,7 +2467,9 @@ struct hwrm_func_vf_resource_cfg_output {
__le16 reserved_vnics;
__le16 reserved_stat_ctx;
__le16 reserved_hw_ring_grps;
- u8 unused_0[7];
+ __le16 reserved_tx_key_ctxs;
+ __le16 reserved_rx_key_ctxs;
+ u8 unused_0[3];
u8 valid;
};
@@ -1775,7 +2482,7 @@ struct hwrm_func_backing_store_qcaps_input {
__le64 resp_addr;
};
-/* hwrm_func_backing_store_qcaps_output (size:640b/80B) */
+/* hwrm_func_backing_store_qcaps_output (size:832b/104B) */
struct hwrm_func_backing_store_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1806,13 +2513,57 @@ struct hwrm_func_backing_store_qcaps_output {
__le16 mrav_num_entries_units;
u8 tqm_entries_multiple;
u8 ctx_kind_initializer;
- __le32 rsvd;
- __le16 rsvd1;
- u8 rsvd2;
- u8 valid;
+ __le16 ctx_init_mask;
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_CQ 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_TKC 0x40UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_RKC 0x80UL
+ u8 qp_init_offset;
+ u8 srq_init_offset;
+ u8 cq_init_offset;
+ u8 vnic_init_offset;
+ u8 tqm_fp_rings_count;
+ u8 stat_init_offset;
+ u8 mrav_init_offset;
+ u8 tqm_fp_rings_count_ext;
+ u8 tkc_init_offset;
+ u8 rkc_init_offset;
+ __le16 tkc_entry_size;
+ __le16 rkc_entry_size;
+ __le32 tkc_max_entries;
+ __le32 rkc_max_entries;
+ u8 rsvd1[7];
+ u8 valid;
+};
+
+/* tqm_fp_ring_cfg (size:128b/16B) */
+struct tqm_fp_ring_cfg {
+ u8 tqm_ring_pg_size_tqm_ring_lvl;
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK 0xfUL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT 0
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0 0x0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 0x1UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 0x2UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT 4
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G
+ u8 unused[3];
+ __le32 tqm_ring_num_entries;
+ __le64 tqm_ring_page_dir;
};
-/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */
+/* hwrm_func_backing_store_cfg_input (size:2688b/336B) */
struct hwrm_func_backing_store_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1823,22 +2574,27 @@ struct hwrm_func_backing_store_cfg_input {
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
__le32 enables;
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL
u8 qpc_pg_size_qpc_lvl;
#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
@@ -2140,6 +2896,102 @@ struct hwrm_func_backing_store_cfg_input {
__le16 tqm_entry_size;
__le16 mrav_entry_size;
__le16 tim_entry_size;
+ u8 tqm_ring8_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G
+ u8 ring8_unused[3];
+ __le32 tqm_ring8_num_entries;
+ __le64 tqm_ring8_page_dir;
+ u8 tqm_ring9_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G
+ u8 ring9_unused[3];
+ __le32 tqm_ring9_num_entries;
+ __le64 tqm_ring9_page_dir;
+ u8 tqm_ring10_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G
+ u8 ring10_unused[3];
+ __le32 tqm_ring10_num_entries;
+ __le64 tqm_ring10_page_dir;
+ __le32 tkc_num_entries;
+ __le32 rkc_num_entries;
+ __le64 tkc_page_dir;
+ __le64 rkc_page_dir;
+ __le16 tkc_entry_size;
+ __le16 rkc_entry_size;
+ u8 tkc_pg_size_tkc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G
+ u8 rkc_pg_size_rkc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G
+ u8 rsvd[2];
};
/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
@@ -2231,7 +3083,575 @@ struct hwrm_error_recovery_qcfg_output {
#define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2
__le32 reset_reg_val[16];
u8 delay_after_reset[16];
- u8 unused_1[7];
+ __le32 err_recovery_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_func_echo_response_input (size:192b/24B) */
+struct hwrm_func_echo_response_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 event_data1;
+ __le32 event_data2;
+};
+
+/* hwrm_func_echo_response_output (size:128b/16B) */
+struct hwrm_func_echo_response_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_pin_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_pin_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_pin_qcfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_pins;
+ u8 state;
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN0_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN1_ENABLED 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN2_ENABLED 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN3_ENABLED 0x8UL
+ u8 pin0_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT
+ u8 pin1_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT
+ u8 pin2_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT
+ u8 pin3_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_func_ptp_pin_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_pin_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_STATE 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_USAGE 0x8UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_STATE 0x10UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_USAGE 0x20UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_STATE 0x40UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_USAGE 0x80UL
+ u8 pin0_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED
+ u8 pin0_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT
+ u8 pin1_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED
+ u8 pin1_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT
+ u8 pin2_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED
+ u8 pin2_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT
+ u8 pin3_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED
+ u8 pin3_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT
+ u8 unused_0[4];
+};
+
+/* hwrm_func_ptp_pin_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_cfg_input (size:384b/48B) */
+struct hwrm_func_ptp_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT 0x1UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_SOURCE 0x2UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_PHASE 0x4UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME 0x40UL
+ u8 ptp_pps_event;
+ #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL
+ u8 ptp_freq_adj_dll_source;
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_NONE 0x0UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_0 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_1 0x2UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_2 0x3UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_3 0x4UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_0 0x5UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_1 0x6UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_2 0x7UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_3 0x8UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID 0xffUL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID
+ u8 ptp_freq_adj_dll_phase;
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_NONE 0x0UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K 0x2UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M 0x3UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M
+ u8 unused_0[3];
+ __le32 ptp_freq_adj_ext_period;
+ __le32 ptp_freq_adj_ext_up;
+ __le32 ptp_freq_adj_ext_phase_lower;
+ __le32 ptp_freq_adj_ext_phase_upper;
+ __le64 ptp_set_time;
+};
+
+/* hwrm_func_ptp_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ts_query_input (size:192b/24B) */
+struct hwrm_func_ptp_ts_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PPS_TIME 0x1UL
+ #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME 0x2UL
+ u8 unused_0[4];
+};
+
+/* hwrm_func_ptp_ts_query_output (size:320b/40B) */
+struct hwrm_func_ptp_ts_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 pps_event_ts;
+ __le64 ptm_res_local_ts;
+ __le64 ptm_pmstr_ts;
+ __le32 ptm_mstr_prop_dly;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ext_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_ext_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_MASTER_FID 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_FID 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_MODE 0x4UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_FAILOVER_TIMER 0x8UL
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ u8 phc_sec_mode;
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_SWITCH 0x0UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_ALL 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_LAST FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY
+ u8 unused_0;
+ __le32 failover_timer;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_ptp_ext_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_ext_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ext_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_ext_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_ext_qcfg_output (size:256b/32B) */
+struct hwrm_func_ptp_ext_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ __le16 phc_active_fid0;
+ __le16 phc_active_fid1;
+ __le32 last_failover_event;
+ __le16 from_fid;
+ __le16 to_fid;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_cfg_v2_input (size:448b/56B) */
+struct hwrm_func_backing_store_cfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ __le64 page_dir;
+ __le32 num_entries;
+ __le16 entry_size;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+};
+
+/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 rsvd0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_qcfg_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ u8 rsvd[4];
+};
+
+/* hwrm_func_backing_store_qcfg_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ __le64 page_dir;
+ __le32 num_entries;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ u8 rsvd[2];
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ u8 rsvd2[7];
+ u8 valid;
+};
+
+/* qpc_split_entries (size:128b/16B) */
+struct qpc_split_entries {
+ __le32 qp_num_l2_entries;
+ __le32 qp_num_qp1_entries;
+ __le32 rsvd[2];
+};
+
+/* srq_split_entries (size:128b/16B) */
+struct srq_split_entries {
+ __le32 srq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* cq_split_entries (size:128b/16B) */
+struct cq_split_entries {
+ __le32 cq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* vnic_split_entries (size:128b/16B) */
+struct vnic_split_entries {
+ __le32 vnic_num_vnic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* mrav_split_entries (size:128b/16B) */
+struct mrav_split_entries {
+ __le32 mrav_num_av_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcaps_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ u8 rsvd[6];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcaps_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ __le16 entry_size;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
+ __le32 instance_bit_map;
+ u8 ctx_init_value;
+ u8 ctx_init_offset;
+ u8 entry_multiple;
+ u8 rsvd;
+ __le32 max_num_entries;
+ __le32 min_num_entries;
+ __le16 next_valid_type;
+ u8 subtype_valid_cnt;
+ u8 rsvd2;
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ u8 rsvd3[3];
u8 valid;
};
@@ -2268,33 +3688,43 @@ struct hwrm_port_phy_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
- #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
- #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE 0x20000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE 0x40000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE 0x80000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE 0x100000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE 0x200000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE 0x400000UL
__le32 enables;
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
- #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
- #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
- #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
- #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
- #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
__le16 port_id;
__le16 force_link_speed;
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
@@ -2307,7 +3737,6 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
u8 auto_mode;
@@ -2338,7 +3767,6 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
__le16 auto_link_speed_mask;
@@ -2356,7 +3784,6 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_200GB 0x4000UL
u8 wirespeed;
#define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
#define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
@@ -2380,11 +3807,19 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
- u8 unused_2[2];
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
__le32 tx_lpi_timer;
#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
- __le32 unused_3;
+ __le16 auto_link_pam4_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
+ u8 unused_2[2];
};
/* hwrm_port_phy_cfg_output (size:128b/16B) */
@@ -2429,7 +3864,22 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
- u8 unused_0;
+ u8 active_fec_signal_mode;
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE (0x1UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE (0x2UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE (0x3UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE (0x4UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE (0x5UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE (0x6UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_LAST PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE
__le16 link_speed;
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
@@ -2466,7 +3916,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_200GB 0x4000UL
__le16 force_link_speed;
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
@@ -2478,7 +3927,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
u8 auto_mode;
@@ -2503,7 +3951,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
__le16 auto_link_speed_mask;
@@ -2521,7 +3968,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_200GB 0x4000UL
u8 wirespeed;
#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
#define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
@@ -2581,7 +4027,15 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR 0x20UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR 0x21UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR 0x22UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER 0x23UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2 0x24UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -2655,26 +4109,51 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
__le16 fec_cfg;
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_SUPPORTED 0x200UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED 0x400UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED 0x800UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED 0x1000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED 0x2000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED 0x4000UL
u8 duplex_state;
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
u8 option_flags;
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
- u8 unused_2[7];
- u8 valid;
-};
-
-/* hwrm_port_mac_cfg_input (size:384b/48B) */
+ __le16 support_pam4_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB
+ __le16 auto_pam4_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
+ u8 link_partner_pam4_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
+ u8 valid;
+};
+
+/* hwrm_port_mac_cfg_input (size:448b/56B) */
struct hwrm_port_mac_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -2696,6 +4175,8 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
#define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE 0x4000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE 0x8000UL
__le32 enables;
#define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
#define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
@@ -2706,6 +4187,7 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
#define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
#define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE 0x400UL
__le16 port_id;
u8 ipg;
u8 lpbk;
@@ -2738,8 +4220,9 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
u8 unused_0[3];
- __s32 ptp_freq_adj_ppb;
+ __le32 ptp_freq_adj_ppb;
u8 unused_1[4];
+ __le64 ptp_adj_phase;
};
/* hwrm_port_mac_cfg_output (size:128b/16B) */
@@ -2771,16 +4254,18 @@ struct hwrm_port_mac_ptp_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
+/* hwrm_port_mac_ptp_qcfg_output (size:704b/88B) */
struct hwrm_port_mac_ptp_qcfg_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 flags;
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
u8 unused_0[3];
__le32 rx_ts_reg_off_lower;
__le32 rx_ts_reg_off_upper;
@@ -2797,6 +4282,8 @@ struct hwrm_port_mac_ptp_qcfg_output {
__le32 tx_ts_reg_off_seq_id;
__le32 tx_ts_reg_off_fifo;
__le32 tx_ts_reg_off_granularity;
+ __le32 ts_ref_clock_reg_lower;
+ __le32 ts_ref_clock_reg_upper;
u8 unused_1[7];
u8 valid;
};
@@ -2934,7 +4421,11 @@ struct hwrm_port_qstats_input {
__le16 target_id;
__le64 resp_addr;
__le16 port_id;
- u8 unused_0[6];
+ u8 flags;
+ #define PORT_QSTATS_REQ_FLAGS_UNUSED 0x0UL
+ #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ #define PORT_QSTATS_REQ_FLAGS_LAST PORT_QSTATS_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0[5];
__le64 tx_stat_host_addr;
__le64 rx_stat_host_addr;
};
@@ -2987,7 +4478,7 @@ struct tx_port_stats_ext {
__le64 pfc_pri7_tx_transitions;
};
-/* rx_port_stats_ext (size:3648b/456B) */
+/* rx_port_stats_ext (size:3776b/472B) */
struct rx_port_stats_ext {
__le64 link_down_events;
__le64 continuous_pause_events;
@@ -3046,6 +4537,8 @@ struct rx_port_stats_ext {
__le64 rx_discard_packets_cos5;
__le64 rx_discard_packets_cos6;
__le64 rx_discard_packets_cos7;
+ __le64 rx_fec_corrected_blocks;
+ __le64 rx_fec_uncorrectable_blocks;
};
/* hwrm_port_qstats_ext_input (size:320b/40B) */
@@ -3058,7 +4551,11 @@ struct hwrm_port_qstats_ext_input {
__le16 port_id;
__le16 tx_stat_size;
__le16 rx_stat_size;
- u8 unused_0[2];
+ u8 flags;
+ #define PORT_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL
+ #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
+ #define PORT_QSTATS_EXT_REQ_FLAGS_LAST PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0;
__le64 tx_stat_host_addr;
__le64 rx_stat_host_addr;
};
@@ -3106,6 +4603,47 @@ struct hwrm_port_lpbk_qstats_output {
u8 valid;
};
+/* hwrm_port_ecn_qstats_input (size:256b/32B) */
+struct hwrm_port_ecn_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 ecn_stat_buf_size;
+ u8 flags;
+ #define PORT_ECN_QSTATS_REQ_FLAGS_UNUSED 0x0UL
+ #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ #define PORT_ECN_QSTATS_REQ_FLAGS_LAST PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0[3];
+ __le64 ecn_stat_host_addr;
+};
+
+/* hwrm_port_ecn_qstats_output (size:128b/16B) */
+struct hwrm_port_ecn_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ecn_stat_buf_size;
+ u8 mark_en;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* port_stats_ecn (size:512b/64B) */
+struct port_stats_ecn {
+ __le64 mark_cnt_cos0;
+ __le64 mark_cnt_cos1;
+ __le64 mark_cnt_cos2;
+ __le64 mark_cnt_cos3;
+ __le64 mark_cnt_cos4;
+ __le64 mark_cnt_cos5;
+ __le64 mark_cnt_cos6;
+ __le64 mark_cnt_cos7;
+};
+
/* hwrm_port_clr_stats_input (size:192b/24B) */
struct hwrm_port_clr_stats_input {
__le16 req_type;
@@ -3148,7 +4686,7 @@ struct hwrm_port_lpbk_clr_stats_output {
u8 valid;
};
-/* hwrm_port_ts_query_input (size:192b/24B) */
+/* hwrm_port_ts_query_input (size:320b/40B) */
struct hwrm_port_ts_query_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3163,6 +4701,14 @@ struct hwrm_port_ts_query_input {
#define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2UL
__le16 port_id;
u8 unused_0[2];
+ __le16 enables;
+ #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT 0x1UL
+ #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID 0x2UL
+ #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET 0x4UL
+ __le16 ts_req_timeout;
+ __le32 ptp_seq_id;
+ __le16 ptp_hdr_offset;
+ u8 unused_1[6];
};
/* hwrm_port_ts_query_output (size:192b/24B) */
@@ -3188,26 +4734,29 @@ struct hwrm_port_phy_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcaps_output (size:192b/24B) */
+/* hwrm_port_phy_qcaps_output (size:256b/32B) */
struct hwrm_port_phy_qcaps_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xf0UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 4
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS 0x80UL
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_4
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_12 0xcUL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_12
__le16 supported_speeds_force_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
@@ -3223,7 +4772,6 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_200GB 0x4000UL
__le16 supported_speeds_auto_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
@@ -3239,7 +4787,6 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_200GB 0x4000UL
__le16 supported_speeds_eee_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
@@ -3256,8 +4803,21 @@ struct hwrm_port_phy_qcaps_output {
__le32 valid_tx_lpi_timer_high;
#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
- #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
+ #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24
+ __le16 supported_pam4_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL
+ __le16 supported_pam4_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
+ __le16 flags2;
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ u8 internal_port_cnt;
+ u8 valid;
};
/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
@@ -3696,7 +5256,7 @@ struct hwrm_queue_qportcfg_input {
u8 unused_0;
};
-/* hwrm_queue_qportcfg_output (size:256b/32B) */
+/* hwrm_queue_qportcfg_output (size:1344b/168B) */
struct hwrm_queue_qportcfg_output {
__le16 error_code;
__le16 req_type;
@@ -3706,7 +5266,8 @@ struct hwrm_queue_qportcfg_output {
u8 max_configurable_lossless_queues;
u8 queue_cfg_allowed;
u8 queue_cfg_info;
- #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_USE_PROFILE_TYPE 0x2UL
u8 queue_pfcenable_cfg_allowed;
u8 queue_pri2cos_cfg_allowed;
u8 queue_cos2bw_cfg_allowed;
@@ -3782,6 +5343,79 @@ struct hwrm_queue_qportcfg_output {
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id0_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ char qid0_name[16];
+ char qid1_name[16];
+ char qid2_name[16];
+ char qid3_name[16];
+ char qid4_name[16];
+ char qid5_name[16];
+ char qid6_name[16];
+ char qid7_name[16];
+ u8 queue_id1_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id2_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id3_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id4_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id5_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id6_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id7_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 valid;
+};
+
+/* hwrm_queue_qcfg_input (size:192b/24B) */
+struct hwrm_queue_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX
+ __le32 queue_id;
+};
+
+/* hwrm_queue_qcfg_output (size:128b/16B) */
+struct hwrm_queue_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 queue_len;
+ u8 service_profile;
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN
+ u8 queue_cfg_info;
+ #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0;
u8 valid;
};
@@ -3840,14 +5474,22 @@ struct hwrm_queue_pfcenable_qcfg_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
u8 unused_0[3];
u8 valid;
};
@@ -3860,14 +5502,22 @@ struct hwrm_queue_pfcenable_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
__le16 port_id;
u8 unused_0[2];
};
@@ -4746,8 +6396,10 @@ struct hwrm_vnic_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
- u8 unused_0[4];
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
+ __le16 virtio_net_fid;
+ u8 unused_0[2];
};
/* hwrm_vnic_alloc_output (size:128b/16B) */
@@ -4806,6 +6458,8 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
#define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
#define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
+ #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
+ #define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
@@ -4815,7 +6469,17 @@ struct hwrm_vnic_cfg_input {
__le16 default_rx_ring_id;
__le16 default_cmpl_ring_id;
__le16 queue_id;
- u8 unused0[6];
+ u8 rx_csum_v2_mode;
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
+ u8 l2_cqe_mode;
+ #define VNIC_CFG_REQ_L2_CQE_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
+ u8 unused0[4];
};
/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -4848,15 +6512,31 @@ struct hwrm_vnic_qcaps_output {
__le16 mru;
u8 unused_0[2];
__le32 flags;
- #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
- #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
- #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
- #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
- #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
+ #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
+ #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
+ #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
+ #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
+ #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
+ #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP 0x8000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_XOR_CAP 0x10000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP 0x20000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V3_CAP 0x80000UL
+ #define VNIC_QCAPS_RESP_FLAGS_L2_CQE_MODE_CAP 0x100000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP 0x200000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -4970,12 +6650,17 @@ struct hwrm_vnic_rss_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 hash_type;
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6 0x400UL
__le16 vnic_id;
u8 ring_table_pair_index;
u8 hash_mode_flags;
@@ -4987,7 +6672,15 @@ struct hwrm_vnic_rss_cfg_input {
__le64 ring_grp_tbl_addr;
__le64 hash_key_tbl_addr;
__le16 rss_ctx_idx;
- u8 unused_1[6];
+ u8 flags;
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
+ u8 ring_select_mode;
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_LAST VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ u8 unused_1[4];
};
/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
@@ -5023,15 +6716,18 @@ struct hwrm_vnic_plcmodes_cfg_input {
#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL
__le32 enables;
#define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL
__le32 vnic_id;
__le16 jumbo_thresh;
__le16 hds_offset;
__le16 hds_threshold;
- u8 unused_0[6];
+ __le16 max_bds;
+ u8 unused_0[4];
};
/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
@@ -5099,6 +6795,8 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
#define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
#define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
+ #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
+ #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
@@ -5107,14 +6805,33 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
#define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
#define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
- u8 unused_0;
+ u8 cmpl_coal_cnt;
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_OFF 0x0UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_4 0x1UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_8 0x2UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_12 0x3UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_16 0x4UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_24 0x5UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_32 0x6UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_48 0x7UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64 0x8UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_96 0x9UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_128 0xaUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_192 0xbUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_256 0xcUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_320 0xdUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_384 0xeUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX
__le16 flags;
- #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
+ #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
+ #define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL
+ #define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL
__le64 page_tbl_addr;
__le32 fbo;
u8 page_size;
u8 page_tbl_depth;
- u8 unused_1[2];
+ __le16 schq_id;
__le32 length;
__le16 logical_id;
__le16 cmpl_ring_id;
@@ -5158,7 +6875,14 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
#define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
#define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
- u8 unused_4[3];
+ u8 mpc_chnls_type;
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE 0x0UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RCE 0x1UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA 0x2UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
+ u8 unused_4[2];
__le64 cq_handle;
};
@@ -5170,11 +6894,15 @@ struct hwrm_ring_alloc_output {
__le16 resp_len;
__le16 ring_id;
__le16 logical_ring_id;
- u8 unused_0[3];
+ u8 push_buffer_index;
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_LAST RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[2];
u8 valid;
};
-/* hwrm_ring_free_input (size:192b/24B) */
+/* hwrm_ring_free_input (size:256b/32B) */
struct hwrm_ring_free_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -5189,9 +6917,13 @@ struct hwrm_ring_free_input {
#define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
#define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
#define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
- u8 unused_0;
+ u8 flags;
+ #define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL
+ #define RING_FREE_REQ_FLAGS_LAST RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID
__le16 ring_id;
- u8 unused_1[4];
+ __le32 prod_idx;
+ __le32 opaque;
+ __le32 unused_1;
};
/* hwrm_ring_free_output (size:128b/16B) */
@@ -5212,11 +6944,12 @@ struct hwrm_ring_reset_input {
__le16 target_id;
__le64 resp_addr;
u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
- #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
- #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL
+ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL
+ #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP
u8 unused_0;
__le16 ring_id;
u8 unused_1[4];
@@ -5228,7 +6961,11 @@ struct hwrm_ring_reset_output {
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
- u8 unused_0[4];
+ u8 push_buffer_index;
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_LAST RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[3];
u8 consumer_idx[3];
u8 valid;
};
@@ -5287,7 +7024,11 @@ struct hwrm_ring_cmpl_ring_qaggint_params_input {
__le16 target_id;
__le64 resp_addr;
__le16 ring_id;
- u8 unused_0[6];
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
+ u8 unused_0[4];
};
/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
@@ -5393,6 +7134,7 @@ struct hwrm_ring_grp_free_output {
u8 unused_0[7];
u8 valid;
};
+
#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
@@ -5850,6 +7592,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_NO_L2_CONTEXT 0x40UL
__le32 enables;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
@@ -5985,6 +7728,7 @@ struct hwrm_cfa_ntuple_filter_cfg_input {
__le32 flags;
#define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
#define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_NO_L2_CONTEXT 0x4UL
__le64 ntuple_filter_id;
__le32 new_dst_id;
__le32 new_mirror_vnic_id;
@@ -6266,12 +8010,17 @@ struct hwrm_cfa_flow_info_input {
__le16 target_id;
__le64 resp_addr;
__le16 flow_handle;
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_SFT 0
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_TX 0x3000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT_RX 0x9000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT_RX 0xa000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_RX 0xb000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX 0xc000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_LAST CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX
u8 unused_0[6];
__le64 ext_flow_handle;
};
@@ -6360,7 +8109,8 @@ struct hwrm_cfa_flow_stats_output {
__le64 byte_7;
__le64 byte_8;
__le64 byte_9;
- u8 unused_0[7];
+ __le16 flow_hits;
+ u8 unused_0[5];
u8 valid;
};
@@ -6389,7 +8139,7 @@ struct hwrm_cfa_vfr_alloc_output {
u8 valid;
};
-/* hwrm_cfa_vfr_free_input (size:384b/48B) */
+/* hwrm_cfa_vfr_free_input (size:448b/56B) */
struct hwrm_cfa_vfr_free_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -6397,6 +8147,9 @@ struct hwrm_cfa_vfr_free_input {
__le16 target_id;
__le64 resp_addr;
char vfr_name[32];
+ __le16 vf_id;
+ __le16 reserved;
+ u8 unused_0[4];
};
/* hwrm_cfa_vfr_free_output (size:128b/16B) */
@@ -6564,21 +8317,26 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL
u8 unused_0[3];
u8 valid;
};
@@ -6680,15 +8438,15 @@ struct ctx_hw_stats {
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
__le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
+ __le64 rx_error_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
__le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
@@ -6698,21 +8456,21 @@ struct ctx_hw_stats {
__le64 tpa_aborts;
};
-/* ctx_hw_stats_ext (size:1344b/168B) */
+/* ctx_hw_stats_ext (size:1408b/176B) */
struct ctx_hw_stats_ext {
__le64 rx_ucast_pkts;
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
__le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
+ __le64 rx_error_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
__le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
@@ -6721,6 +8479,7 @@ struct ctx_hw_stats_ext {
__le64 rx_tpa_pkt;
__le64 rx_tpa_bytes;
__le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
};
/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
@@ -6779,7 +8538,9 @@ struct hwrm_stat_ctx_query_input {
__le16 target_id;
__le64 resp_addr;
__le32 stat_ctx_id;
- u8 unused_0[4];
+ u8 flags;
+ #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
};
/* hwrm_stat_ctx_query_output (size:1408b/176B) */
@@ -6791,16 +8552,16 @@ struct hwrm_stat_ctx_query_output {
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
- __le64 tx_err_pkts;
- __le64 tx_drop_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_error_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
__le64 rx_ucast_pkts;
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
- __le64 rx_err_pkts;
- __le64 rx_drop_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
@@ -6812,6 +8573,51 @@ struct hwrm_stat_ctx_query_output {
u8 valid;
};
+/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ext_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 flags;
+ #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ext_ctx_query_output (size:1536b/192B) */
+struct hwrm_stat_ext_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
struct hwrm_stat_ctx_clr_stats_input {
__le16 req_type;
@@ -6871,6 +8677,56 @@ struct pcie_ctx_hw_stats {
__le64 pcie_recovery_histogram;
};
+/* hwrm_stat_generic_qstats_input (size:256b/32B) */
+struct hwrm_stat_generic_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 generic_stat_size;
+ u8 flags;
+ #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER 0x0UL
+ #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ #define STAT_GENERIC_QSTATS_REQ_FLAGS_LAST STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0[5];
+ __le64 generic_stat_host_addr;
+};
+
+/* hwrm_stat_generic_qstats_output (size:128b/16B) */
+struct hwrm_stat_generic_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 generic_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* generic_sw_hw_stats (size:1216b/152B) */
+struct generic_sw_hw_stats {
+ __le64 pcie_statistics_tx_tlp;
+ __le64 pcie_statistics_rx_tlp;
+ __le64 pcie_credit_fc_hdr_posted;
+ __le64 pcie_credit_fc_hdr_nonposted;
+ __le64 pcie_credit_fc_hdr_cmpl;
+ __le64 pcie_credit_fc_data_posted;
+ __le64 pcie_credit_fc_data_nonposted;
+ __le64 pcie_credit_fc_data_cmpl;
+ __le64 pcie_credit_fc_tgt_nonposted;
+ __le64 pcie_credit_fc_tgt_data_posted;
+ __le64 pcie_credit_fc_tgt_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_data_posted;
+ __le64 pcie_cmpl_longest;
+ __le64 pcie_cmpl_shortest;
+ __le64 cache_miss_count_cfcq;
+ __le64 cache_miss_count_cfcs;
+ __le64 cache_miss_count_cfcc;
+ __le64 cache_miss_count_cfcm;
+};
+
/* hwrm_fw_reset_input (size:192b/24B) */
struct hwrm_fw_reset_input {
__le16 req_type;
@@ -6898,6 +8754,7 @@ struct hwrm_fw_reset_input {
u8 host_idx;
u8 flags;
#define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL
+ #define FW_RESET_REQ_FLAGS_FW_ACTIVATION 0x2UL
u8 unused_0[4];
};
@@ -6948,7 +8805,13 @@ struct hwrm_fw_qstatus_output {
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
#define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
- u8 unused_0[6];
+ u8 nvm_option_action_status;
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE 0x0UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT
+ u8 unused_0[5];
u8 valid;
};
@@ -7000,7 +8863,8 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
#define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
- #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_RSS_V2
+ #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_MSIX_PER_VF
__le16 len;
u8 version;
u8 count;
@@ -7103,6 +8967,86 @@ struct hwrm_fw_get_structured_data_cmd_err {
u8 unused_0[7];
};
+/* hwrm_fw_livepatch_query_input (size:192b/24B) */
+struct hwrm_fw_livepatch_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 fw_target;
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_LAST FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_livepatch_query_output (size:640b/80B) */
+struct hwrm_fw_livepatch_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ char install_ver[32];
+ char active_ver[32];
+ __le16 status_flags;
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL 0x1UL
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE 0x2UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_fw_livepatch_input (size:256b/32B) */
+struct hwrm_fw_livepatch_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 opcode;
+ #define FW_LIVEPATCH_REQ_OPCODE_ACTIVATE 0x1UL
+ #define FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE 0x2UL
+ #define FW_LIVEPATCH_REQ_OPCODE_LAST FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE
+ u8 fw_target;
+ #define FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_LAST FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW
+ u8 loadtype;
+ #define FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL 0x1UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT 0x2UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_LAST FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT
+ u8 flags;
+ __le32 patch_len;
+ __le64 host_addr;
+};
+
+/* hwrm_fw_livepatch_output (size:128b/16B) */
+struct hwrm_fw_livepatch_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_livepatch_cmd_err (size:64b/8B) */
+struct hwrm_fw_livepatch_cmd_err {
+ u8 code;
+ #define FW_LIVEPATCH_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE 0x1UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_TARGET 0x2UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED 0x3UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED 0x4UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED 0x5UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL 0x6UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER 0x7UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE 0x8UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED 0x9UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_LAST FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED
+ u8 unused_0[7];
+};
+
/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
struct hwrm_exec_fwd_resp_input {
__le16 req_type;
@@ -7214,11 +9158,14 @@ struct hwrm_temp_monitor_query_output {
u8 phy_temp;
u8 om_temp;
u8 flags;
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
- u8 unused_0[3];
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL
+ u8 temp2;
+ u8 phy_temp2;
+ u8 om_temp2;
u8 valid;
};
@@ -7361,6 +9308,145 @@ struct hwrm_wol_reason_qcfg_output {
u8 valid;
};
+/* hwrm_dbg_read_direct_input (size:256b/32B) */
+struct hwrm_dbg_read_direct_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 read_addr;
+ __le32 read_len32;
+};
+
+/* hwrm_dbg_read_direct_output (size:128b/16B) */
+struct hwrm_dbg_read_direct_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 crc32;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_qcaps_input (size:192b/24B) */
+struct hwrm_dbg_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_dbg_qcaps_output (size:192b/24B) */
+struct hwrm_dbg_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_component_disable_caps;
+ #define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
+ __le32 flags;
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
+ #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_qcfg_input (size:192b/24B) */
+struct hwrm_dbg_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 flags;
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_MASK 0x3UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT 0
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_NVM 0x0UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR 0x1UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR 0x2UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_LAST DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
+ __le32 coredump_component_disable_flags;
+ #define DBG_QCFG_REQ_COREDUMP_COMPONENT_DISABLE_FLAGS_NVRAM 0x1UL
+};
+
+/* hwrm_dbg_qcfg_output (size:256b/32B) */
+struct hwrm_dbg_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_size;
+ __le32 flags;
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG 0x1UL
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG_SECONDARY 0x2UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE 0x4UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE_SECONDARY 0x8UL
+ #define DBG_QCFG_RESP_FLAGS_DEBUG_NOTIFY 0x10UL
+ #define DBG_QCFG_RESP_FLAGS_JTAG_DEBUG 0x20UL
+ __le16 async_cmpl_ring;
+ u8 unused_2[2];
+ __le32 crashdump_size;
+ u8 unused_3[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */
+struct hwrm_dbg_crashdump_medium_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 output_dest_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR 0x1UL
+ __le16 pg_size_lvl;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_MASK 0x3UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_SFT 0
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_0 0x0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_1 0x1UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 0x2UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_MASK 0x1cUL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_SFT 2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K (0x0UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K (0x1UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K (0x2UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_2M (0x3UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8M (0x4UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G (0x5UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_MASK 0xffe0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_SFT 5
+ __le32 size;
+ __le32 coredump_component_disable_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_NVRAM 0x1UL
+ __le32 unused_0;
+ __le64 pbl;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */
+struct hwrm_dbg_crashdump_medium_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_1[7];
+ u8 valid;
+};
+
/* coredump_segment_record (size:128b/16B) */
struct coredump_segment_record {
__le16 component_id;
@@ -7371,7 +9457,8 @@ struct coredump_segment_record {
u8 seg_flags;
u8 compress_flags;
#define SFLAG_COMPRESSED_ZLIB 0x1UL
- u8 unused_0[6];
+ u8 unused_0[2];
+ __le32 segment_len;
};
/* hwrm_dbg_coredump_list_input (size:256b/32B) */
@@ -7433,6 +9520,9 @@ struct hwrm_dbg_coredump_initiate_output {
struct coredump_data_hdr {
__le32 address;
__le32 flags_length;
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK 0xffffffUL
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS 0x1000000UL
__le32 instance;
__le32 next_offset;
};
@@ -7484,7 +9574,8 @@ struct hwrm_dbg_ring_info_get_input {
#define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL
#define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL
#define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_RX
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ
u8 unused_0[3];
__le32 fw_ring_id;
};
@@ -7497,7 +9588,8 @@ struct hwrm_dbg_ring_info_get_output {
__le16 resp_len;
__le32 producer_index;
__le32 consumer_index;
- u8 unused_0[7];
+ __le32 cag_vector_ctrl;
+ u8 unused_0[3];
u8 valid;
};
@@ -7567,7 +9659,7 @@ struct hwrm_nvm_get_dir_info_output {
u8 valid;
};
-/* hwrm_nvm_write_input (size:384b/48B) */
+/* hwrm_nvm_write_input (size:448b/56B) */
struct hwrm_nvm_write_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -7583,7 +9675,11 @@ struct hwrm_nvm_write_input {
__le16 option;
__le16 flags;
#define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL
__le32 dir_item_length;
+ __le32 offset;
+ __le32 len;
__le32 unused_0;
};
@@ -7618,7 +9714,9 @@ struct hwrm_nvm_modify_input {
__le64 resp_addr;
__le64 host_src_addr;
__le16 dir_idx;
- u8 unused_0[2];
+ __le16 flags;
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL
__le32 offset;
__le32 len;
u8 unused_1[4];
@@ -7702,7 +9800,7 @@ struct hwrm_nvm_get_dev_info_input {
__le64 resp_addr;
};
-/* hwrm_nvm_get_dev_info_output (size:256b/32B) */
+/* hwrm_nvm_get_dev_info_output (size:640b/80B) */
struct hwrm_nvm_get_dev_info_output {
__le16 error_code;
__le16 req_type;
@@ -7717,6 +9815,22 @@ struct hwrm_nvm_get_dev_info_output {
u8 nvm_cfg_ver_maj;
u8 nvm_cfg_ver_min;
u8 nvm_cfg_ver_upd;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID 0x1UL
+ char pkg_name[16];
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ u8 unused_0[7];
u8 valid;
};
@@ -7784,6 +9898,7 @@ struct hwrm_nvm_install_update_input {
#define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
#define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
#define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL
u8 unused_0[2];
};
@@ -7795,8 +9910,35 @@ struct hwrm_nvm_install_update_output {
__le16 resp_len;
__le64 installed_items;
u8 result;
- #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_FAILURE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_MALLOC_FAILURE 0xfdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER 0xfbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER 0xf3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE 0xf2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER 0xecUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE 0xebUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM 0xeaUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH 0xe9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST 0xe8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER 0xe7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM 0xe6UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM 0xe5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH 0xe4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE 0xe1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV 0xceUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID 0xcdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR 0xccUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID 0xcbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM 0xc5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM 0xc4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM 0xc3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR 0xb9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR 0xb8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR 0xb7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND 0xb0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED 0xa7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED
u8 problem_item;
#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
@@ -7813,10 +9955,12 @@ struct hwrm_nvm_install_update_output {
/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
struct hwrm_nvm_install_update_cmd_err {
u8 code;
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT 0x4UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT
u8 unused_0[7];
};
@@ -7963,7 +10107,14 @@ struct hwrm_selftest_qlist_output {
char test5_name[32];
char test6_name[32];
char test7_name[32];
- u8 unused_2[7];
+ u8 eyescope_target_BER_support;
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED
+ u8 unused_2[6];
u8 valid;
};
@@ -8027,4 +10178,54 @@ struct hwrm_selftest_irq_output {
u8 valid;
};
+/* db_push_info (size:64b/8B) */
+struct db_push_info {
+ u32 push_size_push_index;
+ #define DB_PUSH_INFO_PUSH_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_INFO_PUSH_INDEX_SFT 0
+ #define DB_PUSH_INFO_PUSH_SIZE_MASK 0x1f000000UL
+ #define DB_PUSH_INFO_PUSH_SIZE_SFT 24
+ u32 reserved32;
+};
+
+/* fw_status_reg (size:32b/4B) */
+struct fw_status_reg {
+ u32 fw_status;
+ #define FW_STATUS_REG_CODE_MASK 0xffffUL
+ #define FW_STATUS_REG_CODE_SFT 0
+ #define FW_STATUS_REG_CODE_READY 0x8000UL
+ #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY
+ #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL
+ #define FW_STATUS_REG_RECOVERABLE 0x20000UL
+ #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL
+ #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
+ #define FW_STATUS_REG_SHUTDOWN 0x100000UL
+ #define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL
+ #define FW_STATUS_REG_RECOVERING 0x400000UL
+};
+
+/* hcomm_status (size:64b/8B) */
+struct hcomm_status {
+ u32 sig_ver;
+ #define HCOMM_STATUS_VER_MASK 0xffUL
+ #define HCOMM_STATUS_VER_SFT 0
+ #define HCOMM_STATUS_VER_LATEST 0x1UL
+ #define HCOMM_STATUS_VER_LAST HCOMM_STATUS_VER_LATEST
+ #define HCOMM_STATUS_SIGNATURE_MASK 0xffffff00UL
+ #define HCOMM_STATUS_SIGNATURE_SFT 8
+ #define HCOMM_STATUS_SIGNATURE_VAL (0x484353UL << 8)
+ #define HCOMM_STATUS_SIGNATURE_LAST HCOMM_STATUS_SIGNATURE_VAL
+ u32 fw_status_loc;
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_MASK 0x3UL
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_SFT 0
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_GRC 0x1UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR0 0x2UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 0x3UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_LAST HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1
+ #define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL
+ #define HCOMM_STATUS_TRUE_OFFSET_SFT 2
+};
+#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL
+
#endif /* _BNXT_HSI_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
new file mode 100644
index 000000000000..132442f16fe6
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -0,0 +1,817 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2020 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/errno.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+
+static u64 hwrm_calc_sentinel(struct bnxt_hwrm_ctx *ctx, u16 req_type)
+{
+ return (((uintptr_t)ctx) + req_type) ^ BNXT_HWRM_SENTINEL;
+}
+
+/**
+ * __hwrm_req_init() - Initialize an HWRM request.
+ * @bp: The driver context.
+ * @req: A pointer to the request pointer to initialize.
+ * @req_type: The request type. This will be converted to little endian
+ * before being written to the req_type field of the returned request.
+ * @req_len: The length of the request to be allocated.
+ *
+ * Allocate DMA resources and initialize a new HWRM request object of the
+ * given type. The response address field in the request is configured with
+ * the DMA bus address that has been mapped for the response, and the passed
+ * request pointer is set to kernel virtual memory mapped for the request (such
+ * that short_input indirection can be accomplished without copying). The
+ * request’s target and completion ring are initialized to default values and
+ * can be overridden by writing to the returned request object directly.
+ *
+ * The initialized request can be further customized by writing to its fields
+ * directly, taking care to convert such fields to little endian. The request
+ * object will be consumed (and all its associated resources released) upon
+ * passing it to hwrm_req_send() unless ownership of the request has been
+ * claimed by the caller via a call to hwrm_req_hold(). If the request is not
+ * consumed, either because it is never sent or because ownership has been
+ * claimed, then it must be released by a call to hwrm_req_drop().
+ *
+ * Return: zero on success, negative error code otherwise:
+ * E2BIG: the request length is too large to fit in the DMA buffer.
+ * ENOMEM: an allocation failure occurred.
+ */
+int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len)
+{
+ struct bnxt_hwrm_ctx *ctx;
+ dma_addr_t dma_handle;
+ u8 *req_addr;
+
+ if (req_len > BNXT_HWRM_CTX_OFFSET)
+ return -E2BIG;
+
+ req_addr = dma_pool_alloc(bp->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO,
+ &dma_handle);
+ if (!req_addr)
+ return -ENOMEM;
+
+ ctx = (struct bnxt_hwrm_ctx *)(req_addr + BNXT_HWRM_CTX_OFFSET);
+ /* safety first, sentinel used to check for invalid requests */
+ ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);
+ ctx->req_len = req_len;
+ ctx->req = (struct input *)req_addr;
+ ctx->resp = (struct output *)(req_addr + BNXT_HWRM_RESP_OFFSET);
+ ctx->dma_handle = dma_handle;
+ ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */
+ ctx->timeout = bp->hwrm_cmd_timeout ?: DFLT_HWRM_CMD_TIMEOUT;
+ ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
+ ctx->gfp = GFP_KERNEL;
+ ctx->slice_addr = NULL;
+
+ /* initialize common request fields */
+ ctx->req->req_type = cpu_to_le16(req_type);
+ ctx->req->resp_addr = cpu_to_le64(dma_handle + BNXT_HWRM_RESP_OFFSET);
+ ctx->req->cmpl_ring = cpu_to_le16(BNXT_HWRM_NO_CMPL_RING);
+ ctx->req->target_id = cpu_to_le16(BNXT_HWRM_TARGET);
+ *req = ctx->req;
+
+ return 0;
+}
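+
+/* A minimal usage sketch of the pattern this function enables, assuming the
+ * hwrm_req_init() wrapper macro from bnxt_hwrm.h (which passes sizeof(*req)
+ * as @req_len) and the hwrm_func_reset message defined in bnxt_hsi.h:
+ *
+ *	struct hwrm_func_reset_input *req;
+ *	int rc;
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
+ *	if (rc)
+ *		return rc;
+ *	... optionally customize req fields (little endian) ...
+ *	return hwrm_req_send(bp, req);
+ *
+ * hwrm_req_send() consumes the request unless the caller has previously
+ * taken ownership of it with hwrm_req_hold().
+ */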
+
+static struct bnxt_hwrm_ctx *__hwrm_ctx(struct bnxt *bp, u8 *req_addr)
+{
+ void *ctx_addr = req_addr + BNXT_HWRM_CTX_OFFSET;
+ struct input *req = (struct input *)req_addr;
+ struct bnxt_hwrm_ctx *ctx = ctx_addr;
+ u64 sentinel;
+
+ if (!req) {
+ /* can only be due to software bug, be loud */
+ netdev_err(bp->dev, "null HWRM request");
+ dump_stack();
+ return NULL;
+ }
+
+ /* HWRM API has no type safety, verify sentinel to validate address */
+ sentinel = hwrm_calc_sentinel(ctx, le16_to_cpu(req->req_type));
+ if (ctx->sentinel != sentinel) {
+ /* can only be due to software bug, be loud */
+ netdev_err(bp->dev, "HWRM sentinel mismatch, req_type = %u\n",
+ (u32)le16_to_cpu(req->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ return ctx;
+}
+
+/**
+ * hwrm_req_timeout() - Set the completion timeout for the request.
+ * @bp: The driver context.
+ * @req: The request for which to set the timeout.
+ * @timeout: The timeout in milliseconds.
+ *
+ * Set the timeout associated with the request for subsequent calls to
+ * hwrm_req_send(). Some requests are long running and require a different
+ * timeout than the default.
+ */
+void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout)
+{
+ struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
+
+ if (ctx)
+ ctx->timeout = timeout;
+}
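+
+/* Usage sketch: a long-running command (such as an NVM flash operation) can
+ * extend the timeout before sending. The value below is illustrative only,
+ * expressed in the same millisecond units as bp->hwrm_cmd_timeout:
+ *
+ *	hwrm_req_timeout(bp, req, 60000);
+ *	rc = hwrm_req_send(bp, req);
+ */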
+
+/**
+ * hwrm_req_alloc_flags() - Sets GFP allocation flags for slices.
+ * @bp: The driver context.
+ * @req: The request for which calls to hwrm_req_dma_slice() will have altered
+ * allocation flags.
+ * @gfp: A bitmask of GFP flags. These flags are passed to dma_alloc_coherent()
+ * whenever it is used to allocate backing memory for slices. Note that
+ * calls to hwrm_req_dma_slice() will not always result in new allocations;
+ * however, memory suballocated from the request buffer is already
+ * __GFP_ZERO.
+ *
+ * Sets the GFP allocation flags associated with the request for subsequent
+ * calls to hwrm_req_dma_slice(). This can be useful for specifying __GFP_ZERO
+ * for slice allocations.
+ */
+void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t gfp)
+{
+ struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
+
+ if (ctx)
+ ctx->gfp = gfp;
+}
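+
+/* Usage sketch, assuming the hwrm_req_dma_slice() helper referenced above;
+ * a caller on an atomic path might want non-sleeping slice allocations:
+ *
+ *	hwrm_req_alloc_flags(bp, req, GFP_ATOMIC);
+ *	data = hwrm_req_dma_slice(bp, req, size, &dma_handle);
+ *	if (!data)
+ *		... handle allocation failure ...
+ */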
+
+/**
+ * hwrm_req_replace() - Replace request data.
+ * @bp: The driver context.
+ * @req: The request to modify. A call to hwrm_req_replace() is conceptually
+ * an assignment of new_req to req. Subsequent calls to HWRM API functions,
+ * such as hwrm_req_send(), should thus use req and not new_req (in fact,
+ * calls to HWRM API functions will fail if non-managed request objects
+ * are passed).
+ * @len: The length of new_req.
+ * @new_req: The pre-built request to copy or reference.
+ *
+ * Replaces the request data in req with that of new_req. This is useful in
+ * scenarios where a request object has already been constructed by a third
+ * party prior to creating a resource managed request using hwrm_req_init().
+ * Depending on the length, hwrm_req_replace() will either copy the new
+ * request data into the DMA memory allocated for req, or it will simply
+ * reference the new request and use it in lieu of req during subsequent
+ * calls to hwrm_req_send(). The resource management is associated with
+ * req and is independent of and does not apply to new_req. The caller must
+ * ensure that the lifetime of new_req is at least as long as req. Any slices
+ * that may have been associated with the original request are released.
+ *
+ * Return: zero on success, negative error code otherwise:
+ * E2BIG: Request is too large.
+ * EINVAL: Invalid request to modify.
+ */
+int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len)
+{
+ struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
+ struct input *internal_req = req;
+ u16 req_type;
+
+ if (!ctx)
+ return -EINVAL;
+
+ if (len > BNXT_HWRM_CTX_OFFSET)
+ return -E2BIG;
+
+ /* free any existing slices */
+ ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
+ if (ctx->slice_addr) {
+ dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
+ ctx->slice_addr, ctx->slice_handle);
+ ctx->slice_addr = NULL;
+ }
+ ctx->gfp = GFP_KERNEL;
+
+ if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || len > BNXT_HWRM_MAX_REQ_LEN) {
+ memcpy(internal_req, new_req, len);
+ } else {
+ internal_req->req_type = ((struct input *)new_req)->req_type;
+ ctx->req = new_req;
+ }
+
+ ctx->req_len = len;
+ ctx->req->resp_addr = cpu_to_le64(ctx->dma_handle +
+ BNXT_HWRM_RESP_OFFSET);
+
+ /* update sentinel for potentially new request type */
+ req_type = le16_to_cpu(internal_req->req_type);
+ ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);
+
+ return 0;
+}
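+
+/* Usage sketch, assuming a pre-built command (for example, one forwarded
+ * from a VF) held in fwd_cmd with length fwd_len; both names are
+ * placeholders and the initial request type is overwritten by the replace:
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ *	if (rc)
+ *		return rc;
+ *	rc = hwrm_req_replace(bp, req, fwd_cmd, fwd_len);
+ *	if (!rc)
+ *		rc = hwrm_req_send(bp, req);
+ */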
+
+/**
+ * hwrm_req_flags() - Set non-internal flags of the ctx
+ * @bp: The driver context.
+ * @req: The request containing the HWRM command
+ * @flags: ctx flags that don't have BNXT_HWRM_INTERNAL_FLAG set
+ *
+ * ctx flags can be used by callers to instruct how a subsequent
+ * hwrm_req_send() should behave. For example, callers can use hwrm_req_flags()
+ * with BNXT_HWRM_CTX_SILENT to omit kernel prints of errors from hwrm_req_send(),
+ * or with BNXT_HWRM_FULL_WAIT to force hwrm_req_send() to wait for the full
+ * timeout even if the FW is not responding.
+ * This generic function can be used to set any flag that is not an internal flag
+ * of the HWRM module.
+ */
+void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags)
+{
+ struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
+
+ if (ctx)
+ ctx->flags |= (flags & HWRM_API_FLAGS);
+}
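+
+/* Usage sketch: suppress error logging for a probe-style command that is
+ * expected to fail on older firmware:
+ *
+ *	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
+ *	rc = hwrm_req_send(bp, req);
+ */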
+
+/**
+ * hwrm_req_hold() - Claim ownership of the request's resources.
+ * @bp: The driver context.
+ * @req: A pointer to the request to own. The request will no longer be
+ * consumed by calls to hwrm_req_send().
+ *
+ * Take ownership of the request. Ownership places responsibility on the
+ * caller to free the resources associated with the request via a call to
+ * hwrm_req_drop(). The caller taking ownership implies that a subsequent
+ * call to hwrm_req_send() will not consume the request (i.e. sending will
+ * not free the associated resources if the request is owned by the caller).
+ * Taking ownership returns a reference to the response. Retaining and
+ * accessing the response data is the most common reason to take ownership
+ * of the request. Ownership can also be acquired in order to reuse the same
+ * request object across multiple invocations of hwrm_req_send().
+ *
+ * Return: A pointer to the response object.
+ *
+ * The resources associated with the response will remain available to the
+ * caller until ownership of the request is relinquished via a call to
+ * hwrm_req_drop(). It is not possible for hwrm_req_hold() to return NULL if
+ * a valid request is provided. A returned NULL value would imply a driver
+ * bug and the implementation will complain loudly in the logs to aid in
+ * detection. It should not be necessary to check the result for NULL.
+ */
+void *hwrm_req_hold(struct bnxt *bp, void *req)
+{
+ struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
+ struct input *input = (struct input *)req;
+
+ if (!ctx)
+ return NULL;
+
+ if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED) {
+ /* can only be due to software bug, be loud */
+ netdev_err(bp->dev, "HWRM context already owned, req_type = %u\n",
+ (u32)le16_to_cpu(input->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ ctx->flags |= BNXT_HWRM_INTERNAL_CTX_OWNED;
+ return ((u8 *)req) + BNXT_HWRM_RESP_OFFSET;
+}
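+
+/* Usage sketch: take ownership to read the response, then release it:
+ *
+ *	resp = hwrm_req_hold(bp, req);
+ *	rc = hwrm_req_send(bp, req);	(req is not consumed while owned)
+ *	if (!rc)
+ *		... read fields of *resp ...
+ *	hwrm_req_drop(bp, req);		(resp is invalid after this point)
+ */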
+
+static void __hwrm_ctx_drop(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
+{
+ void *addr = ((u8 *)ctx) - BNXT_HWRM_CTX_OFFSET;
+ dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */
+
+ /* unmap any auxiliary DMA slice */
+ if (ctx->slice_addr)
+ dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
+ ctx->slice_addr, ctx->slice_handle);
+
+ /* invalidate, ensure ownership, sentinel and dma_handle are cleared */
+ memset(ctx, 0, sizeof(struct bnxt_hwrm_ctx));
+
+ /* return the buffer to the DMA pool */
+ if (dma_handle)
+ dma_pool_free(bp->hwrm_dma_pool, addr, dma_handle);
+}
+
+/**
+ * hwrm_req_drop() - Release all resources associated with the request.
+ * @bp: The driver context.
+ * @req: The request to consume, releasing the associated resources. The
+ * request object, any slices, and its associated response are no
+ * longer valid.
+ *
+ * It is legal to call hwrm_req_drop() on an unowned request, provided it
+ * has not already been consumed by hwrm_req_send() (for example, to release
+ * an aborted request). A given request should not be dropped more than once,
+ * nor should it be dropped after having been consumed by hwrm_req_send(). To
+ * do so is an error (the context will not be found and a stack trace will be
+ * rendered in the kernel log).
+ */
+void hwrm_req_drop(struct bnxt *bp, void *req)
+{
+ struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
+
+ if (ctx)
+ __hwrm_ctx_drop(bp, ctx);
+}
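+
+/* Usage sketch: dropping an initialized request that ends up not being sent
+ * (abort_condition is a placeholder for any caller-specific check):
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
+ *	if (rc)
+ *		return rc;
+ *	if (abort_condition) {
+ *		hwrm_req_drop(bp, req);
+ *		return -ENODEV;
+ *	}
+ *	return hwrm_req_send(bp, req);
+ */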
+
+static int __hwrm_to_stderr(u32 hwrm_err)
+{
+ switch (hwrm_err) {
+ case HWRM_ERR_CODE_SUCCESS:
+ return 0;
+ case HWRM_ERR_CODE_RESOURCE_LOCKED:
+ return -EROFS;
+ case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
+ return -EACCES;
+ case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
+ return -ENOSPC;
+ case HWRM_ERR_CODE_INVALID_PARAMS:
+ case HWRM_ERR_CODE_INVALID_FLAGS:
+ case HWRM_ERR_CODE_INVALID_ENABLES:
+ case HWRM_ERR_CODE_UNSUPPORTED_TLV:
+ case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
+ return -EINVAL;
+ case HWRM_ERR_CODE_NO_BUFFER:
+ return -ENOMEM;
+ case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
+ case HWRM_ERR_CODE_BUSY:
+ return -EAGAIN;
+ case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case HWRM_ERR_CODE_PF_UNAVAILABLE:
+ return -ENODEV;
+ default:
+ return -EIO;
+ }
+}
+
+static struct bnxt_hwrm_wait_token *
+__hwrm_acquire_token(struct bnxt *bp, enum bnxt_hwrm_chnl dst)
+{
+ struct bnxt_hwrm_wait_token *token;
+
+ token = kzalloc(sizeof(*token), GFP_KERNEL);
+ if (!token)
+ return NULL;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+
+ token->dst = dst;
+ token->state = BNXT_HWRM_PENDING;
+ if (dst == BNXT_HWRM_CHNL_CHIMP) {
+ token->seq_id = bp->hwrm_cmd_seq++;
+ hlist_add_head_rcu(&token->node, &bp->hwrm_pending_list);
+ } else {
+ token->seq_id = bp->hwrm_cmd_kong_seq++;
+ }
+
+ return token;
+}
+
+static void
+__hwrm_release_token(struct bnxt *bp, struct bnxt_hwrm_wait_token *token)
+{
+ if (token->dst == BNXT_HWRM_CHNL_CHIMP) {
+ hlist_del_rcu(&token->node);
+ kfree_rcu(token, rcu);
+ } else {
+ kfree(token);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+void
+hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state)
+{
+ struct bnxt_hwrm_wait_token *token;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) {
+ if (token->seq_id == seq_id) {
+ WRITE_ONCE(token->state, state);
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+ netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
+}
+
+static void hwrm_req_dbg(struct bnxt *bp, struct input *req)
+{
+ u32 ring = le16_to_cpu(req->cmpl_ring);
+ u32 type = le16_to_cpu(req->req_type);
+ u32 tgt = le16_to_cpu(req->target_id);
+ u32 seq = le16_to_cpu(req->seq_id);
+ char opt[32] = "\n";
+
+ if (unlikely(ring != (u16)BNXT_HWRM_NO_CMPL_RING))
+ snprintf(opt, 16, " ring %d\n", ring);
+
+ if (unlikely(tgt != BNXT_HWRM_TARGET))
+ snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt);
+
+ netdev_dbg(bp->dev, "sent hwrm req_type 0x%x seq id 0x%x%s",
+ type, seq, opt);
+}
+
+#define hwrm_err(bp, ctx, fmt, ...) \
+ do { \
+ if ((ctx)->flags & BNXT_HWRM_CTX_SILENT) \
+ netdev_dbg((bp)->dev, fmt, __VA_ARGS__); \
+ else \
+ netdev_err((bp)->dev, fmt, __VA_ARGS__); \
+ } while (0)
+
+static bool hwrm_wait_must_abort(struct bnxt *bp, u32 req_type, u32 *fw_status)
+{
+ if (req_type == HWRM_VER_GET)
+ return false;
+
+ if (!bp->fw_health || !bp->fw_health->status_reliable)
+ return false;
+
+ *fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ return *fw_status && !BNXT_FW_IS_HEALTHY(*fw_status);
+}
+
+static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
+{
+ u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
+ enum bnxt_hwrm_chnl dst = BNXT_HWRM_CHNL_CHIMP;
+ u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
+ struct bnxt_hwrm_wait_token *token = NULL;
+ struct hwrm_short_input short_input = {0};
+ u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
+ unsigned int i, timeout, tmo_count;
+ u32 *data = (u32 *)ctx->req;
+ u32 msg_len = ctx->req_len;
+ u32 req_type, sts;
+ int rc = -EBUSY;
+ u16 len = 0;
+ u8 *valid;
+
+ if (ctx->flags & BNXT_HWRM_INTERNAL_RESP_DIRTY)
+ memset(ctx->resp, 0, PAGE_SIZE);
+
+ req_type = le16_to_cpu(ctx->req->req_type);
+ if (BNXT_NO_FW_ACCESS(bp) &&
+ (req_type != HWRM_FUNC_RESET && req_type != HWRM_VER_GET)) {
+ netdev_dbg(bp->dev, "hwrm req_type 0x%x skipped, FW channel down\n",
+ req_type);
+ goto exit;
+ }
+
+ if (msg_len > BNXT_HWRM_MAX_REQ_LEN &&
+ msg_len > bp->hwrm_max_ext_req_len) {
+ rc = -E2BIG;
+ goto exit;
+ }
+
+ if (bnxt_kong_hwrm_message(bp, ctx->req)) {
+ dst = BNXT_HWRM_CHNL_KONG;
+ bar_offset = BNXT_GRCPF_REG_KONG_COMM;
+ doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
+ if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
+ netdev_err(bp->dev, "Ring completions not supported for KONG commands, req_type = %d\n",
+ req_type);
+ rc = -EINVAL;
+ goto exit;
+ }
+ }
+
+ token = __hwrm_acquire_token(bp, dst);
+ if (!token) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ ctx->req->seq_id = cpu_to_le16(token->seq_id);
+
+ if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
+ msg_len > BNXT_HWRM_MAX_REQ_LEN) {
+ short_input.req_type = ctx->req->req_type;
+ short_input.signature =
+ cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
+ short_input.size = cpu_to_le16(msg_len);
+ short_input.req_addr = cpu_to_le64(ctx->dma_handle);
+
+ data = (u32 *)&short_input;
+ msg_len = sizeof(short_input);
+
+ max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
+ }
+
+ /* Ensure any associated DMA buffers are written before doorbell */
+ wmb();
+
+ /* Write request msg to hwrm channel */
+ __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
+
+ for (i = msg_len; i < max_req_len; i += 4)
+ writel(0, bp->bar0 + bar_offset + i);
+
+ /* Ring channel doorbell */
+ writel(1, bp->bar0 + doorbell_offset);
+
+ hwrm_req_dbg(bp, ctx->req);
+
+ if (!pci_is_enabled(bp->pdev)) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+	/* Cap the timeout at the upper limit */
+ timeout = min(ctx->timeout, bp->hwrm_cmd_max_timeout ?: HWRM_CMD_MAX_TIMEOUT);
+ /* convert timeout to usec */
+ timeout *= 1000;
+
+ i = 0;
+ /* Short timeout for the first few iterations:
+ * number of loops = number of loops for short timeout +
+ * number of loops for standard timeout.
+ */
+ tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
+ timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
+ tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
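+	/* Worked example (constants from bnxt_hwrm.h; a 500 msec timeout is
+	 * assumed here for illustration): 500000 usec total, minus 5 * 3 usec
+	 * of short sleeps, leaves DIV_ROUND_UP(499985, 25) = 20000 standard
+	 * iterations, so tmo_count = 5 + 20000 = 20005.
+	 */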
+
+ if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
+ /* Wait until hwrm response cmpl interrupt is processed */
+ while (READ_ONCE(token->state) < BNXT_HWRM_COMPLETE &&
+ i++ < tmo_count) {
+ /* Abort the wait for completion if the FW health
+ * check has failed.
+ */
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ goto exit;
+ /* on first few passes, just barely sleep */
+ if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
+ usleep_range(HWRM_SHORT_MIN_TIMEOUT,
+ HWRM_SHORT_MAX_TIMEOUT);
+ } else {
+ if (hwrm_wait_must_abort(bp, req_type, &sts)) {
+ hwrm_err(bp, ctx, "Resp cmpl intr abandoning msg: 0x%x due to firmware status: 0x%x\n",
+ req_type, sts);
+ goto exit;
+ }
+ usleep_range(HWRM_MIN_TIMEOUT,
+ HWRM_MAX_TIMEOUT);
+ }
+ }
+
+ if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) {
+ hwrm_err(bp, ctx, "Resp cmpl intr err msg: 0x%x\n",
+ req_type);
+ goto exit;
+ }
+ len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
+ valid = ((u8 *)ctx->resp) + len - 1;
+ } else {
+ __le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */
+ int j;
+
+ /* Check if response len is updated */
+ for (i = 0; i < tmo_count; i++) {
+ /* Abort the wait for completion if the FW health
+ * check has failed.
+ */
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ goto exit;
+
+ if (token &&
+ READ_ONCE(token->state) == BNXT_HWRM_DEFERRED) {
+ __hwrm_release_token(bp, token);
+ token = NULL;
+ }
+
+ len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
+ if (len) {
+ __le16 resp_seq = READ_ONCE(ctx->resp->seq_id);
+
+ if (resp_seq == ctx->req->seq_id)
+ break;
+ if (resp_seq != seen_out_of_seq) {
+ netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
+ le16_to_cpu(resp_seq),
+ req_type,
+ le16_to_cpu(ctx->req->seq_id));
+ seen_out_of_seq = resp_seq;
+ }
+ }
+
+ /* on first few passes, just barely sleep */
+ if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
+ usleep_range(HWRM_SHORT_MIN_TIMEOUT,
+ HWRM_SHORT_MAX_TIMEOUT);
+ } else {
+ if (hwrm_wait_must_abort(bp, req_type, &sts)) {
+ hwrm_err(bp, ctx, "Abandoning msg {0x%x 0x%x} len: %d due to firmware status: 0x%x\n",
+ req_type,
+ le16_to_cpu(ctx->req->seq_id),
+ len, sts);
+ goto exit;
+ }
+ usleep_range(HWRM_MIN_TIMEOUT,
+ HWRM_MAX_TIMEOUT);
+ }
+ }
+
+ if (i >= tmo_count) {
+ hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
+ hwrm_total_timeout(i), req_type,
+ le16_to_cpu(ctx->req->seq_id), len);
+ goto exit;
+ }
+
+ /* Last byte of resp contains valid bit */
+ valid = ((u8 *)ctx->resp) + len - 1;
+ for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) {
+ /* make sure we read from updated DMA memory */
+ dma_rmb();
+ if (*valid)
+ break;
+ if (j < 10) {
+ udelay(1);
+ j++;
+ } else {
+ usleep_range(20, 30);
+ j += 20;
+ }
+ }
+
+ if (j >= HWRM_VALID_BIT_DELAY_USEC) {
+ hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
+ hwrm_total_timeout(i) + j, req_type,
+ le16_to_cpu(ctx->req->seq_id), len, *valid);
+ goto exit;
+ }
+ }
+
+ /* Zero valid bit for compatibility. Valid bit in an older spec
+ * may become a new field in a newer spec. We must make sure that
+ * a new field not implemented by old spec will read zero.
+ */
+ *valid = 0;
+ rc = le16_to_cpu(ctx->resp->error_code);
+ if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNXT_HWRM_CTX_SILENT))
+ netdev_warn(bp->dev, "FW returned busy, hwrm req_type 0x%x\n",
+ req_type);
+ else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
+ hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+ req_type, token->seq_id, rc);
+ rc = __hwrm_to_stderr(rc);
+exit:
+ if (token)
+ __hwrm_release_token(bp, token);
+ if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED)
+ ctx->flags |= BNXT_HWRM_INTERNAL_RESP_DIRTY;
+ else
+ __hwrm_ctx_drop(bp, ctx);
+ return rc;
+}
+
+/**
+ * hwrm_req_send() - Execute an HWRM command.
+ * @bp: The driver context.
+ * @req: A pointer to the request to send. The DMA resources associated with
+ *	the request will be released (i.e. the request will be consumed) unless
+ * ownership of the request has been assumed by the caller via a call to
+ * hwrm_req_hold().
+ *
+ * Send an HWRM request to the device and wait for a response. The request is
+ * consumed if it is not owned by the caller. This function will block until
+ * the request has either completed or times out due to an error.
+ *
+ * Return: A result code.
+ *
+ * The result is zero on success, otherwise the negative error code indicates
+ * one of the following errors:
+ * E2BIG: The request was too large.
+ *	EBUSY: The firmware is in a fatal state or the request timed out.
+ *	EACCES: HWRM access denied.
+ * ENOSPC: HWRM resource allocation error.
+ * EINVAL: Request parameters are invalid.
+ * ENOMEM: HWRM has no buffers.
+ * EAGAIN: HWRM busy or reset in progress.
+ * EOPNOTSUPP: Invalid request type.
+ * EIO: Any other error.
+ * Error handling is orthogonal to request ownership. An unowned request will
+ * still be consumed on error. If the caller owns the request, then the caller
+ * is responsible for releasing the resources. Otherwise, hwrm_req_send() will
+ * always consume the request.
+ */
+int hwrm_req_send(struct bnxt *bp, void *req)
+{
+ struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
+
+ if (!ctx)
+ return -EINVAL;
+
+ return __hwrm_send(bp, ctx);
+}
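+
+/* Usage sketch (illustrative only, not introduced by this patch): when the
+ * response is not needed, the request is initialized, filled in and sent
+ * without hwrm_req_hold(); hwrm_req_send() then consumes it on both success
+ * and failure, so no hwrm_req_drop() is required. HWRM_FUNC_RESET and its
+ * enables field are real HWRM definitions; the snippet itself is
+ * hypothetical.
+ *
+ *	struct hwrm_func_reset_input *req;
+ *	int rc;
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
+ *	if (rc)
+ *		return rc;
+ *	req->enables = 0;
+ *	return hwrm_req_send(bp, req);
+ */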
+
+/**
+ * hwrm_req_send_silent() - A silent version of hwrm_req_send().
+ * @bp: The driver context.
+ * @req: The request to send without logging.
+ *
+ * The same as hwrm_req_send(), except that the request is silenced by
+ * setting BNXT_HWRM_CTX_SILENT via hwrm_req_flags() prior to the call. This
+ * version of the function is provided solely to preserve the legacy API's
+ * flavor for this functionality.
+ *
+ * Return: A result code, see hwrm_req_send().
+ */
+int hwrm_req_send_silent(struct bnxt *bp, void *req)
+{
+ hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
+ return hwrm_req_send(bp, req);
+}
+
+/**
+ * hwrm_req_dma_slice() - Allocate a slice of DMA mapped memory.
+ * @bp: The driver context.
+ * @req: The request for which indirect data will be associated.
+ * @size: The size of the allocation.
+ * @dma_handle: The bus address associated with the allocation. The HWRM API has
+ * no knowledge about the type of the request and so cannot infer how the
+ * caller intends to use the indirect data. Thus, the caller is
+ * responsible for configuring the request object appropriately to
+ *	point to the associated indirect memory. Note that the DMA handle has
+ *	the same definition as in dma_alloc_coherent(); the caller is
+ *	responsible for endian conversions via cpu_to_le64() before assigning
+ *	this address.
+ *
+ * Allocates DMA mapped memory for indirect data related to a request. The
+ * lifetime of the DMA resources will be bound to that of the request (i.e.
+ * they will be automatically released when the request is either consumed by
+ * hwrm_req_send() or dropped by hwrm_req_drop()). Small allocations are
+ * efficiently suballocated out of the request buffer space, hence the name
+ * slice, while larger requests are satisfied via an underlying call to
+ * dma_alloc_coherent(). Multiple suballocations are supported; however, only
+ * one externally mapped region is.
+ *
+ * Return: The kernel virtual address of the DMA mapping.
+ */
+void *
+hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma_handle)
+{
+ struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
+ u8 *end = ((u8 *)req) + BNXT_HWRM_DMA_SIZE;
+ struct input *input = req;
+ u8 *addr, *req_addr = req;
+ u32 max_offset, offset;
+
+ if (!ctx)
+ return NULL;
+
+ max_offset = BNXT_HWRM_DMA_SIZE - ctx->allocated;
+ offset = max_offset - size;
+ offset = ALIGN_DOWN(offset, BNXT_HWRM_DMA_ALIGN);
+ addr = req_addr + offset;
+
+ if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) {
+ ctx->allocated = end - addr;
+ *dma_handle = ctx->dma_handle + offset;
+ return addr;
+ }
+
+	/* could not suballocate from ctx buffer, try to create a new mapping */
+ if (ctx->slice_addr) {
+ /* if one exists, can only be due to software bug, be loud */
+ netdev_err(bp->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n",
+ (u32)le16_to_cpu(input->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ addr = dma_alloc_coherent(&bp->pdev->dev, size, dma_handle, ctx->gfp);
+
+ if (!addr)
+ return NULL;
+
+ ctx->slice_addr = addr;
+ ctx->slice_size = size;
+ ctx->slice_handle = *dma_handle;
+
+ return addr;
+}
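+
+/* Usage sketch (illustrative only, not introduced by this patch): indirect
+ * data is staged in a slice and referenced from the request by its bus
+ * address. HWRM_NVM_WRITE and its host_src_addr field are real HWRM
+ * definitions; the data/data_len locals and the omitted directory fields
+ * are hypothetical.
+ *
+ *	struct hwrm_nvm_write_input *req;
+ *	dma_addr_t dma_handle;
+ *	u8 *kmem;
+ *	int rc;
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
+ *	if (rc)
+ *		return rc;
+ *	kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
+ *	if (!kmem) {
+ *		hwrm_req_drop(bp, req);
+ *		return -ENOMEM;
+ *	}
+ *	memcpy(kmem, data, data_len);
+ *	req->host_src_addr = cpu_to_le64(dma_handle);
+ *	return hwrm_req_send(bp, req);
+ */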
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
new file mode 100644
index 000000000000..c98032e38188
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -0,0 +1,140 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2020 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_HWRM_H
+#define BNXT_HWRM_H
+
+#include "bnxt_hsi.h"
+
+enum bnxt_hwrm_ctx_flags {
+ /* Update the HWRM_API_FLAGS right below for any new non-internal bit added here */
+ BNXT_HWRM_INTERNAL_CTX_OWNED = BIT(0), /* caller owns the context */
+ BNXT_HWRM_INTERNAL_RESP_DIRTY = BIT(1), /* response contains data */
+ BNXT_HWRM_CTX_SILENT = BIT(2), /* squelch firmware errors */
+ BNXT_HWRM_FULL_WAIT = BIT(3), /* wait for full timeout of HWRM command */
+};
+
+#define HWRM_API_FLAGS (BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT)
+
+struct bnxt_hwrm_ctx {
+ u64 sentinel;
+ dma_addr_t dma_handle;
+ struct output *resp;
+ struct input *req;
+ dma_addr_t slice_handle;
+ void *slice_addr;
+ u32 slice_size;
+ u32 req_len;
+ enum bnxt_hwrm_ctx_flags flags;
+ unsigned int timeout;
+ u32 allocated;
+ gfp_t gfp;
+};
+
+enum bnxt_hwrm_wait_state {
+ BNXT_HWRM_PENDING,
+ BNXT_HWRM_DEFERRED,
+ BNXT_HWRM_COMPLETE,
+ BNXT_HWRM_CANCELLED,
+};
+
+enum bnxt_hwrm_chnl { BNXT_HWRM_CHNL_CHIMP, BNXT_HWRM_CHNL_KONG };
+
+struct bnxt_hwrm_wait_token {
+ struct rcu_head rcu;
+ struct hlist_node node;
+ enum bnxt_hwrm_wait_state state;
+ enum bnxt_hwrm_chnl dst;
+ u16 seq_id;
+};
+
+void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s);
+
+#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
+#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
+#define HWRM_CMD_MAX_TIMEOUT 40000U
+#define SHORT_HWRM_CMD_TIMEOUT 20
+#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
+#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
+#define BNXT_HWRM_TARGET 0xffff
+#define BNXT_HWRM_NO_CMPL_RING -1
+#define BNXT_HWRM_REQ_MAX_SIZE 128
+#define BNXT_HWRM_DMA_SIZE (2 * PAGE_SIZE) /* space for req+resp */
+#define BNXT_HWRM_RESP_RESERVED PAGE_SIZE
+#define BNXT_HWRM_RESP_OFFSET (BNXT_HWRM_DMA_SIZE - \
+ BNXT_HWRM_RESP_RESERVED)
+#define BNXT_HWRM_CTX_OFFSET (BNXT_HWRM_RESP_OFFSET - \
+ sizeof(struct bnxt_hwrm_ctx))
+#define BNXT_HWRM_DMA_ALIGN 16
+#define BNXT_HWRM_SENTINEL 0xb6e1f68a12e9a7eb /* arbitrary value */
+#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
+ BNXT_HWRM_REQ_MAX_SIZE)
+#define HWRM_SHORT_MIN_TIMEOUT 3
+#define HWRM_SHORT_MAX_TIMEOUT 10
+#define HWRM_SHORT_TIMEOUT_COUNTER 5
+
+#define HWRM_MIN_TIMEOUT 25
+#define HWRM_MAX_TIMEOUT 40
+
+static inline unsigned int hwrm_total_timeout(unsigned int n)
+{
+ return n <= HWRM_SHORT_TIMEOUT_COUNTER ? n * HWRM_SHORT_MIN_TIMEOUT :
+ HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT +
+ (n - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT;
+}
+
+#define HWRM_VALID_BIT_DELAY_USEC 50000
+
+static inline bool bnxt_cfa_hwrm_message(u16 req_type)
+{
+ switch (req_type) {
+ case HWRM_CFA_ENCAP_RECORD_ALLOC:
+ case HWRM_CFA_ENCAP_RECORD_FREE:
+ case HWRM_CFA_DECAP_FILTER_ALLOC:
+ case HWRM_CFA_DECAP_FILTER_FREE:
+ case HWRM_CFA_EM_FLOW_ALLOC:
+ case HWRM_CFA_EM_FLOW_FREE:
+ case HWRM_CFA_EM_FLOW_CFG:
+ case HWRM_CFA_FLOW_ALLOC:
+ case HWRM_CFA_FLOW_FREE:
+ case HWRM_CFA_FLOW_INFO:
+ case HWRM_CFA_FLOW_FLUSH:
+ case HWRM_CFA_FLOW_STATS:
+ case HWRM_CFA_METER_PROFILE_ALLOC:
+ case HWRM_CFA_METER_PROFILE_FREE:
+ case HWRM_CFA_METER_PROFILE_CFG:
+ case HWRM_CFA_METER_INSTANCE_ALLOC:
+ case HWRM_CFA_METER_INSTANCE_FREE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req)
+{
+ return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL &&
+ (bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type)) ||
+ le16_to_cpu(req->target_id) == HWRM_TARGET_ID_KONG));
+}
+
+int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len);
+#define hwrm_req_init(bp, req, req_type) \
+ __hwrm_req_init((bp), (void **)&(req), (req_type), sizeof(*(req)))
+void *hwrm_req_hold(struct bnxt *bp, void *req);
+void hwrm_req_drop(struct bnxt *bp, void *req);
+void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags);
+void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout);
+int hwrm_req_send(struct bnxt *bp, void *req);
+int hwrm_req_send_silent(struct bnxt *bp, void *req);
+int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len);
+void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t flags);
+void *hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma);
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
new file mode 100644
index 000000000000..2132ce63193c
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -0,0 +1,980 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2021 Broadcom Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/timekeeping.h>
+#include <linux/ptp_classify.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_ptp.h"
+
+static int bnxt_ptp_cfg_settime(struct bnxt *bp, u64 time)
+{
+ struct hwrm_func_ptp_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME);
+ req->ptp_set_time = cpu_to_le64(time);
+ return hwrm_req_send(bp, req);
+}
+
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off)
+{
+ unsigned int ptp_class;
+ struct ptp_header *hdr;
+
+ ptp_class = ptp_classify_raw(skb);
+
+ switch (ptp_class & PTP_CLASS_VMASK) {
+ case PTP_CLASS_V1:
+ case PTP_CLASS_V2:
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return -EINVAL;
+
+ *hdr_off = (u8 *)hdr - skb->data;
+ *seq_id = ntohs(hdr->sequence_id);
+ return 0;
+ default:
+ return -ERANGE;
+ }
+}
+
+static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+ u64 ns = timespec64_to_ns(ts);
+
+ if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
+ return bnxt_ptp_cfg_settime(ptp->bp, ns);
+
+ spin_lock_bh(&ptp->ptp_lock);
+ timecounter_init(&ptp->tc, &ptp->cc, ns);
+ spin_unlock_bh(&ptp->ptp_lock);
+ return 0;
+}
+
+/* Caller holds ptp_lock */
+static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
+ u64 *ns)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u32 high_before, high_now, low;
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return -EIO;
+
+ high_before = readl(bp->bar0 + ptp->refclk_mapped_regs[1]);
+ ptp_read_system_prets(sts);
+ low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ ptp_read_system_postts(sts);
+ high_now = readl(bp->bar0 + ptp->refclk_mapped_regs[1]);
+ if (high_now != high_before) {
+ ptp_read_system_prets(sts);
+ low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ ptp_read_system_postts(sts);
+ }
+ *ns = ((u64)high_now << 32) | low;
+
+ return 0;
+}
+
+static void bnxt_ptp_get_current_time(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp)
+ return;
+ spin_lock_bh(&ptp->ptp_lock);
+ WRITE_ONCE(ptp->old_time, ptp->current_time);
+ bnxt_refclk_read(bp, NULL, &ptp->current_time);
+ spin_unlock_bh(&ptp->ptp_lock);
+}
+
+static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
+{
+ struct hwrm_port_ts_query_output *resp;
+ struct hwrm_port_ts_query_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_TS_QUERY);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(flags);
+ if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) ==
+ PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
+ req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
+ req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
+ req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
+ req->ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
+ }
+ resp = hwrm_req_hold(bp, req);
+
+ rc = hwrm_req_send(bp, req);
+ if (!rc)
+ *ts = le64_to_cpu(resp->ptp_msg_ts);
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+ u64 ns, cycles;
+ int rc;
+
+ spin_lock_bh(&ptp->ptp_lock);
+ rc = bnxt_refclk_read(ptp->bp, sts, &cycles);
+ if (rc) {
+ spin_unlock_bh(&ptp->ptp_lock);
+ return rc;
+ }
+ ns = timecounter_cyc2time(&ptp->tc, cycles);
+ spin_unlock_bh(&ptp->ptp_lock);
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+/* Caller holds ptp_lock */
+void bnxt_ptp_update_current_time(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ bnxt_refclk_read(ptp->bp, NULL, &ptp->current_time);
+ WRITE_ONCE(ptp->old_time, ptp->current_time);
+}
+
+static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta)
+{
+ struct hwrm_port_mac_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(ptp->bp, req, HWRM_PORT_MAC_CFG);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE);
+ req->ptp_adj_phase = cpu_to_le64(delta);
+
+ rc = hwrm_req_send(ptp->bp, req);
+ if (rc) {
+ netdev_err(ptp->bp->dev, "ptp adjphc failed. rc = %x\n", rc);
+ } else {
+ spin_lock_bh(&ptp->ptp_lock);
+ bnxt_ptp_update_current_time(ptp->bp);
+ spin_unlock_bh(&ptp->ptp_lock);
+ }
+
+ return rc;
+}
+
+static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+
+ if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
+ return bnxt_ptp_adjphc(ptp, delta);
+
+ spin_lock_bh(&ptp->ptp_lock);
+ timecounter_adjtime(&ptp->tc, delta);
+ spin_unlock_bh(&ptp->ptp_lock);
+ return 0;
+}
+
+static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+ struct hwrm_port_mac_cfg_input *req;
+ struct bnxt *bp = ptp->bp;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
+ if (rc)
+ return rc;
+
+ req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
+ req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
+ rc = hwrm_req_send(ptp->bp, req);
+ if (rc)
+ netdev_err(ptp->bp->dev,
+ "ptp adjfreq failed. rc = %d\n", rc);
+ return rc;
+}
+
+void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct ptp_clock_event event;
+ u64 ns, pps_ts;
+
+ pps_ts = EVENT_PPS_TS(data2, data1);
+ spin_lock_bh(&ptp->ptp_lock);
+ ns = timecounter_cyc2time(&ptp->tc, pps_ts);
+ spin_unlock_bh(&ptp->ptp_lock);
+
+ switch (EVENT_DATA2_PPS_EVENT_TYPE(data2)) {
+ case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL:
+ event.pps_times.ts_real = ns_to_timespec64(ns);
+ event.type = PTP_CLOCK_PPSUSR;
+ event.index = EVENT_DATA2_PPS_PIN_NUM(data2);
+ break;
+ case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL:
+ event.timestamp = ns;
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = EVENT_DATA2_PPS_PIN_NUM(data2);
+ break;
+ }
+
+ ptp_clock_event(bp->ptp_cfg->ptp_clock, &event);
+}
+
+static int bnxt_ptp_cfg_pin(struct bnxt *bp, u8 pin, u8 usage)
+{
+ struct hwrm_func_ptp_pin_cfg_input *req;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u8 state = usage != BNXT_PPS_PIN_NONE;
+ u8 *pin_state, *pin_usg;
+ u32 enables;
+ int rc;
+
+ if (!TSIO_PIN_VALID(pin)) {
+ netdev_err(ptp->bp->dev, "1PPS: Invalid pin. Check pin-function configuration\n");
+ return -EOPNOTSUPP;
+ }
+
+ rc = hwrm_req_init(ptp->bp, req, HWRM_FUNC_PTP_PIN_CFG);
+ if (rc)
+ return rc;
+
+ enables = (FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE |
+ FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE) << (pin * 2);
+ req->enables = cpu_to_le32(enables);
+
+ pin_state = &req->pin0_state;
+ pin_usg = &req->pin0_usage;
+
+ *(pin_state + (pin * 2)) = state;
+ *(pin_usg + (pin * 2)) = usage;
+
+ rc = hwrm_req_send(ptp->bp, req);
+ if (rc)
+ return rc;
+
+ ptp->pps_info.pins[pin].usage = usage;
+ ptp->pps_info.pins[pin].state = state;
+
+ return 0;
+}
+
+static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event)
+{
+ struct hwrm_func_ptp_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT);
+ req->ptp_pps_event = event;
+ return hwrm_req_send(bp, req);
+}
+
+void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct hwrm_port_mac_cfg_input *req;
+
+ if (!ptp || !ptp->tstamp_filters)
+ return;
+
+ if (hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG))
+ goto out;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters &
+ (PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE))) {
+ ptp->tstamp_filters &= ~(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE);
+ netdev_warn(bp->dev, "Unsupported FW for all RX pkts timestamp filter\n");
+ }
+
+ req->flags = cpu_to_le32(ptp->tstamp_filters);
+ req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
+ req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
+
+ if (!hwrm_req_send(bp, req)) {
+ bp->ptp_all_rx_tstamp = !!(ptp->tstamp_filters &
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE);
+ return;
+ }
+ ptp->tstamp_filters = 0;
+out:
+ bp->ptp_all_rx_tstamp = 0;
+ netdev_warn(bp->dev, "Failed to configure HW packet timestamp filters\n");
+}
+
+void bnxt_ptp_reapply_pps(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct bnxt_pps *pps;
+ u32 pin = 0;
+ int rc;
+
+ if (!ptp || !(bp->fw_cap & BNXT_FW_CAP_PTP_PPS) ||
+ !(ptp->ptp_info.pin_config))
+ return;
+ pps = &ptp->pps_info;
+ for (pin = 0; pin < BNXT_MAX_TSIO_PINS; pin++) {
+ if (pps->pins[pin].state) {
+ rc = bnxt_ptp_cfg_pin(bp, pin, pps->pins[pin].usage);
+ if (!rc && pps->pins[pin].event)
+ rc = bnxt_ptp_cfg_event(bp,
+ pps->pins[pin].event);
+ if (rc)
+ netdev_err(bp->dev, "1PPS: Failed to configure pin%d\n",
+ pin);
+ }
+ }
+}
+
+static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns,
+ u64 *cycles_delta)
+{
+ u64 cycles_now;
+ u64 nsec_now, nsec_delta;
+ int rc;
+
+ spin_lock_bh(&ptp->ptp_lock);
+ rc = bnxt_refclk_read(ptp->bp, NULL, &cycles_now);
+ if (rc) {
+ spin_unlock_bh(&ptp->ptp_lock);
+ return rc;
+ }
+ nsec_now = timecounter_cyc2time(&ptp->tc, cycles_now);
+ spin_unlock_bh(&ptp->ptp_lock);
+
+ nsec_delta = target_ns - nsec_now;
+ *cycles_delta = div64_u64(nsec_delta << ptp->cc.shift, ptp->cc.mult);
+ return 0;
+}
+
+static int bnxt_ptp_perout_cfg(struct bnxt_ptp_cfg *ptp,
+ struct ptp_clock_request *rq)
+{
+ struct hwrm_func_ptp_cfg_input *req;
+ struct bnxt *bp = ptp->bp;
+ struct timespec64 ts;
+ u64 target_ns, delta;
+ u16 enables;
+ int rc;
+
+ ts.tv_sec = rq->perout.start.sec;
+ ts.tv_nsec = rq->perout.start.nsec;
+ target_ns = timespec64_to_ns(&ts);
+
+ rc = bnxt_get_target_cycles(ptp, target_ns, &delta);
+ if (rc)
+ return rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG);
+ if (rc)
+ return rc;
+
+ enables = FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD |
+ FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP |
+ FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE;
+ req->enables = cpu_to_le16(enables);
+ req->ptp_pps_event = 0;
+ req->ptp_freq_adj_dll_source = 0;
+ req->ptp_freq_adj_dll_phase = 0;
+ req->ptp_freq_adj_ext_period = cpu_to_le32(NSEC_PER_SEC);
+ req->ptp_freq_adj_ext_up = 0;
+ req->ptp_freq_adj_ext_phase_lower = cpu_to_le32(delta);
+
+ return hwrm_req_send(bp, req);
+}
+
+static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+ struct bnxt *bp = ptp->bp;
+ int pin_id;
+ int rc;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ /* Configure an External PPS IN */
+ pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (!TSIO_PIN_VALID(pin_id))
+ return -EOPNOTSUPP;
+ if (!on)
+ break;
+ rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_IN);
+ if (rc)
+ return rc;
+ rc = bnxt_ptp_cfg_event(bp, BNXT_PPS_EVENT_EXTERNAL);
+ if (!rc)
+ ptp->pps_info.pins[pin_id].event = BNXT_PPS_EVENT_EXTERNAL;
+ return rc;
+ case PTP_CLK_REQ_PEROUT:
+ /* Configure a Periodic PPS OUT */
+ pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (!TSIO_PIN_VALID(pin_id))
+ return -EOPNOTSUPP;
+ if (!on)
+ break;
+
+ rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_OUT);
+ if (!rc)
+ rc = bnxt_ptp_perout_cfg(ptp, rq);
+
+ return rc;
+ case PTP_CLK_REQ_PPS:
+ /* Configure PHC PPS IN */
+ rc = bnxt_ptp_cfg_pin(bp, 0, BNXT_PPS_PIN_PPS_IN);
+ if (rc)
+ return rc;
+ rc = bnxt_ptp_cfg_event(bp, BNXT_PPS_EVENT_INTERNAL);
+ if (!rc)
+ ptp->pps_info.pins[0].event = BNXT_PPS_EVENT_INTERNAL;
+ return rc;
+ default:
+ netdev_err(ptp->bp->dev, "Unrecognized PIN function\n");
+ return -EOPNOTSUPP;
+ }
+
+ return bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_NONE);
+}
+
+static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u32 flags = 0;
+ int rc = 0;
+
+ switch (ptp->rx_filter) {
+ case HWTSTAMP_FILTER_ALL:
+ flags = PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE;
+ break;
+ case HWTSTAMP_FILTER_NONE:
+ flags = PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
+ if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
+ flags |= PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ flags = PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
+ break;
+ }
+
+ if (ptp->tx_tstamp_en)
+ flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
+ else
+ flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
+
+ ptp->tstamp_filters = flags;
+
+ if (netif_running(bp->dev)) {
+ if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) {
+ rc = bnxt_close_nic(bp, false, false);
+ if (!rc)
+ rc = bnxt_open_nic(bp, false, false);
+ } else {
+ bnxt_ptp_cfg_tstamp_filters(bp);
+ }
+ if (!rc && !ptp->tstamp_filters)
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct hwtstamp_config stmpconf;
+ struct bnxt_ptp_cfg *ptp;
+ u16 old_rxctl;
+ int old_rx_filter, rc;
+ u8 old_tx_tstamp_en;
+
+ ptp = bp->ptp_cfg;
+ if (!ptp)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
+ return -EFAULT;
+
+ if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
+ stmpconf.tx_type != HWTSTAMP_TX_OFF)
+ return -ERANGE;
+
+ old_rx_filter = ptp->rx_filter;
+ old_rxctl = ptp->rxctl;
+ old_tx_tstamp_en = ptp->tx_tstamp_en;
+ switch (stmpconf.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ptp->rxctl = 0;
+ ptp->rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) {
+ ptp->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+ return -EOPNOTSUPP;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ ptp->rxctl = BNXT_PTP_MSG_EVENTS;
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ ptp->rxctl = BNXT_PTP_MSG_SYNC;
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ ptp->rxctl = BNXT_PTP_MSG_DELAY_REQ;
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+ ptp->tx_tstamp_en = 1;
+ else
+ ptp->tx_tstamp_en = 0;
+
+ rc = bnxt_hwrm_ptp_cfg(bp);
+ if (rc)
+ goto ts_set_err;
+
+ stmpconf.rx_filter = ptp->rx_filter;
+ return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
+ -EFAULT : 0;
+
+ts_set_err:
+ ptp->rx_filter = old_rx_filter;
+ ptp->rxctl = old_rxctl;
+ ptp->tx_tstamp_en = old_tx_tstamp_en;
+ return rc;
+}
+
+int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct hwtstamp_config stmpconf;
+ struct bnxt_ptp_cfg *ptp;
+
+ ptp = bp->ptp_cfg;
+ if (!ptp)
+ return -EOPNOTSUPP;
+
+ stmpconf.flags = 0;
+ stmpconf.tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+
+ stmpconf.rx_filter = ptp->rx_filter;
+ return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
+ -EFAULT : 0;
+}
+
+static int bnxt_map_regs(struct bnxt *bp, u32 *reg_arr, int count, int reg_win)
+{
+ u32 reg_base = *reg_arr & BNXT_GRC_BASE_MASK;
+ u32 win_off;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if ((reg_arr[i] & BNXT_GRC_BASE_MASK) != reg_base)
+ return -ERANGE;
+ }
+ win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
+ writel(reg_base, bp->bar0 + win_off);
+ return 0;
+}
+
+static int bnxt_map_ptp_regs(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u32 *reg_arr;
+ int rc, i;
+
+ reg_arr = ptp->refclk_regs;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ rc = bnxt_map_regs(bp, reg_arr, 2, BNXT_PTP_GRC_WIN);
+ if (rc)
+ return rc;
+ for (i = 0; i < 2; i++)
+ ptp->refclk_mapped_regs[i] = BNXT_PTP_GRC_WIN_BASE +
+ (ptp->refclk_regs[i] & BNXT_GRC_OFFSET_MASK);
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static void bnxt_unmap_ptp_regs(struct bnxt *bp)
+{
+ writel(0, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
+ (BNXT_PTP_GRC_WIN - 1) * 4);
+}
+
+static u64 bnxt_cc_read(const struct cyclecounter *cc)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(cc, struct bnxt_ptp_cfg, cc);
+ u64 ns = 0;
+
+ bnxt_refclk_read(ptp->bp, NULL, &ns);
+ return ns;
+}
+
+static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct skb_shared_hwtstamps timestamp;
+ u64 ts = 0, ns = 0;
+ int rc;
+
+ rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts);
+ if (!rc) {
+ memset(&timestamp, 0, sizeof(timestamp));
+ spin_lock_bh(&ptp->ptp_lock);
+ ns = timecounter_cyc2time(&ptp->tc, ts);
+ spin_unlock_bh(&ptp->ptp_lock);
+ timestamp.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(ptp->tx_skb, &timestamp);
+ } else {
+ netdev_err(bp->dev, "TS query for TX timer failed rc = %x\n",
+ rc);
+ }
+
+ dev_kfree_skb_any(ptp->tx_skb);
+ ptp->tx_skb = NULL;
+ atomic_inc(&ptp->tx_avail);
+}
+
+static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+ unsigned long now = jiffies;
+ struct bnxt *bp = ptp->bp;
+
+ if (ptp->tx_skb)
+ bnxt_stamp_tx_skb(bp, ptp->tx_skb);
+
+ if (!time_after_eq(now, ptp->next_period))
+ return ptp->next_period - now;
+
+ bnxt_ptp_get_current_time(bp);
+ ptp->next_period = now + HZ;
+ if (time_after_eq(now, ptp->next_overflow_check)) {
+ spin_lock_bh(&ptp->ptp_lock);
+ timecounter_read(&ptp->tc);
+ spin_unlock_bh(&ptp->ptp_lock);
+ ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
+ }
+ return HZ;
+}
+
+int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (ptp->tx_skb) {
+		netdev_err(bp->dev, "deferring skb: one SKB is still outstanding\n");
+ return -EBUSY;
+ }
+ ptp->tx_skb = skb;
+ ptp_schedule_worker(ptp->ptp_clock, 0);
+ return 0;
+}
+
+int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u64 time;
+
+ if (!ptp)
+ return -ENODEV;
+
+ BNXT_READ_TIME64(ptp, time, ptp->old_time);
+ *ts = (time & BNXT_HI_TIMER_MASK) | pkt_ts;
+ if (pkt_ts < (time & BNXT_LO_TIMER_MASK))
+ *ts += BNXT_LO_TIMER_MASK + 1;
+
+ return 0;
+}
+
+static const struct ptp_clock_info bnxt_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "bnxt clock",
+ .max_adj = BNXT_MAX_PHC_DRIFT,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = bnxt_ptp_adjfreq,
+ .adjtime = bnxt_ptp_adjtime,
+ .do_aux_work = bnxt_ptp_ts_aux_work,
+ .gettimex64 = bnxt_ptp_gettimex,
+ .settime64 = bnxt_ptp_settime,
+ .enable = bnxt_ptp_enable,
+};
+
+static int bnxt_ptp_verify(struct ptp_clock_info *ptp_info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+ /* Allow only PPS pin function configuration */
+ if (ptp->pps_info.pins[pin].usage <= BNXT_PPS_PIN_PPS_OUT &&
+ func != PTP_PF_PHYSYNC)
+ return 0;
+ else
+ return -EOPNOTSUPP;
+}
+
+static int bnxt_ptp_pps_init(struct bnxt *bp)
+{
+ struct hwrm_func_ptp_pin_qcfg_output *resp;
+ struct hwrm_func_ptp_pin_qcfg_input *req;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct ptp_clock_info *ptp_info;
+ struct bnxt_pps *pps_info;
+ u8 *pin_usg;
+ u32 i, rc;
+
+ /* Query current/default PIN CFG */
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_PIN_QCFG);
+ if (rc)
+ return rc;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (rc || !resp->num_pins) {
+ hwrm_req_drop(bp, req);
+ return -EOPNOTSUPP;
+ }
+
+ ptp_info = &ptp->ptp_info;
+ pps_info = &ptp->pps_info;
+ pps_info->num_pins = resp->num_pins;
+ ptp_info->n_pins = pps_info->num_pins;
+ ptp_info->pin_config = kcalloc(ptp_info->n_pins,
+ sizeof(*ptp_info->pin_config),
+ GFP_KERNEL);
+ if (!ptp_info->pin_config) {
+ hwrm_req_drop(bp, req);
+ return -ENOMEM;
+ }
+
+ /* Report the TSIO capability to kernel */
+ pin_usg = &resp->pin0_usage;
+ for (i = 0; i < pps_info->num_pins; i++, pin_usg++) {
+ snprintf(ptp_info->pin_config[i].name,
+ sizeof(ptp_info->pin_config[i].name), "bnxt_pps%d", i);
+ ptp_info->pin_config[i].index = i;
+ ptp_info->pin_config[i].chan = i;
+ if (*pin_usg == BNXT_PPS_PIN_PPS_IN)
+ ptp_info->pin_config[i].func = PTP_PF_EXTTS;
+ else if (*pin_usg == BNXT_PPS_PIN_PPS_OUT)
+ ptp_info->pin_config[i].func = PTP_PF_PEROUT;
+ else
+ ptp_info->pin_config[i].func = PTP_PF_NONE;
+
+ pps_info->pins[i].usage = *pin_usg;
+ }
+ hwrm_req_drop(bp, req);
+
+ /* Only 1 each of ext_ts and per_out pins is available in HW */
+ ptp_info->n_ext_ts = 1;
+ ptp_info->n_per_out = 1;
+ ptp_info->pps = 1;
+ ptp_info->verify = bnxt_ptp_verify;
+
+ return 0;
+}
+
+static bool bnxt_pps_config_ok(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ return !(bp->fw_cap & BNXT_FW_CAP_PTP_PPS) == !ptp->ptp_info.pin_config;
+}
+
+static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp->ptp_clock) {
+ memset(&ptp->cc, 0, sizeof(ptp->cc));
+ ptp->cc.read = bnxt_cc_read;
+ ptp->cc.mask = CYCLECOUNTER_MASK(48);
+ ptp->cc.shift = 0;
+ ptp->cc.mult = 1;
+ ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
+ }
+ if (init_tc)
+ timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+}
+
+/* Caller holds ptp_lock */
+void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns)
+{
+ timecounter_init(&ptp->tc, &ptp->cc, ns);
+ /* For RTC, cycle_last must be in sync with the timecounter value. */
+ ptp->tc.cycle_last = ns & ptp->cc.mask;
+}
+
+int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
+{
+ struct timespec64 tsp;
+ u64 ns;
+ int rc;
+
+ if (!bp->ptp_cfg || !(bp->fw_cap & BNXT_FW_CAP_PTP_RTC))
+ return -ENODEV;
+
+ if (!phc_cfg) {
+ ktime_get_real_ts64(&tsp);
+ ns = timespec64_to_ns(&tsp);
+ rc = bnxt_ptp_cfg_settime(bp, ns);
+ if (rc)
+ return rc;
+ } else {
+ rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME, &ns);
+ if (rc)
+ return rc;
+ }
+ spin_lock_bh(&bp->ptp_cfg->ptp_lock);
+ bnxt_ptp_rtc_timecounter_init(bp->ptp_cfg, ns);
+ spin_unlock_bh(&bp->ptp_cfg->ptp_lock);
+
+ return 0;
+}
+
+static void bnxt_ptp_free(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (ptp->ptp_clock) {
+ ptp_clock_unregister(ptp->ptp_clock);
+ ptp->ptp_clock = NULL;
+ kfree(ptp->ptp_info.pin_config);
+ ptp->ptp_info.pin_config = NULL;
+ }
+}
+
+int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ int rc;
+
+ if (!ptp)
+ return 0;
+
+ rc = bnxt_map_ptp_regs(bp);
+ if (rc)
+ return rc;
+
+ if (ptp->ptp_clock && bnxt_pps_config_ok(bp))
+ return 0;
+
+ bnxt_ptp_free(bp);
+
+ atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
+ spin_lock_init(&ptp->ptp_lock);
+
+ if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
+ bnxt_ptp_timecounter_init(bp, false);
+ rc = bnxt_ptp_init_rtc(bp, phc_cfg);
+ if (rc)
+ goto out;
+ } else {
+ bnxt_ptp_timecounter_init(bp, true);
+ }
+
+ ptp->ptp_info = bnxt_ptp_caps;
+ if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {
+ if (bnxt_ptp_pps_init(bp))
+ netdev_err(bp->dev, "1pps not initialized, continuing without 1pps support\n");
+ }
+ ptp->ptp_clock = ptp_clock_register(&ptp->ptp_info, &bp->pdev->dev);
+ if (IS_ERR(ptp->ptp_clock)) {
+ int err = PTR_ERR(ptp->ptp_clock);
+
+ ptp->ptp_clock = NULL;
+ rc = err;
+ goto out;
+ }
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ spin_lock_bh(&ptp->ptp_lock);
+ bnxt_refclk_read(bp, NULL, &ptp->current_time);
+ WRITE_ONCE(ptp->old_time, ptp->current_time);
+ spin_unlock_bh(&ptp->ptp_lock);
+ ptp_schedule_worker(ptp->ptp_clock, 0);
+ }
+ return 0;
+
+out:
+ bnxt_ptp_free(bp);
+ bnxt_unmap_ptp_regs(bp);
+ return rc;
+}
+
+void bnxt_ptp_clear(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp)
+ return;
+
+ if (ptp->ptp_clock)
+ ptp_clock_unregister(ptp->ptp_clock);
+
+ ptp->ptp_clock = NULL;
+ kfree(ptp->ptp_info.pin_config);
+ ptp->ptp_info.pin_config = NULL;
+
+ if (ptp->tx_skb) {
+ dev_kfree_skb_any(ptp->tx_skb);
+ ptp->tx_skb = NULL;
+ }
+ bnxt_unmap_ptp_regs(bp);
+}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
new file mode 100644
index 000000000000..4ce0a14c1e23
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -0,0 +1,147 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2021 Broadcom Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_PTP_H
+#define BNXT_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+
+#define BNXT_PTP_GRC_WIN 6
+#define BNXT_PTP_GRC_WIN_BASE 0x6000
+
+#define BNXT_MAX_PHC_DRIFT 31000000
+#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL
+#define BNXT_HI_TIMER_MASK 0xffff00000000UL
+
+#define BNXT_PTP_QTS_TIMEOUT 1000
+#define BNXT_PTP_QTS_TX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \
+ PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \
+ PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET)
+
+struct pps_pin {
+ u8 event;
+ u8 usage;
+ u8 state;
+};
+
+#define TSIO_PIN_VALID(pin) ((pin) >= 0 && (pin) < (BNXT_MAX_TSIO_PINS))
+
+#define EVENT_DATA2_PPS_EVENT_TYPE(data2) \
+ ((data2) & ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE)
+
+#define EVENT_DATA2_PPS_PIN_NUM(data2) \
+ (((data2) & \
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK) >>\
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT)
+
+#define BNXT_DATA2_UPPER_MSK \
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK
+
+#define BNXT_DATA2_UPPER_SFT \
+ (32 - \
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT)
+
+#define BNXT_DATA1_LOWER_MSK \
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK
+
+#define BNXT_DATA1_LOWER_SFT \
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT
+
+#define EVENT_PPS_TS(data2, data1) \
+ (((u64)((data2) & BNXT_DATA2_UPPER_MSK) << BNXT_DATA2_UPPER_SFT) |\
+ (((data1) & BNXT_DATA1_LOWER_MSK) >> BNXT_DATA1_LOWER_SFT))
+
+#define BNXT_PPS_PIN_DISABLE 0
+#define BNXT_PPS_PIN_ENABLE 1
+#define BNXT_PPS_PIN_NONE 0
+#define BNXT_PPS_PIN_PPS_IN 1
+#define BNXT_PPS_PIN_PPS_OUT 2
+#define BNXT_PPS_PIN_SYNC_IN 3
+#define BNXT_PPS_PIN_SYNC_OUT 4
+
+#define BNXT_PPS_EVENT_INTERNAL 1
+#define BNXT_PPS_EVENT_EXTERNAL 2
+
+struct bnxt_pps {
+ u8 num_pins;
+#define BNXT_MAX_TSIO_PINS 4
+ struct pps_pin pins[BNXT_MAX_TSIO_PINS];
+};
+
+struct bnxt_ptp_cfg {
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ struct cyclecounter cc;
+ struct timecounter tc;
+ struct bnxt_pps pps_info;
+ /* serialize timecounter access */
+ spinlock_t ptp_lock;
+ struct sk_buff *tx_skb;
+ u64 current_time;
+ u64 old_time;
+ unsigned long next_period;
+ unsigned long next_overflow_check;
+ /* 48-bit PHC overflows in 78 hours. Check overflow every 19 hours. */
+ #define BNXT_PHC_OVERFLOW_PERIOD (19 * 3600 * HZ)
+
+ u16 tx_seqid;
+ u16 tx_hdr_off;
+ struct bnxt *bp;
+ atomic_t tx_avail;
+#define BNXT_MAX_TX_TS 1
+ u16 rxctl;
+#define BNXT_PTP_MSG_SYNC (1 << 0)
+#define BNXT_PTP_MSG_DELAY_REQ (1 << 1)
+#define BNXT_PTP_MSG_PDELAY_REQ (1 << 2)
+#define BNXT_PTP_MSG_PDELAY_RESP (1 << 3)
+#define BNXT_PTP_MSG_FOLLOW_UP (1 << 8)
+#define BNXT_PTP_MSG_DELAY_RESP (1 << 9)
+#define BNXT_PTP_MSG_PDELAY_RESP_FOLLOW_UP (1 << 10)
+#define BNXT_PTP_MSG_ANNOUNCE (1 << 11)
+#define BNXT_PTP_MSG_SIGNALING (1 << 12)
+#define BNXT_PTP_MSG_MANAGEMENT (1 << 13)
+#define BNXT_PTP_MSG_EVENTS (BNXT_PTP_MSG_SYNC | \
+ BNXT_PTP_MSG_DELAY_REQ | \
+ BNXT_PTP_MSG_PDELAY_REQ | \
+ BNXT_PTP_MSG_PDELAY_RESP)
+ u8 tx_tstamp_en:1;
+ int rx_filter;
+ u32 tstamp_filters;
+
+ u32 refclk_regs[2];
+ u32 refclk_mapped_regs[2];
+};
+
+#if BITS_PER_LONG == 32
+#define BNXT_READ_TIME64(ptp, dst, src) \
+do { \
+ spin_lock_bh(&(ptp)->ptp_lock); \
+ (dst) = (src); \
+ spin_unlock_bh(&(ptp)->ptp_lock); \
+} while (0)
+#else
+#define BNXT_READ_TIME64(ptp, dst, src) \
+ ((dst) = READ_ONCE(src))
+#endif
+
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
+void bnxt_ptp_update_current_time(struct bnxt *bp);
+void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
+void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
+void bnxt_ptp_reapply_pps(struct bnxt *bp);
+int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
+int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
+int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
+int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
+void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns);
+int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg);
+int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg);
+void bnxt_ptp_clear(struct bnxt *bp);
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 2aba1e02a8f4..a4cba7cb2783 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -8,6 +8,7 @@
* the Free Software Foundation.
*/
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
@@ -16,6 +17,7 @@
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
+#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
@@ -25,21 +27,26 @@
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
struct bnxt_vf_info *vf, u16 event_id)
{
- struct hwrm_fwd_async_event_cmpl_input req = {0};
+ struct hwrm_fwd_async_event_cmpl_input *req;
struct hwrm_async_event_cmpl *async_cmpl;
int rc = 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL);
+ if (rc)
+ goto exit;
+
if (vf)
- req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
+ req->encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
else
/* broadcast this async event to all VFs */
- req.encap_async_event_target_id = cpu_to_le16(0xffff);
- async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
+ req->encap_async_event_target_id = cpu_to_le16(0xffff);
+ async_cmpl =
+ (struct hwrm_async_event_cmpl *)req->encap_async_event_cmpl;
async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
async_cmpl->event_id = cpu_to_le16(event_id);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_send(bp, req);
+exit:
if (rc)
netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
rc);
@@ -48,10 +55,6 @@ static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
- if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
- netdev_err(bp->dev, "vf ndo called though PF is down\n");
- return -EINVAL;
- }
if (!bp->pf.active_vfs) {
netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
return -EINVAL;
@@ -65,10 +68,10 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
- struct hwrm_func_cfg_input req = {0};
struct bnxt *bp = netdev_priv(dev);
- struct bnxt_vf_info *vf;
+ struct hwrm_func_cfg_input *req;
bool old_setting = false;
+ struct bnxt_vf_info *vf;
u32 func_flags;
int rc;
@@ -85,50 +88,50 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
if (old_setting == setting)
return 0;
- func_flags = vf->func_flags;
if (setting)
- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
+ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
else
- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
+ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
/*TODO: if the driver supports VLAN filter on guest VLAN,
* the spoof check should also include vlan anti-spoofing
*/
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(func_flags);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
if (!rc) {
- vf->func_flags = func_flags;
- if (setting)
- vf->flags |= BNXT_VF_SPOOFCHK;
- else
- vf->flags &= ~BNXT_VF_SPOOFCHK;
+ req->fid = cpu_to_le16(vf->fw_fid);
+ req->flags = cpu_to_le32(func_flags);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ if (setting)
+ vf->flags |= BNXT_VF_SPOOFCHK;
+ else
+ vf->flags &= ~BNXT_VF_SPOOFCHK;
+ }
}
return rc;
}
static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
{
- struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
- req.fid = cpu_to_le16(vf->fw_fid);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc) {
- mutex_unlock(&bp->hwrm_cmd_lock);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
+ if (rc)
return rc;
- }
- vf->func_qcfg_flags = le16_to_cpu(resp->flags);
- mutex_unlock(&bp->hwrm_cmd_lock);
- return 0;
+
+ req->fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc)
+ vf->func_qcfg_flags = le16_to_cpu(resp->flags);
+ hwrm_req_drop(bp, req);
+ return rc;
}
-static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
+bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
- if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
+ if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
return !!(vf->flags & BNXT_VF_TRUST);
bnxt_hwrm_func_qcfg_flags(bp, vf);
@@ -137,20 +140,22 @@ static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
- struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_input *req;
int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.fid = cpu_to_le16(vf->fw_fid);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(vf->fw_fid);
if (vf->flags & BNXT_VF_TRUST)
- req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
+ req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
else
- req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
+ return hwrm_req_send(bp, req);
}
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
@@ -210,8 +215,8 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
- struct hwrm_func_cfg_input req = {0};
struct bnxt *bp = netdev_priv(dev);
+ struct hwrm_func_cfg_input *req;
struct bnxt_vf_info *vf;
int rc;
@@ -227,20 +232,23 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
}
vf = &bp->pf.vf[vf_id];
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ if (rc)
+ return rc;
+
memcpy(vf->mac_addr, mac, ETH_ALEN);
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
- memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ req->fid = cpu_to_le16(vf->fw_fid);
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
+ return hwrm_req_send(bp, req);
}
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
__be16 vlan_proto)
{
- struct hwrm_func_cfg_input req = {0};
struct bnxt *bp = netdev_priv(dev);
+ struct hwrm_func_cfg_input *req;
struct bnxt_vf_info *vf;
u16 vlan_tag;
int rc;
@@ -266,22 +274,23 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
if (vlan_tag == vf->vlan)
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
- req.dflt_vlan = cpu_to_le16(vlan_tag);
- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc)
- vf->vlan = vlan_tag;
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ if (!rc) {
+ req->fid = cpu_to_le16(vf->fw_fid);
+ req->dflt_vlan = cpu_to_le16(vlan_tag);
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+ rc = hwrm_req_send(bp, req);
+ if (!rc)
+ vf->vlan = vlan_tag;
+ }
return rc;
}
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
int max_tx_rate)
{
- struct hwrm_func_cfg_input req = {0};
struct bnxt *bp = netdev_priv(dev);
+ struct hwrm_func_cfg_input *req;
struct bnxt_vf_info *vf;
u32 pf_link_speed;
int rc;
@@ -298,24 +307,25 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
return -EINVAL;
}
- if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
+ if (min_tx_rate > pf_link_speed) {
netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
min_tx_rate, vf_id);
return -EINVAL;
}
if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
return 0;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
- req.max_bw = cpu_to_le32(max_tx_rate);
- req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
- req.min_bw = cpu_to_le32(min_tx_rate);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
if (!rc) {
- vf->min_tx_rate = min_tx_rate;
- vf->max_tx_rate = max_tx_rate;
+ req->fid = cpu_to_le16(vf->fw_fid);
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
+ FUNC_CFG_REQ_ENABLES_MIN_BW);
+ req->max_bw = cpu_to_le32(max_tx_rate);
+ req->min_bw = cpu_to_le32(min_tx_rate);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ vf->min_tx_rate = min_tx_rate;
+ vf->max_tx_rate = max_tx_rate;
+ }
}
return rc;
}
@@ -368,21 +378,22 @@ static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
- int i, rc = 0;
+ struct hwrm_func_vf_resc_free_input *req;
struct bnxt_pf_info *pf = &bp->pf;
- struct hwrm_func_vf_resc_free_input req = {0};
+ int i, rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE);
+ if (rc)
+ return rc;
- mutex_lock(&bp->hwrm_cmd_lock);
+ hwrm_req_hold(bp, req);
for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
- req.vf_id = cpu_to_le16(i);
- rc = _hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
+ req->vf_id = cpu_to_le16(i);
+ rc = hwrm_req_send(bp, req);
if (rc)
break;
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
@@ -403,6 +414,7 @@ static void bnxt_free_vf_resources(struct bnxt *bp)
}
}
+ bp->pf.active_vfs = 0;
kfree(bp->pf.vf);
bp->pf.vf = NULL;
}
@@ -455,52 +467,55 @@ static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
- struct hwrm_func_buf_rgtr_input req = {0};
+ struct hwrm_func_buf_rgtr_input *req;
+ int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR);
+ if (rc)
+ return rc;
- req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
- req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
- req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
- req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
- req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
- req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
- req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
+ req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
+ req->req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
+ req->req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
+ req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
+ req->req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
+ req->req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
+ req->req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return hwrm_req_send(bp, req);
}
-/* Caller holds bp->hwrm_cmd_lock mutex lock */
-static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
+static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
{
- struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_input *req;
struct bnxt_vf_info *vf;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ if (rc)
+ return rc;
vf = &bp->pf.vf[vf_id];
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
+ req->fid = cpu_to_le16(vf->fw_fid);
if (is_valid_ether_addr(vf->mac_addr)) {
- req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
- memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
+ req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req->dflt_mac_addr, vf->mac_addr, ETH_ALEN);
}
if (vf->vlan) {
- req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
- req.dflt_vlan = cpu_to_le16(vf->vlan);
+ req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+ req->dflt_vlan = cpu_to_le16(vf->vlan);
}
if (vf->max_tx_rate) {
- req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
- req.max_bw = cpu_to_le32(vf->max_tx_rate);
-#ifdef HAVE_IFLA_TX_RATE
- req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
- req.min_bw = cpu_to_le32(vf->min_tx_rate);
-#endif
+ req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
+ FUNC_CFG_REQ_ENABLES_MIN_BW);
+ req->max_bw = cpu_to_le32(vf->max_tx_rate);
+ req->min_bw = cpu_to_le32(vf->min_tx_rate);
}
if (vf->flags & BNXT_VF_TRUST)
- req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
+ req->flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
- _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return hwrm_req_send(bp, req);
}
/* Only called by PF to reserve resources for VFs, returns actual number of
@@ -508,7 +523,7 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
*/
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
- struct hwrm_func_vf_resource_cfg_input req = {0};
+ struct hwrm_func_vf_resource_cfg_input *req;
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
@@ -517,7 +532,9 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
u16 vf_msix = 0;
u16 vf_rss;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG);
+ if (rc)
+ return rc;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
@@ -536,21 +553,21 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;
- req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
+ req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
min = 0;
- req.min_rsscos_ctx = cpu_to_le16(min);
+ req->min_rsscos_ctx = cpu_to_le16(min);
}
if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
- req.min_cmpl_rings = cpu_to_le16(min);
- req.min_tx_rings = cpu_to_le16(min);
- req.min_rx_rings = cpu_to_le16(min);
- req.min_l2_ctxs = cpu_to_le16(min);
- req.min_vnics = cpu_to_le16(min);
- req.min_stat_ctx = cpu_to_le16(min);
+ req->min_cmpl_rings = cpu_to_le16(min);
+ req->min_tx_rings = cpu_to_le16(min);
+ req->min_rx_rings = cpu_to_le16(min);
+ req->min_l2_ctxs = cpu_to_le16(min);
+ req->min_vnics = cpu_to_le16(min);
+ req->min_stat_ctx = cpu_to_le16(min);
if (!(bp->flags & BNXT_FLAG_CHIP_P5))
- req.min_hw_ring_grps = cpu_to_le16(min);
+ req->min_hw_ring_grps = cpu_to_le16(min);
} else {
vf_cp_rings /= num_vfs;
vf_tx_rings /= num_vfs;
@@ -560,56 +577,57 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
vf_ring_grps /= num_vfs;
vf_rss /= num_vfs;
- req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
- req.min_tx_rings = cpu_to_le16(vf_tx_rings);
- req.min_rx_rings = cpu_to_le16(vf_rx_rings);
- req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
- req.min_vnics = cpu_to_le16(vf_vnics);
- req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
- req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
- req.min_rsscos_ctx = cpu_to_le16(vf_rss);
+ req->min_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req->min_tx_rings = cpu_to_le16(vf_tx_rings);
+ req->min_rx_rings = cpu_to_le16(vf_rx_rings);
+ req->min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
+ req->min_vnics = cpu_to_le16(vf_vnics);
+ req->min_stat_ctx = cpu_to_le16(vf_stat_ctx);
+ req->min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+ req->min_rsscos_ctx = cpu_to_le16(vf_rss);
}
- req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
- req.max_tx_rings = cpu_to_le16(vf_tx_rings);
- req.max_rx_rings = cpu_to_le16(vf_rx_rings);
- req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
- req.max_vnics = cpu_to_le16(vf_vnics);
- req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
- req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
- req.max_rsscos_ctx = cpu_to_le16(vf_rss);
+ req->max_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req->max_tx_rings = cpu_to_le16(vf_tx_rings);
+ req->max_rx_rings = cpu_to_le16(vf_rx_rings);
+ req->max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
+ req->max_vnics = cpu_to_le16(vf_vnics);
+ req->max_stat_ctx = cpu_to_le16(vf_stat_ctx);
+ req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+ req->max_rsscos_ctx = cpu_to_le16(vf_rss);
if (bp->flags & BNXT_FLAG_CHIP_P5)
- req.max_msix = cpu_to_le16(vf_msix / num_vfs);
+ req->max_msix = cpu_to_le16(vf_msix / num_vfs);
- mutex_lock(&bp->hwrm_cmd_lock);
+ hwrm_req_hold(bp, req);
for (i = 0; i < num_vfs; i++) {
if (reset)
__bnxt_set_vf_params(bp, i);
- req.vf_id = cpu_to_le16(pf->first_vf_id + i);
- rc = _hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
+ req->vf_id = cpu_to_le16(pf->first_vf_id + i);
+ rc = hwrm_req_send(bp, req);
if (rc)
break;
pf->active_vfs = i + 1;
pf->vf[i].fw_fid = pf->first_vf_id + i;
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+
if (pf->active_vfs) {
u16 n = pf->active_vfs;
- hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
- hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
- hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
- n;
- hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
- hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n;
- hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
- hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
+ hw_resc->max_tx_rings -= le16_to_cpu(req->min_tx_rings) * n;
+ hw_resc->max_rx_rings -= le16_to_cpu(req->min_rx_rings) * n;
+ hw_resc->max_hw_ring_grps -=
+ le16_to_cpu(req->min_hw_ring_grps) * n;
+ hw_resc->max_cp_rings -= le16_to_cpu(req->min_cmpl_rings) * n;
+ hw_resc->max_rsscos_ctxs -=
+ le16_to_cpu(req->min_rsscos_ctx) * n;
+ hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
+ hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
if (bp->flags & BNXT_FLAG_CHIP_P5)
- hw_resc->max_irqs -= vf_msix * n;
+ hw_resc->max_nqs -= vf_msix;
rc = pf->active_vfs;
}
+ hwrm_req_drop(bp, req);
return rc;
}
@@ -618,15 +636,18 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
*/
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
- u32 rc = 0, mtu, i;
u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- struct hwrm_func_cfg_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
+ struct hwrm_func_cfg_input *req;
int total_vf_tx_rings = 0;
u16 vf_ring_grps;
+ u32 mtu, i;
+ int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ if (rc)
+ return rc;
/* Remaining rings are distributed equally amongs VF's for now */
vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
@@ -642,50 +663,49 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
- FUNC_CFG_REQ_ENABLES_MRU |
- FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
- FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
- FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
- FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
- FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
- FUNC_CFG_REQ_ENABLES_NUM_VNICS |
- FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
-
- mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- req.mru = cpu_to_le16(mtu);
- req.mtu = cpu_to_le16(mtu);
-
- req.num_rsscos_ctxs = cpu_to_le16(1);
- req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
- req.num_tx_rings = cpu_to_le16(vf_tx_rings);
- req.num_rx_rings = cpu_to_le16(vf_rx_rings);
- req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
- req.num_l2_ctxs = cpu_to_le16(4);
-
- req.num_vnics = cpu_to_le16(vf_vnics);
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
+ FUNC_CFG_REQ_ENABLES_MRU |
+ FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_VNICS |
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
+
+ mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ req->mru = cpu_to_le16(mtu);
+ req->admin_mtu = cpu_to_le16(mtu);
+
+ req->num_rsscos_ctxs = cpu_to_le16(1);
+ req->num_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req->num_tx_rings = cpu_to_le16(vf_tx_rings);
+ req->num_rx_rings = cpu_to_le16(vf_rx_rings);
+ req->num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+ req->num_l2_ctxs = cpu_to_le16(4);
+
+ req->num_vnics = cpu_to_le16(vf_vnics);
/* FIXME spec currently uses 1 bit for stats ctx */
- req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
+ req->num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
- mutex_lock(&bp->hwrm_cmd_lock);
+ hwrm_req_hold(bp, req);
for (i = 0; i < num_vfs; i++) {
int vf_tx_rsvd = vf_tx_rings;
- req.fid = cpu_to_le16(pf->first_vf_id + i);
- rc = _hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
+ req->fid = cpu_to_le16(pf->first_vf_id + i);
+ rc = hwrm_req_send(bp, req);
if (rc)
break;
pf->active_vfs = i + 1;
- pf->vf[i].fw_fid = le16_to_cpu(req.fid);
+ pf->vf[i].fw_fid = le16_to_cpu(req->fid);
rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
&vf_tx_rsvd);
if (rc)
break;
total_vf_tx_rings += vf_tx_rsvd;
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
if (pf->active_vfs) {
hw_resc->max_tx_rings -= total_vf_tx_rings;
hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
@@ -803,8 +823,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
goto err_out2;
rc = pci_enable_sriov(bp->pdev, *num_vfs);
- if (rc)
+ if (rc) {
+ bnxt_ulp_sriov_cfg(bp, 0);
goto err_out2;
+ }
return 0;
@@ -812,6 +834,9 @@ err_out2:
/* Free the resources reserved for various VF's */
bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
+ /* Restore the max resources */
+ bnxt_hwrm_func_qcaps(bp);
+
err_out1:
bnxt_free_vf_resources(bp);
@@ -826,7 +851,7 @@ void bnxt_sriov_disable(struct bnxt *bp)
return;
/* synchronize VF and VF-rep create and destroy */
- mutex_lock(&bp->sriov_lock);
+ devl_lock(bp->dl);
bnxt_vf_reps_destroy(bp);
if (pci_vfs_assigned(bp->pdev)) {
@@ -839,11 +864,10 @@ void bnxt_sriov_disable(struct bnxt *bp)
/* Free the HW resources reserved for various VF's */
bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
}
- mutex_unlock(&bp->sriov_lock);
+ devl_unlock(bp->dl);
bnxt_free_vf_resources(bp);
- bp->pf.active_vfs = 0;
/* Reclaim all resources for the PF. */
rtnl_lock();
bnxt_restore_pf_fw_resources(bp);
@@ -904,23 +928,24 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
void *encap_resp, __le64 encap_resp_addr,
__le16 encap_resp_cpr, u32 msg_size)
{
- int rc = 0;
- struct hwrm_fwd_resp_input req = {0};
+ struct hwrm_fwd_resp_input *req;
+ int rc;
if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
-
- /* Set the new target id */
- req.target_id = cpu_to_le16(vf->fw_fid);
- req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
- req.encap_resp_len = cpu_to_le16(msg_size);
- req.encap_resp_addr = encap_resp_addr;
- req.encap_resp_cmpl_ring = encap_resp_cpr;
- memcpy(req.encap_resp, encap_resp, msg_size);
-
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
+ if (!rc) {
+ /* Set the new target id */
+ req->target_id = cpu_to_le16(vf->fw_fid);
+ req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
+ req->encap_resp_len = cpu_to_le16(msg_size);
+ req->encap_resp_addr = encap_resp_addr;
+ req->encap_resp_cmpl_ring = encap_resp_cpr;
+ memcpy(req->encap_resp, encap_resp, msg_size);
+
+ rc = hwrm_req_send(bp, req);
+ }
if (rc)
netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
return rc;
@@ -929,19 +954,21 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
u32 msg_size)
{
- int rc = 0;
- struct hwrm_reject_fwd_resp_input req = {0};
+ struct hwrm_reject_fwd_resp_input *req;
+ int rc;
if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
- /* Set the new target id */
- req.target_id = cpu_to_le16(vf->fw_fid);
- req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
- memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+ rc = hwrm_req_init(bp, req, HWRM_REJECT_FWD_RESP);
+ if (!rc) {
+ /* Set the new target id */
+ req->target_id = cpu_to_le16(vf->fw_fid);
+ req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
+ memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_send(bp, req);
+ }
if (rc)
netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
return rc;
@@ -950,19 +977,21 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
u32 msg_size)
{
- int rc = 0;
- struct hwrm_exec_fwd_resp_input req = {0};
+ struct hwrm_exec_fwd_resp_input *req;
+ int rc;
if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
- /* Set the new target id */
- req.target_id = cpu_to_le16(vf->fw_fid);
- req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
- memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+ rc = hwrm_req_init(bp, req, HWRM_EXEC_FWD_RESP);
+ if (!rc) {
+ /* Set the new target id */
+ req->target_id = cpu_to_le16(vf->fw_fid);
+ req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
+ memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_send(bp, req);
+ }
if (rc)
netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
return rc;
@@ -1037,15 +1066,15 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = bnxt_hwrm_exec_fwd_resp(
bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
} else {
- struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+ struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
phy_qcfg_req =
(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
- mutex_lock(&bp->hwrm_cmd_lock);
+ mutex_lock(&bp->link_lock);
memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
sizeof(phy_qcfg_resp));
- mutex_unlock(&bp->hwrm_cmd_lock);
+ mutex_unlock(&bp->link_lock);
phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
phy_qcfg_resp.valid = 1;
@@ -1127,38 +1156,9 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
}
}
-void bnxt_update_vf_mac(struct bnxt *bp)
-{
- struct hwrm_func_qcaps_input req = {0};
- struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
- req.fid = cpu_to_le16(0xffff);
-
- mutex_lock(&bp->hwrm_cmd_lock);
- if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
- goto update_vf_mac_exit;
-
- /* Store MAC address from the firmware. There are 2 cases:
- * 1. MAC address is valid. It is assigned from the PF and we
- * need to override the current VF MAC address with it.
- * 2. MAC address is zero. The VF will use a random MAC address by
- * default but the stored zero MAC will allow the VF user to change
- * the random MAC address using ndo_set_mac_address() if he wants.
- */
- if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
- memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
-
- /* overwrite netdev dev_addr with admin VF MAC */
- if (is_valid_ether_addr(bp->vf.mac_addr))
- memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
-update_vf_mac_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
-}
-
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
+int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
- struct hwrm_func_vf_cfg_input req = {0};
+ struct hwrm_func_vf_cfg_input *req;
int rc = 0;
if (!BNXT_VF(bp))
@@ -1169,10 +1169,16 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
rc = -EADDRNOTAVAIL;
goto mac_done;
}
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
- req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
- memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
+ if (rc)
+ goto mac_done;
+
+ req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
+ if (!strict)
+ hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
+ rc = hwrm_req_send(bp, req);
mac_done:
if (rc && strict) {
rc = -EADDRNOTAVAIL;
@@ -1182,6 +1188,47 @@ mac_done:
}
return 0;
}
+
+void bnxt_update_vf_mac(struct bnxt *bp)
+{
+ struct hwrm_func_qcaps_output *resp;
+ struct hwrm_func_qcaps_input *req;
+ bool inform_pf = false;
+
+ if (hwrm_req_init(bp, req, HWRM_FUNC_QCAPS))
+ return;
+
+ req->fid = cpu_to_le16(0xffff);
+
+ resp = hwrm_req_hold(bp, req);
+ if (hwrm_req_send(bp, req))
+ goto update_vf_mac_exit;
+
+ /* Store MAC address from the firmware. There are 2 cases:
+ * 1. MAC address is valid. It is assigned from the PF and we
+ * need to override the current VF MAC address with it.
+ * 2. MAC address is zero. The VF will use a random MAC address by
+ * default but the stored zero MAC will allow the VF user to change
+ * the random MAC address using ndo_set_mac_address() if he wants.
+ */
+ if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) {
+ memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
+ /* This means we are now using our own MAC address, let
+ * the PF know about this MAC address.
+ */
+ if (!is_valid_ether_addr(bp->vf.mac_addr))
+ inform_pf = true;
+ }
+
+ /* overwrite netdev dev_addr with admin VF MAC */
+ if (is_valid_ether_addr(bp->vf.mac_addr))
+ eth_hw_addr_set(bp->dev, bp->vf.mac_addr);
+update_vf_mac_exit:
+ hwrm_req_drop(bp, req);
+ if (inform_pf)
+ bnxt_approve_mac(bp, bp->dev->dev_addr, false);
+}
+
#else
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
@@ -1204,7 +1251,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
{
}
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
+int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
return 0;
}
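
The bnxt_sriov.c hunks above all apply the same mechanical conversion: a stack-allocated request sent with hwrm_send_message() becomes a request obtained from hwrm_req_init() and sent with hwrm_req_send(). A minimal sketch of the single-shot calling convention, assuming the hwrm_req_* helpers from bnxt_hwrm.h behave as they are used in this patch; the wrapper function and its fid argument below are invented for illustration:

/* Sketch of the single-shot request pattern used throughout this file. */
static int bnxt_hwrm_cfg_one_fid(struct bnxt *bp, u16 fid)
{
	struct hwrm_func_cfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);	/* allocates and prepares req */
	if (rc)
		return rc;

	req->fid = cpu_to_le16(fid);
	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	req->dflt_vlan = cpu_to_le16(0);
	return hwrm_req_send(bp, req);			/* sends and releases req */
}
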
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 629641bf6fc5..9a4bacba477b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -34,11 +34,12 @@ int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
int bnxt_set_vf_bw(struct net_device *, int, int, int);
int bnxt_set_vf_link_state(struct net_device *, int, int);
int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
+bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf);
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust);
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
void bnxt_sriov_disable(struct bnxt *);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
-int bnxt_approve_mac(struct bnxt *, u8 *, bool);
+int bnxt_approve_mac(struct bnxt *, const u8 *, bool);
#endif
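
The per-VF loops in the bnxt_sriov.c changes above (VF resource free, VF resource cfg, func cfg) also replace the hwrm_cmd_lock mutex with a hold/drop pair so one request can be reused across several sends. A minimal sketch of that loop shape, with the VF id range assumed for illustration:

/* Sketch of reusing one held request for several firmware calls. */
static int bnxt_hwrm_free_vf_range(struct bnxt *bp, u16 first_vf_id, int num_vfs)
{
	struct hwrm_func_vf_resc_free_input *req;
	int i, rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE);
	if (rc)
		return rc;

	hwrm_req_hold(bp, req);			/* keep req (and its response) valid */
	for (i = 0; i < num_vfs; i++) {
		req->vf_id = cpu_to_le16(first_vf_id + i);
		rc = hwrm_req_send(bp, req);	/* req is not consumed while held */
		if (rc)
			break;
	}
	hwrm_req_drop(bp, req);			/* release the held request */
	return rc;
}
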
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 9bec256b0934..d8afcf8d6b30 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -22,6 +22,7 @@
#include "bnxt_hsi.h"
#include "bnxt.h"
+#include "bnxt_hwrm.h"
#include "bnxt_sriov.h"
#include "bnxt_tc.h"
#include "bnxt_vfr.h"
@@ -279,7 +280,8 @@ bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
static int bnxt_tc_parse_actions(struct bnxt *bp,
struct bnxt_tc_actions *actions,
- struct flow_action *flow_action)
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
{
/* Used to store the L2 rewrite mask for dmac (6 bytes) followed by
* smac (6 bytes) if rewrite of both is specified, otherwise either
@@ -299,6 +301,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
return -EINVAL;
}
+ if (!flow_action_basic_hw_stats_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_DROP:
@@ -491,22 +496,25 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
flow->tun_mask.tp_src = match.mask->src;
}
- return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action);
+ return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action,
+ tc_flow_cmd->common.extack);
}
static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
struct bnxt_tc_flow_node *flow_node)
{
- struct hwrm_cfa_flow_free_input req = { 0 };
+ struct hwrm_cfa_flow_free_input *req;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
- if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
- req.ext_flow_handle = flow_node->ext_flow_handle;
- else
- req.flow_handle = flow_node->flow_handle;
+ rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_FREE);
+ if (!rc) {
+ if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
+ req->ext_flow_handle = flow_node->ext_flow_handle;
+ else
+ req->flow_handle = flow_node->flow_handle;
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_send(bp, req);
+ }
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
@@ -582,20 +590,22 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
struct bnxt_tc_actions *actions = &flow->actions;
struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
- struct hwrm_cfa_flow_alloc_input req = { 0 };
struct hwrm_cfa_flow_alloc_output *resp;
+ struct hwrm_cfa_flow_alloc_input *req;
u16 flow_flags = 0, action_flags = 0;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_ALLOC);
+ if (rc)
+ return rc;
- req.src_fid = cpu_to_le16(flow->src_fid);
- req.ref_flow_handle = ref_flow_handle;
+ req->src_fid = cpu_to_le16(flow->src_fid);
+ req->ref_flow_handle = ref_flow_handle;
if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) {
- memcpy(req.l2_rewrite_dmac, actions->l2_rewrite_dmac,
+ memcpy(req->l2_rewrite_dmac, actions->l2_rewrite_dmac,
ETH_ALEN);
- memcpy(req.l2_rewrite_smac, actions->l2_rewrite_smac,
+ memcpy(req->l2_rewrite_smac, actions->l2_rewrite_smac,
ETH_ALEN);
action_flags |=
CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
@@ -610,71 +620,71 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
action_flags |=
CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
/* L3 source rewrite */
- req.nat_ip_address[0] =
+ req->nat_ip_address[0] =
actions->nat.l3.ipv4.saddr.s_addr;
/* L4 source port */
if (actions->nat.l4.ports.sport)
- req.nat_port =
+ req->nat_port =
actions->nat.l4.ports.sport;
} else {
action_flags |=
CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
/* L3 destination rewrite */
- req.nat_ip_address[0] =
+ req->nat_ip_address[0] =
actions->nat.l3.ipv4.daddr.s_addr;
/* L4 destination port */
if (actions->nat.l4.ports.dport)
- req.nat_port =
+ req->nat_port =
actions->nat.l4.ports.dport;
}
netdev_dbg(bp->dev,
- "req.nat_ip_address: %pI4 src_xlate: %d req.nat_port: %x\n",
- req.nat_ip_address, actions->nat.src_xlate,
- req.nat_port);
+ "req->nat_ip_address: %pI4 src_xlate: %d req->nat_port: %x\n",
+ req->nat_ip_address, actions->nat.src_xlate,
+ req->nat_port);
} else {
if (actions->nat.src_xlate) {
action_flags |=
CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
/* L3 source rewrite */
- memcpy(req.nat_ip_address,
+ memcpy(req->nat_ip_address,
actions->nat.l3.ipv6.saddr.s6_addr32,
- sizeof(req.nat_ip_address));
+ sizeof(req->nat_ip_address));
/* L4 source port */
if (actions->nat.l4.ports.sport)
- req.nat_port =
+ req->nat_port =
actions->nat.l4.ports.sport;
} else {
action_flags |=
CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
/* L3 destination rewrite */
- memcpy(req.nat_ip_address,
+ memcpy(req->nat_ip_address,
actions->nat.l3.ipv6.daddr.s6_addr32,
- sizeof(req.nat_ip_address));
+ sizeof(req->nat_ip_address));
/* L4 destination port */
if (actions->nat.l4.ports.dport)
- req.nat_port =
+ req->nat_port =
actions->nat.l4.ports.dport;
}
netdev_dbg(bp->dev,
- "req.nat_ip_address: %pI6 src_xlate: %d req.nat_port: %x\n",
- req.nat_ip_address, actions->nat.src_xlate,
- req.nat_port);
+ "req->nat_ip_address: %pI6 src_xlate: %d req->nat_port: %x\n",
+ req->nat_ip_address, actions->nat.src_xlate,
+ req->nat_port);
}
}
if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
- req.tunnel_handle = tunnel_handle;
+ req->tunnel_handle = tunnel_handle;
flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
}
- req.ethertype = flow->l2_key.ether_type;
- req.ip_proto = flow->l4_key.ip_proto;
+ req->ethertype = flow->l2_key.ether_type;
+ req->ip_proto = flow->l4_key.ip_proto;
if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
- memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
- memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
+ memcpy(req->dmac, flow->l2_key.dmac, ETH_ALEN);
+ memcpy(req->smac, flow->l2_key.smac, ETH_ALEN);
}
if (flow->l2_key.num_vlans > 0) {
@@ -683,7 +693,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
* in outer_vlan_tci when num_vlans is 1 (which is
* always the case in TC.)
*/
- req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
+ req->outer_vlan_tci = flow->l2_key.inner_vlan_tci;
}
/* If all IP and L4 fields are wildcarded then this is an L2 flow */
@@ -696,68 +706,67 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;
if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
- req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
- req.ip_dst_mask_len =
+ req->ip_dst[0] = l3_key->ipv4.daddr.s_addr;
+ req->ip_dst_mask_len =
inet_mask_len(l3_mask->ipv4.daddr.s_addr);
- req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
- req.ip_src_mask_len =
+ req->ip_src[0] = l3_key->ipv4.saddr.s_addr;
+ req->ip_src_mask_len =
inet_mask_len(l3_mask->ipv4.saddr.s_addr);
} else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
- memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
- sizeof(req.ip_dst));
- req.ip_dst_mask_len =
+ memcpy(req->ip_dst, l3_key->ipv6.daddr.s6_addr32,
+ sizeof(req->ip_dst));
+ req->ip_dst_mask_len =
ipv6_mask_len(&l3_mask->ipv6.daddr);
- memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
- sizeof(req.ip_src));
- req.ip_src_mask_len =
+ memcpy(req->ip_src, l3_key->ipv6.saddr.s6_addr32,
+ sizeof(req->ip_src));
+ req->ip_src_mask_len =
ipv6_mask_len(&l3_mask->ipv6.saddr);
}
}
if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
- req.l4_src_port = flow->l4_key.ports.sport;
- req.l4_src_port_mask = flow->l4_mask.ports.sport;
- req.l4_dst_port = flow->l4_key.ports.dport;
- req.l4_dst_port_mask = flow->l4_mask.ports.dport;
+ req->l4_src_port = flow->l4_key.ports.sport;
+ req->l4_src_port_mask = flow->l4_mask.ports.sport;
+ req->l4_dst_port = flow->l4_key.ports.dport;
+ req->l4_dst_port_mask = flow->l4_mask.ports.dport;
} else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
/* l4 ports serve as type/code when ip_proto is ICMP */
- req.l4_src_port = htons(flow->l4_key.icmp.type);
- req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
- req.l4_dst_port = htons(flow->l4_key.icmp.code);
- req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
+ req->l4_src_port = htons(flow->l4_key.icmp.type);
+ req->l4_src_port_mask = htons(flow->l4_mask.icmp.type);
+ req->l4_dst_port = htons(flow->l4_key.icmp.code);
+ req->l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
}
- req.flags = cpu_to_le16(flow_flags);
+ req->flags = cpu_to_le16(flow_flags);
if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
} else {
if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
- req.dst_fid = cpu_to_le16(actions->dst_fid);
+ req->dst_fid = cpu_to_le16(actions->dst_fid);
}
if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
action_flags |=
CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
- req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
- req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
- memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
- memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
+ req->l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
+ req->l2_rewrite_vlan_tci = actions->push_vlan_tci;
+ memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN);
+ memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN);
}
if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
action_flags |=
CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
/* Rewrite config with tpid = 0 implies vlan pop */
- req.l2_rewrite_vlan_tpid = 0;
- memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
- memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
+ req->l2_rewrite_vlan_tpid = 0;
+ memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN);
+ memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN);
}
}
- req.action_flags = cpu_to_le16(action_flags);
+ req->action_flags = cpu_to_le16(action_flags);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
if (!rc) {
- resp = bnxt_get_hwrm_resp_addr(bp, &req);
/* CFA_FLOW_ALLOC response interpretation:
* fw with fw with
* 16-bit 64-bit
@@ -773,7 +782,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
flow_node->flow_id = resp->flow_id;
}
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
@@ -783,67 +792,69 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
__le32 ref_decap_handle,
__le32 *decap_filter_handle)
{
- struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
struct hwrm_cfa_decap_filter_alloc_output *resp;
struct ip_tunnel_key *tun_key = &flow->tun_key;
+ struct hwrm_cfa_decap_filter_alloc_input *req;
u32 enables = 0;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_CFA_DECAP_FILTER_ALLOC);
+ if (rc)
+ goto exit;
- req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
+ req->flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
- req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
- req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;
+ req->tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ req->ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;
if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
/* tunnel_id is wrongly defined in hsi defn. as __le32 */
- req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
+ req->tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
}
if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
- ether_addr_copy(req.dst_macaddr, l2_info->dmac);
+ ether_addr_copy(req->dst_macaddr, l2_info->dmac);
}
if (l2_info->num_vlans) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
- req.t_ivlan_vid = l2_info->inner_vlan_tci;
+ req->t_ivlan_vid = l2_info->inner_vlan_tci;
}
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
- req.ethertype = htons(ETH_P_IP);
+ req->ethertype = htons(ETH_P_IP);
if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
- req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
- req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
- req.src_ipaddr[0] = tun_key->u.ipv4.src;
+ req->ip_addr_type =
+ CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
+ req->dst_ipaddr[0] = tun_key->u.ipv4.dst;
+ req->src_ipaddr[0] = tun_key->u.ipv4.src;
}
if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
- req.dst_port = tun_key->tp_dst;
+ req->dst_port = tun_key->tp_dst;
}
/* Eventhough the decap_handle returned by hwrm_cfa_decap_filter_alloc
* is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
*/
- req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
- req.enables = cpu_to_le32(enables);
+ req->l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
+ req->enables = cpu_to_le32(enables);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc) {
- resp = bnxt_get_hwrm_resp_addr(bp, &req);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (!rc)
*decap_filter_handle = resp->decap_filter_id;
- } else {
+ hwrm_req_drop(bp, req);
+exit:
+ if (rc)
netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
- }
- mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -851,13 +862,14 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
__le32 decap_filter_handle)
{
- struct hwrm_cfa_decap_filter_free_input req = { 0 };
+ struct hwrm_cfa_decap_filter_free_input *req;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
- req.decap_filter_id = decap_filter_handle;
-
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_CFA_DECAP_FILTER_FREE);
+ if (!rc) {
+ req->decap_filter_id = decap_filter_handle;
+ rc = hwrm_req_send(bp, req);
+ }
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
@@ -869,18 +881,18 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
struct bnxt_tc_l2_key *l2_info,
__le32 *encap_record_handle)
{
- struct hwrm_cfa_encap_record_alloc_input req = { 0 };
struct hwrm_cfa_encap_record_alloc_output *resp;
- struct hwrm_cfa_encap_data_vxlan *encap =
- (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
- struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
- (struct hwrm_vxlan_ipv4_hdr *)encap->l3;
+ struct hwrm_cfa_encap_record_alloc_input *req;
+ struct hwrm_cfa_encap_data_vxlan *encap;
+ struct hwrm_vxlan_ipv4_hdr *encap_ipv4;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);
-
- req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
+ rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_ALLOC);
+ if (rc)
+ goto exit;
+ encap = (struct hwrm_cfa_encap_data_vxlan *)&req->encap_data;
+ req->encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
ether_addr_copy(encap->src_mac_addr, l2_info->smac);
if (l2_info->num_vlans) {
@@ -889,6 +901,7 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
encap->ovlan_tpid = l2_info->inner_vlan_tpid;
}
+ encap_ipv4 = (struct hwrm_vxlan_ipv4_hdr *)encap->l3;
encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
encap_ipv4->ttl = encap_key->ttl;
@@ -900,15 +913,14 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
encap->dst_port = encap_key->tp_dst;
encap->vni = tunnel_id_to_key32(encap_key->tun_id);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc) {
- resp = bnxt_get_hwrm_resp_addr(bp, &req);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (!rc)
*encap_record_handle = resp->encap_record_id;
- } else {
+ hwrm_req_drop(bp, req);
+exit:
+ if (rc)
netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
- }
- mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -916,13 +928,14 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
static int hwrm_cfa_encap_record_free(struct bnxt *bp,
__le32 encap_record_handle)
{
- struct hwrm_cfa_encap_record_free_input req = { 0 };
+ struct hwrm_cfa_encap_record_free_input *req;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
- req.encap_record_id = encap_record_handle;
-
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_FREE);
+ if (!rc) {
+ req->encap_record_id = encap_record_handle;
+ rc = hwrm_req_send(bp, req);
+ }
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
@@ -1633,8 +1646,8 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
lastused = flow->lastused;
spin_unlock(&flow->stats_lock);
- flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets,
- lastused);
+ flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets, 0,
+ lastused, FLOW_ACTION_HW_STATS_DELAYED);
return 0;
}
@@ -1668,14 +1681,20 @@ static int
bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
struct bnxt_tc_stats_batch stats_batch[])
{
- struct hwrm_cfa_flow_stats_input req = { 0 };
struct hwrm_cfa_flow_stats_output *resp;
- __le16 *req_flow_handles = &req.flow_handle_0;
- __le32 *req_flow_ids = &req.flow_id_0;
+ struct hwrm_cfa_flow_stats_input *req;
+ __le16 *req_flow_handles;
+ __le32 *req_flow_ids;
int rc, i;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
- req.num_flows = cpu_to_le16(num_flows);
+ rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_STATS);
+ if (rc)
+ goto exit;
+
+ req_flow_handles = &req->flow_handle_0;
+ req_flow_ids = &req->flow_id_0;
+
+ req->num_flows = cpu_to_le16(num_flows);
for (i = 0; i < num_flows; i++) {
struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
@@ -1683,13 +1702,12 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
&req_flow_handles[i], &req_flow_ids[i]);
}
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (!rc) {
__le64 *resp_packets;
__le64 *resp_bytes;
- resp = bnxt_get_hwrm_resp_addr(bp, &req);
resp_packets = &resp->packet_0;
resp_bytes = &resp->byte_0;
@@ -1699,10 +1717,11 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
stats_batch[i].hw_stats.bytes =
le64_to_cpu(resp_bytes[i]);
}
- } else {
- netdev_info(bp->dev, "error rc=%d\n", rc);
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
+exit:
+ if (rc)
+ netdev_info(bp->dev, "error rc=%d\n", rc);
return rc;
}
@@ -1849,7 +1868,7 @@ static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type,
struct flow_cls_offload *flower = type_data;
struct bnxt *bp = priv->bp;
- if (flower->common.chain_index)
+ if (!tc_cls_can_offload_and_chain0(bp->dev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -1865,9 +1884,6 @@ bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
{
struct bnxt_flower_indr_block_cb_priv *cb_priv;
- /* All callback list access should be protected by RTNL. */
- ASSERT_RTNL();
-
list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
if (cb_priv->tunnel_netdev == netdev)
return cb_priv;
@@ -1883,8 +1899,9 @@ static void bnxt_tc_setup_indr_rel(void *cb_priv)
kfree(priv);
}
-static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
- struct flow_block_offload *f)
+static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct Qdisc *sch, struct bnxt *bp,
+ struct flow_block_offload *f, void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
{
struct bnxt_flower_indr_block_cb_priv *cb_priv;
struct flow_block_cb *block_cb;
@@ -1902,9 +1919,10 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
cb_priv->bp = bp;
list_add(&cb_priv->list, &bp->tc_indr_block_list);
- block_cb = flow_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
- cb_priv, cb_priv,
- bnxt_tc_setup_indr_rel);
+ block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
+ cb_priv, cb_priv,
+ bnxt_tc_setup_indr_rel, f,
+ netdev, sch, data, bp, cleanup);
if (IS_ERR(block_cb)) {
list_del(&cb_priv->list);
kfree(cb_priv);
@@ -1925,7 +1943,7 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
if (!block_cb)
return -ENOENT;
- flow_block_cb_remove(block_cb, f);
+ flow_indr_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
break;
default:
@@ -1934,53 +1952,27 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
return 0;
}
-static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
- enum tc_setup_type type, void *type_data)
-{
- switch (type) {
- case TC_SETUP_BLOCK:
- return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
static bool bnxt_is_netdev_indr_offload(struct net_device *netdev)
{
return netif_is_vxlan(netdev);
}
-static int bnxt_tc_indr_block_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
+ enum tc_setup_type type, void *type_data,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
{
- struct net_device *netdev;
- struct bnxt *bp;
- int rc;
-
- netdev = netdev_notifier_info_to_dev(ptr);
- if (!bnxt_is_netdev_indr_offload(netdev))
- return NOTIFY_OK;
-
- bp = container_of(nb, struct bnxt, tc_netdev_nb);
+ if (!netdev || !bnxt_is_netdev_indr_offload(netdev))
+ return -EOPNOTSUPP;
- switch (event) {
- case NETDEV_REGISTER:
- rc = __flow_indr_block_cb_register(netdev, bp,
- bnxt_tc_setup_indr_cb,
- bp);
- if (rc)
- netdev_info(bp->dev,
- "Failed to register indirect blk: dev: %s\n",
- netdev->name);
- break;
- case NETDEV_UNREGISTER:
- __flow_indr_block_cb_unregister(netdev,
- bnxt_tc_setup_indr_cb,
- bp);
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return bnxt_tc_setup_indr_block(netdev, sch, cb_priv, type_data, data, cleanup);
+ default:
break;
}
- return NOTIFY_DONE;
+ return -EOPNOTSUPP;
}
static const struct rhashtable_params bnxt_tc_flow_ht_params = {
@@ -2019,11 +2011,8 @@ int bnxt_init_tc(struct bnxt *bp)
struct bnxt_tc_info *tc_info;
int rc;
- if (bp->hwrm_spec_code < 0x10803) {
- netdev_warn(bp->dev,
- "Firmware does not support TC flower offload.\n");
- return -ENOTSUPP;
- }
+ if (bp->hwrm_spec_code < 0x10803)
+ return 0;
tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
if (!tc_info)
@@ -2069,8 +2058,8 @@ int bnxt_init_tc(struct bnxt *bp)
/* init indirect block notifications */
INIT_LIST_HEAD(&bp->tc_indr_block_list);
- bp->tc_netdev_nb.notifier_call = bnxt_tc_indr_block_event;
- rc = register_netdevice_notifier(&bp->tc_netdev_nb);
+
+ rc = flow_indr_dev_register(bnxt_tc_setup_indr_cb, bp);
if (!rc)
return 0;
@@ -2096,7 +2085,8 @@ void bnxt_shutdown_tc(struct bnxt *bp)
if (!bnxt_tc_flower_enabled(bp))
return;
- unregister_netdevice_notifier(&bp->tc_netdev_nb);
+ flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp,
+ bnxt_tc_setup_indr_rel);
rhashtable_destroy(&tc_info->flow_table);
rhashtable_destroy(&tc_info->l2_table);
rhashtable_destroy(&tc_info->decap_l2_table);
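
The bnxt_tc.c allocation paths above read firmware output through the pointer returned by hwrm_req_hold() and use hwrm_req_send_silent() so expected failures are not logged by the HWRM layer (bnxt_approve_mac achieves the same with hwrm_req_flags() and BNXT_HWRM_CTX_SILENT). A minimal sketch of that response-handling shape, with the handle argument assumed for illustration:

/* Sketch of reading a response while the request is held. */
static int bnxt_hwrm_encap_alloc_sketch(struct bnxt *bp, __le32 *handle)
{
	struct hwrm_cfa_encap_record_alloc_output *resp;
	struct hwrm_cfa_encap_record_alloc_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_ALLOC);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);		/* resp stays valid until drop */
	rc = hwrm_req_send_silent(bp, req);	/* suppress firmware error logging */
	if (!rc)
		*handle = resp->encap_record_id;
	hwrm_req_drop(bp, req);
	return rc;
}
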
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 4a316c4b3fa8..2e54bf4fc7a7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -22,9 +22,10 @@
#include "bnxt_hsi.h"
#include "bnxt.h"
+#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
-static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
+static int bnxt_register_dev(struct bnxt_en_dev *edev, unsigned int ulp_id,
struct bnxt_ulp_ops *ulp_ops, void *handle)
{
struct net_device *dev = edev->net;
@@ -61,7 +62,7 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
return 0;
}
-static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
+static int bnxt_unregister_dev(struct bnxt_en_dev *edev, unsigned int ulp_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
@@ -104,11 +105,17 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
for (i = 0; i < num_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
- ent[i].db_offset = (idx + i) * 0x80;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ ent[i].db_offset = DB_PF_OFFSET_P5;
+ if (BNXT_VF(bp))
+ ent[i].db_offset = DB_VF_OFFSET_P5;
+ } else {
+ ent[i].db_offset = (idx + i) * 0x80;
+ }
}
}
-static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
+static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, unsigned int ulp_id,
struct bnxt_msix_entry *ent, int num_msix)
{
struct net_device *dev = edev->net;
@@ -172,7 +179,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
return avail_msix;
}
-static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
+static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, unsigned int ulp_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
@@ -216,38 +223,48 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
- return BNXT_MIN_ROCE_STAT_CTXS;
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ struct bnxt_en_dev *edev = bp->edev;
+
+ if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
+ return BNXT_MIN_ROCE_STAT_CTXS;
+ }
return 0;
}
-static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
+static int bnxt_send_msg(struct bnxt_en_dev *edev, unsigned int ulp_id,
struct bnxt_fw_msg *fw_msg)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
+ struct output *resp;
struct input *req;
+ u32 resp_len;
int rc;
if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state)
return -EBUSY;
- mutex_lock(&bp->hwrm_cmd_lock);
- req = fw_msg->msg;
- req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
- rc = _hwrm_send_message(bp, fw_msg->msg, fw_msg->msg_len,
- fw_msg->timeout);
- if (!rc) {
- struct output *resp = bp->hwrm_cmd_resp_addr;
- u32 len = le16_to_cpu(resp->resp_len);
+ rc = hwrm_req_init(bp, req, 0 /* don't care */);
+ if (rc)
+ return rc;
+
+ rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
+ if (rc)
+ return rc;
- if (fw_msg->resp_max_len < len)
- len = fw_msg->resp_max_len;
+ hwrm_req_timeout(bp, req, fw_msg->timeout);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ resp_len = le16_to_cpu(resp->resp_len);
+ if (resp_len) {
+ if (fw_msg->resp_max_len < resp_len)
+ resp_len = fw_msg->resp_max_len;
- memcpy(fw_msg->resp, resp, len);
+ memcpy(fw_msg->resp, resp, resp_len);
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
@@ -430,7 +447,7 @@ void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
rcu_read_unlock();
}
-static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
+static int bnxt_register_async_events(struct bnxt_en_dev *edev, unsigned int ulp_id,
unsigned long *events_bmap, u16 max_id)
{
struct net_device *dev = edev->net;
@@ -469,13 +486,17 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
if (!edev)
return ERR_PTR(-ENOMEM);
edev->en_ops = &bnxt_en_ops_tbl;
- if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
- edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
- if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
- edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
edev->net = dev;
edev->pdev = bp->pdev;
+ edev->l2_db_size = bp->db_size;
+ edev->l2_db_size_nc = bp->db_size;
bp->edev = edev;
}
+ edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP;
+ if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
+ edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
+ if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
+ edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
return bp->edev;
}
+EXPORT_SYMBOL(bnxt_ulp_probe);
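
The bnxt_send_msg() change above shows how a message built by an upper-layer driver is now forwarded: the request is initialized, its body is replaced with the caller's buffer via hwrm_req_replace(), and the caller's timeout is applied with hwrm_req_timeout() before the usual hold/send/drop sequence. A condensed sketch of that flow, assuming fw_msg carries a fully formed HWRM request as in the code above:

/* Sketch of forwarding a caller-built HWRM message. */
static int bnxt_forward_fw_msg(struct bnxt *bp, struct bnxt_fw_msg *fw_msg)
{
	struct output *resp;
	struct input *req;
	int rc;

	rc = hwrm_req_init(bp, req, 0);		/* type comes from the replaced body */
	if (rc)
		return rc;

	rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
	if (rc)
		return rc;

	hwrm_req_timeout(bp, req, fw_msg->timeout);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (le16_to_cpu(resp->resp_len))
		memcpy(fw_msg->resp, resp,
		       min_t(u32, le16_to_cpu(resp->resp_len),
			     fw_msg->resp_max_len));
	hwrm_req_drop(bp, req);
	return rc;
}
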
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 9895406b9830..42b50abc3e91 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -11,8 +11,7 @@
#define BNXT_ULP_H
#define BNXT_ROCE_ULP 0
-#define BNXT_OTHER_ULP 1
-#define BNXT_MAX_ULP 2
+#define BNXT_MAX_ULP 1
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
@@ -67,18 +66,26 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
const struct bnxt_en_ops *en_ops;
struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
+ int l2_db_size; /* Doorbell BAR size in
+ * bytes mapped by L2
+ * driver.
+ */
+ int l2_db_size_nc; /* Doorbell BAR size in
+ * bytes mapped as non-
+ * cacheable.
+ */
};
struct bnxt_en_ops {
- int (*bnxt_register_device)(struct bnxt_en_dev *, int,
+ int (*bnxt_register_device)(struct bnxt_en_dev *, unsigned int,
struct bnxt_ulp_ops *, void *);
- int (*bnxt_unregister_device)(struct bnxt_en_dev *, int);
- int (*bnxt_request_msix)(struct bnxt_en_dev *, int,
+ int (*bnxt_unregister_device)(struct bnxt_en_dev *, unsigned int);
+ int (*bnxt_request_msix)(struct bnxt_en_dev *, unsigned int,
struct bnxt_msix_entry *, int);
- int (*bnxt_free_msix)(struct bnxt_en_dev *, int);
- int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, int,
+ int (*bnxt_free_msix)(struct bnxt_en_dev *, unsigned int);
+ int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, unsigned int,
struct bnxt_fw_msg *);
- int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, int,
+ int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, unsigned int,
unsigned long *, u16);
};
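
The bnxt_en_ops table above now takes the ULP id as unsigned int and only BNXT_ROCE_ULP remains. A hedged sketch of how an upper-layer driver might call the registration op through this table; the example_ulp_attach() wrapper and its arguments are invented for illustration:

/* Sketch: registering an upper-layer driver through the ops table. */
static int example_ulp_attach(struct bnxt_en_dev *edev,
			      struct bnxt_ulp_ops *ulp_ops, void *handle)
{
	/* ulp_id is BNXT_ROCE_ULP (0); BNXT_MAX_ULP is now 1 */
	return edev->en_ops->bnxt_register_device(edev, BNXT_ROCE_ULP,
						  ulp_ops, handle);
}
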
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 6f2faf81c1ae..fcc65890820a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -15,6 +15,7 @@
#include "bnxt_hsi.h"
#include "bnxt.h"
+#include "bnxt_hwrm.h"
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
#include "bnxt_tc.h"
@@ -27,38 +28,40 @@
static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx,
u16 *tx_cfa_action, u16 *rx_cfa_code)
{
- struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_cfa_vfr_alloc_input req = { 0 };
+ struct hwrm_cfa_vfr_alloc_output *resp;
+ struct hwrm_cfa_vfr_alloc_input *req;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_ALLOC, -1, -1);
- req.vf_id = cpu_to_le16(vf_idx);
- sprintf(req.vfr_name, "vfr%d", vf_idx);
-
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_ALLOC);
if (!rc) {
- *tx_cfa_action = le16_to_cpu(resp->tx_cfa_action);
- *rx_cfa_code = le16_to_cpu(resp->rx_cfa_code);
- netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x",
- *tx_cfa_action, *rx_cfa_code);
- } else {
- netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
+ req->vf_id = cpu_to_le16(vf_idx);
+ sprintf(req->vfr_name, "vfr%d", vf_idx);
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ *tx_cfa_action = le16_to_cpu(resp->tx_cfa_action);
+ *rx_cfa_code = le16_to_cpu(resp->rx_cfa_code);
+ netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x",
+ *tx_cfa_action, *rx_cfa_code);
+ }
+ hwrm_req_drop(bp, req);
}
-
- mutex_unlock(&bp->hwrm_cmd_lock);
+ if (rc)
+ netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
return rc;
}
static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
{
- struct hwrm_cfa_vfr_free_input req = { 0 };
+ struct hwrm_cfa_vfr_free_input *req;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_FREE, -1, -1);
- sprintf(req.vfr_name, "vfr%d", vf_idx);
-
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_FREE);
+ if (!rc) {
+ sprintf(req->vfr_name, "vfr%d", vf_idx);
+ rc = hwrm_req_send(bp, req);
+ }
if (rc)
netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
return rc;
@@ -67,17 +70,18 @@ static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
u16 *max_mtu)
{
- struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
u16 mtu;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
- req.fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid);
-
- mutex_lock(&bp->hwrm_cmd_lock);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
+ if (rc)
+ return rc;
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req->fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
if (!rc) {
mtu = le16_to_cpu(resp->max_mtu_configured);
if (!mtu)
@@ -85,7 +89,7 @@ static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
else
*max_mtu = mtu;
}
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
return rc;
}
@@ -218,8 +222,7 @@ static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf,
static void bnxt_vf_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev,
@@ -285,8 +288,26 @@ void bnxt_vf_reps_open(struct bnxt *bp)
if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
return;
- for (i = 0; i < pci_num_vf(bp->pdev); i++)
- bnxt_vf_rep_open(bp->vf_reps[i]->dev);
+ for (i = 0; i < pci_num_vf(bp->pdev); i++) {
+ /* Open the VF-Rep only if it is allocated in the FW */
+ if (bp->vf_reps[i]->tx_cfa_action != CFA_HANDLE_INVALID)
+ bnxt_vf_rep_open(bp->vf_reps[i]->dev);
+ }
+}
+
+static void __bnxt_free_one_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep)
+{
+ if (!vf_rep)
+ return;
+
+ if (vf_rep->dst) {
+ dst_release((struct dst_entry *)vf_rep->dst);
+ vf_rep->dst = NULL;
+ }
+ if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID) {
+ hwrm_cfa_vfr_free(bp, vf_rep->vf_idx);
+ vf_rep->tx_cfa_action = CFA_HANDLE_INVALID;
+ }
}
static void __bnxt_vf_reps_destroy(struct bnxt *bp)
@@ -298,11 +319,7 @@ static void __bnxt_vf_reps_destroy(struct bnxt *bp)
for (i = 0; i < num_vfs; i++) {
vf_rep = bp->vf_reps[i];
if (vf_rep) {
- dst_release((struct dst_entry *)vf_rep->dst);
-
- if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID)
- hwrm_cfa_vfr_free(bp, vf_rep->vf_idx);
-
+ __bnxt_free_one_vf_rep(bp, vf_rep);
if (vf_rep->dev) {
/* if register_netdev failed, then netdev_ops
* would have been set to NULL
@@ -351,6 +368,80 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
__bnxt_vf_reps_destroy(bp);
}
+/* Free the VF-Reps in firmware, during firmware hot-reset processing.
+ * Note that the VF-Rep netdevs are still active (not unregistered) during
+ * this process. As the mode transition from SWITCHDEV to LEGACY happens
+ * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ */
+void bnxt_vf_reps_free(struct bnxt *bp)
+{
+ u16 num_vfs = pci_num_vf(bp->pdev);
+ int i;
+
+ if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return;
+
+ for (i = 0; i < num_vfs; i++)
+ __bnxt_free_one_vf_rep(bp, bp->vf_reps[i]);
+}
+
+static int bnxt_alloc_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
+ u16 *cfa_code_map)
+{
+ /* get cfa handles from FW */
+ if (hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx, &vf_rep->tx_cfa_action,
+ &vf_rep->rx_cfa_code))
+ return -ENOLINK;
+
+ cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx;
+ vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
+ if (!vf_rep->dst)
+ return -ENOMEM;
+
+ /* only cfa_action is needed to mux a packet while TXing */
+ vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action;
+ vf_rep->dst->u.port_info.lower_dev = bp->dev;
+
+ return 0;
+}
+
+/* Allocate the VF-Reps in firmware, during firmware hot-reset processing.
+ * Note that the VF-Rep netdevs are still active (not unregistered) during
+ * this process. As the mode transition from SWITCHDEV to LEGACY happens
+ * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ */
+int bnxt_vf_reps_alloc(struct bnxt *bp)
+{
+ u16 *cfa_code_map = bp->cfa_code_map, num_vfs = pci_num_vf(bp->pdev);
+ struct bnxt_vf_rep *vf_rep;
+ int rc, i;
+
+ if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return 0;
+
+ if (!cfa_code_map)
+ return -EINVAL;
+
+ for (i = 0; i < MAX_CFA_CODE; i++)
+ cfa_code_map[i] = VF_IDX_INVALID;
+
+ for (i = 0; i < num_vfs; i++) {
+ vf_rep = bp->vf_reps[i];
+ vf_rep->vf_idx = i;
+
+ rc = bnxt_alloc_vf_rep(bp, vf_rep, cfa_code_map);
+ if (rc)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ netdev_info(bp->dev, "%s error=%d\n", __func__, rc);
+ bnxt_vf_reps_free(bp);
+ return rc;
+}
+
/* Use the OUI of the PF's perm addr and report the same mac addr
* for the same VF-rep each time
*/
@@ -384,7 +475,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
dev->features |= pf_dev->features;
bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx,
dev->perm_addr);
- ether_addr_copy(dev->dev_addr, dev->perm_addr);
+ eth_hw_addr_set(dev, dev->perm_addr);
/* Set VF-Rep's max-mtu to the corresponding VF's max-mtu */
if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu))
dev->max_mtu = max_mtu;
@@ -429,25 +520,9 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
vf_rep->vf_idx = i;
vf_rep->tx_cfa_action = CFA_HANDLE_INVALID;
- /* get cfa handles from FW */
- rc = hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx,
- &vf_rep->tx_cfa_action,
- &vf_rep->rx_cfa_code);
- if (rc) {
- rc = -ENOLINK;
- goto err;
- }
- cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx;
-
- vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
- GFP_KERNEL);
- if (!vf_rep->dst) {
- rc = -ENOMEM;
+ rc = bnxt_alloc_vf_rep(bp, vf_rep, cfa_code_map);
+ if (rc)
goto err;
- }
- /* only cfa_action is needed to mux a packet while TXing */
- vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action;
- vf_rep->dst->u.port_info.lower_dev = bp->dev;
bnxt_vf_rep_netdev_init(bp, vf_rep, dev);
rc = register_netdev(dev);
@@ -484,44 +559,34 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
- int rc = 0;
- mutex_lock(&bp->sriov_lock);
if (bp->eswitch_mode == mode) {
netdev_info(bp->dev, "already in %s eswitch mode\n",
mode == DEVLINK_ESWITCH_MODE_LEGACY ?
"legacy" : "switchdev");
- rc = -EINVAL;
- goto done;
+ return -EINVAL;
}
switch (mode) {
case DEVLINK_ESWITCH_MODE_LEGACY:
bnxt_vf_reps_destroy(bp);
- break;
+ return 0;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
if (bp->hwrm_spec_code < 0x10803) {
netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n");
- rc = -ENOTSUPP;
- goto done;
+ return -ENOTSUPP;
}
if (pci_num_vf(bp->pdev) == 0) {
netdev_info(bp->dev, "Enable VFs before setting switchdev mode\n");
- rc = -EPERM;
- goto done;
+ return -EPERM;
}
- rc = bnxt_vf_reps_create(bp);
- break;
+ return bnxt_vf_reps_create(bp);
default:
- rc = -EINVAL;
- goto done;
+ return -EINVAL;
}
-done:
- mutex_unlock(&bp->sriov_lock);
- return rc;
}
#endif
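
The bnxt_vfr.c hunks above replace the old stack-allocated request plus hwrm_cmd_lock pattern with the hwrm_req_*() API provided by the new bnxt_hwrm.c. A minimal sketch of that request lifecycle, modeled on the converted bnxt_hwrm_vfr_qcfg() above; the function name and the single field read back are illustrative only:

/* Illustrative sketch of the hwrm_req_init/hold/send/drop pattern used by
 * the converted bnxt_vfr.c functions above.
 */
static int example_query_max_mtu(struct bnxt *bp, u16 fid, u16 *max_mtu)
{
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);	/* allocates req on success */
	if (rc)
		return rc;

	req->fid = cpu_to_le16(fid);
	resp = hwrm_req_hold(bp, req);	/* keep the response buffer valid after send */
	rc = hwrm_req_send(bp, req);
	if (!rc)
		*max_mtu = le16_to_cpu(resp->max_mtu_configured);
	hwrm_req_drop(bp, req);		/* releases both req and resp */
	return rc;
}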
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
index d7287651422f..5637a84884d7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -19,6 +19,8 @@ void bnxt_vf_reps_close(struct bnxt *bp);
void bnxt_vf_reps_open(struct bnxt *bp);
void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb);
struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code);
+int bnxt_vf_reps_alloc(struct bnxt *bp);
+void bnxt_vf_reps_free(struct bnxt *bp);
static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
{
@@ -61,5 +63,15 @@ static inline bool bnxt_dev_is_vf_rep(struct net_device *dev)
{
return false;
}
+
+static inline int bnxt_vf_reps_alloc(struct bnxt *bp)
+{
+ return 0;
+}
+
+static inline void bnxt_vf_reps_free(struct bnxt *bp)
+{
+}
+
#endif /* CONFIG_BNXT_SRIOV */
#endif /* BNXT_VFR_H */
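
The two entry points added to bnxt_vfr.h exist for firmware hot-reset handling; per the comments in bnxt_vfr.c they must run under rtnl_lock with the representor netdevs still registered. A hypothetical caller sketch, assuming the shape of the reset path (the real call sites live in bnxt.c and are not part of this diff):

/* Hypothetical caller sketch: pair a free before the firmware reset with an
 * alloc afterwards, holding rtnl_lock across both as required above.
 */
static int example_fw_reset_path(struct bnxt *bp)
{
	int rc;

	ASSERT_RTNL();

	bnxt_vf_reps_free(bp);		/* drop FW resources, keep the netdevs */

	/* ... firmware reset and reinitialization happen here ... */

	rc = bnxt_vf_reps_alloc(bp);	/* re-acquire cfa handles from the FW */
	if (rc)
		netdev_err(bp->dev, "failed to re-alloc VF reps: %d\n", rc);
	return rc;
}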
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index c6f6f2033880..c3065ec0a479 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -20,38 +20,95 @@
#include "bnxt.h"
#include "bnxt_xdp.h"
+DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
+
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len)
+ dma_addr_t mapping, u32 len,
+ struct xdp_buff *xdp)
{
+ struct skb_shared_info *sinfo;
struct bnxt_sw_tx_bd *tx_buf;
struct tx_bd *txbd;
+ int num_frags = 0;
u32 flags;
u16 prod;
+ int i;
+ if (xdp && xdp_buff_has_frags(xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ num_frags = sinfo->nr_frags;
+ }
+
+ /* fill up the first buffer */
prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf->nr_frags = num_frags;
+ if (xdp)
+ tx_buf->page = virt_to_head_page(xdp->data);
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
- flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
- TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
+ flags = (len << TX_BD_LEN_SHIFT) |
+ ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
+ bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_opaque = prod;
txbd->tx_bd_haddr = cpu_to_le64(mapping);
+ /* now let us fill up the frags into the next buffers */
+ for (i = 0; i < num_frags ; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+ struct bnxt_sw_tx_bd *frag_tx_buf;
+ struct pci_dev *pdev = bp->pdev;
+ dma_addr_t frag_mapping;
+ int frag_len;
+
+ prod = NEXT_TX(prod);
+ txr->tx_prod = prod;
+
+ /* first fill up the first buffer */
+ frag_tx_buf = &txr->tx_buf_ring[prod];
+ frag_tx_buf->page = skb_frag_page(frag);
+
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ frag_len = skb_frag_size(frag);
+ frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
+ frag_len, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping)))
+ return NULL;
+
+ dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping);
+
+ flags = frag_len << TX_BD_LEN_SHIFT;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
+
+ len = frag_len;
+ }
+
+ flags &= ~TX_BD_LEN;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags |
+ TX_BD_FLAGS_PACKET_END);
+ /* Sync TX BD */
+ wmb();
prod = NEXT_TX(prod);
txr->tx_prod = prod;
+
return tx_buf;
}
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len, u16 rx_prod)
+ dma_addr_t mapping, u32 len, u16 rx_prod,
+ struct xdp_buff *xdp)
{
struct bnxt_sw_tx_bd *tx_buf;
- tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+ tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
tx_buf->rx_prod = rx_prod;
tx_buf->action = XDP_TX;
+
}
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
@@ -61,7 +118,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
{
struct bnxt_sw_tx_bd *tx_buf;
- tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+ tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
tx_buf->action = XDP_REDIRECT;
tx_buf->xdpf = xdpf;
dma_unmap_addr_set(tx_buf, mapping, mapping);
@@ -76,7 +133,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
struct bnxt_sw_tx_bd *tx_buf;
u16 tx_cons = txr->tx_cons;
u16 last_tx_cons = tx_cons;
- int i;
+ int i, j, frags;
for (i = 0; i < nr_pkts; i++) {
tx_buf = &txr->tx_buf_ring[tx_cons];
@@ -87,13 +144,20 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
dma_unmap_single(&pdev->dev,
dma_unmap_addr(tx_buf, mapping),
dma_unmap_len(tx_buf, len),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
xdp_return_frame(tx_buf->xdpf);
tx_buf->action = 0;
tx_buf->xdpf = NULL;
} else if (tx_buf->action == XDP_TX) {
rx_doorbell_needed = true;
last_tx_cons = tx_cons;
+
+ frags = tx_buf->nr_frags;
+ for (j = 0; j < frags; j++) {
+ tx_cons = NEXT_TX(tx_cons);
+ tx_buf = &txr->tx_buf_ring[tx_cons];
+ page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
+ }
}
tx_cons = NEXT_TX(tx_cons);
}
@@ -101,7 +165,56 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
if (rx_doorbell_needed) {
tx_buf = &txr->tx_buf_ring[last_tx_cons];
bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
+
+ }
+}
+
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
+
+ return !!xdp_prog;
+}
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 cons, u8 **data_ptr, unsigned int *len,
+ struct xdp_buff *xdp)
+{
+ struct bnxt_sw_rx_bd *rx_buf;
+ u32 buflen = PAGE_SIZE;
+ struct pci_dev *pdev;
+ dma_addr_t mapping;
+ u32 offset;
+
+ pdev = bp->pdev;
+ rx_buf = &rxr->rx_buf_ring[cons];
+ offset = bp->rx_offset;
+
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
+ dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
+
+ if (bp->xdp_has_frags)
+ buflen = BNXT_PAGE_MODE_BUF_SIZE + offset;
+
+ xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
+ xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
+}
+
+void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *shinfo;
+ int i;
+
+ if (!xdp || !xdp_buff_has_frags(xdp))
+ return;
+ shinfo = xdp_get_shared_info_from_buff(xdp);
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ struct page *page = skb_frag_page(&shinfo->frags[i]);
+
+ page_pool_recycle_direct(rxr->page_pool, page);
}
+ shinfo->nr_frags = 0;
}
/* returns the following:
@@ -109,14 +222,14 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
* false - packet should be passed to the stack.
*/
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
+ struct xdp_buff xdp, struct page *page, unsigned int *len, u8 *event)
{
struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_rx_bd *rx_buf;
struct pci_dev *pdev;
- struct xdp_buff xdp;
dma_addr_t mapping;
+ u32 tx_needed = 1;
void *orig_data;
u32 tx_avail;
u32 offset;
@@ -126,23 +239,13 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
return false;
pdev = bp->pdev;
- rx_buf = &rxr->rx_buf_ring[cons];
offset = bp->rx_offset;
- mapping = rx_buf->mapping - bp->rx_dma_offset;
- dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
-
txr = rxr->bnapi->tx_ring;
- xdp.data_hard_start = *data_ptr - offset;
- xdp.data = *data_ptr;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = *data_ptr + *len;
- xdp.rxq = &rxr->xdp_rxq;
+ /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
orig_data = xdp.data;
- rcu_read_lock();
act = bpf_prog_run_xdp(xdp_prog, &xdp);
- rcu_read_unlock();
tx_avail = bnxt_tx_avail(bp, txr);
/* If the tx ring is not full, we must not update the rx producer yet
@@ -152,26 +255,38 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
*event &= ~BNXT_RX_EVENT;
*len = xdp.data_end - xdp.data;
- if (orig_data != xdp.data) {
+ if (orig_data != xdp.data)
offset = xdp.data - xdp.data_hard_start;
- *data_ptr = xdp.data_hard_start + offset;
- }
+
switch (act) {
case XDP_PASS:
return false;
case XDP_TX:
- if (tx_avail < 1) {
+ rx_buf = &rxr->rx_buf_ring[cons];
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
+ *event = 0;
+
+ if (unlikely(xdp_buff_has_frags(&xdp))) {
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
+
+ tx_needed += sinfo->nr_frags;
+ *event = BNXT_AGG_EVENT;
+ }
+
+ if (tx_avail < tx_needed) {
trace_xdp_exception(bp->dev, xdp_prog, act);
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
- *event = BNXT_TX_EVENT;
dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
bp->rx_dir);
+
+ *event |= BNXT_TX_EVENT;
__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
- NEXT_RX(rxr->rx_prod));
+ NEXT_RX(rxr->rx_prod), &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
case XDP_REDIRECT:
@@ -179,6 +294,8 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
* redirect is coming from a frame received by the
* bnxt_en driver.
*/
+ rx_buf = &rxr->rx_buf_ring[cons];
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
dma_unmap_page_attrs(&pdev->dev, mapping,
PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
@@ -186,6 +303,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
/* if we are unable to allocate a new buffer, abort and reuse */
if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
@@ -199,12 +317,13 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
*event |= BNXT_REDIRECT_EVENT;
break;
default:
- bpf_warn_invalid_xdp_action(act);
- /* Fall thru */
+ bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(bp->dev, xdp_prog, act);
- /* Fall thru */
+ fallthrough;
case XDP_DROP:
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
break;
}
@@ -219,7 +338,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
struct pci_dev *pdev = bp->pdev;
struct bnxt_tx_ring_info *txr;
dma_addr_t mapping;
- int drops = 0;
+ int nxmit = 0;
int ring;
int i;
@@ -231,25 +350,26 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
ring = smp_processor_id() % bp->tx_nr_rings_xdp;
txr = &bp->tx_ring[ring];
+ if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
+ return -EINVAL;
+
+ if (static_branch_unlikely(&bnxt_xdp_locking_key))
+ spin_lock(&txr->xdp_tx_lock);
+
for (i = 0; i < num_frames; i++) {
struct xdp_frame *xdp = frames[i];
- if (!txr || !bnxt_tx_avail(bp, txr) ||
- !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) {
- xdp_return_frame_rx_napi(xdp);
- drops++;
- continue;
- }
+ if (!bnxt_tx_avail(bp, txr))
+ break;
mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, mapping)) {
- xdp_return_frame_rx_napi(xdp);
- drops++;
- continue;
- }
+ if (dma_mapping_error(&pdev->dev, mapping))
+ break;
+
__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
+ nxmit++;
}
if (flags & XDP_XMIT_FLUSH) {
@@ -258,7 +378,10 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
}
- return num_frames - drops;
+ if (static_branch_unlikely(&bnxt_xdp_locking_key))
+ spin_unlock(&txr->xdp_tx_lock);
+
+ return nxmit;
}
/* Under rtnl_lock */
@@ -268,8 +391,9 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
int tx_xdp = 0, rc, tc;
struct bpf_prog *old;
- if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
- netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
+ if (prog && !prog->aux->xdp_has_frags &&
+ bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+ netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
return -EOPNOTSUPP;
}
@@ -277,8 +401,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
return -EOPNOTSUPP;
}
- if (prog)
+ if (prog) {
tx_xdp = bp->rx_nr_rings;
+ bp->xdp_has_frags = prog->aux->xdp_has_frags;
+ }
tc = netdev_get_num_tc(dev);
if (!tc)
@@ -329,13 +455,32 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
rc = bnxt_xdp_set(bp, xdp->prog);
break;
- case XDP_QUERY_PROG:
- xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0;
- rc = 0;
- break;
default:
rc = -EINVAL;
break;
}
return rc;
}
+
+struct sk_buff *
+bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
+ struct page_pool *pool, struct xdp_buff *xdp,
+ struct rx_cmp_ext *rxcmp1)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+
+ if (!skb)
+ return NULL;
+ skb_checksum_none_assert(skb);
+ if (RX_CMP_L4_CS_OK(rxcmp1)) {
+ if (bp->dev->features & NETIF_F_RXCSUM) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+ }
+ }
+ xdp_update_skb_shared_info(skb, num_frags,
+ sinfo->xdp_frags_size,
+ PAGE_SIZE * sinfo->nr_frags,
+ xdp_buff_is_frag_pfmemalloc(xdp));
+ return skb;
+}
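
With multi-buffer XDP, the XDP_TX path in bnxt_rx_xdp() above has to reserve one TX BD for the head buffer plus one per fragment before queuing the packet; if the ring cannot hold them all, the packet is dropped and its buffers are recycled. A small sketch of that accounting, with an illustrative helper name:

/* Sketch of the descriptor accounting used by the XDP_TX path above:
 * one BD for the head buffer plus one per frag must fit in the TX ring.
 */
static bool example_xdp_tx_fits(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
				struct xdp_buff *xdp)
{
	u32 bds_needed = 1;	/* head buffer */

	if (xdp_buff_has_frags(xdp))
		bds_needed += xdp_get_shared_info_from_buff(xdp)->nr_frags;

	return bnxt_tx_avail(bp, txr) >= bds_needed;
}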
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 0df40c3beb05..505911ae095d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -10,15 +10,29 @@
#ifndef BNXT_XDP_H
#define BNXT_XDP_H
+DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
+
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len);
+ dma_addr_t mapping, u32 len,
+ struct xdp_buff *xdp);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct page *page, u8 **data_ptr, unsigned int *len,
+ struct xdp_buff xdp, struct page *page, unsigned int *len,
u8 *event);
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
struct xdp_frame **frames, u32 flags);
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 cons, u8 **data_ptr, unsigned int *len,
+ struct xdp_buff *xdp);
+void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+ struct xdp_buff *xdp);
+struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
+ u8 num_frags, struct page_pool *pool,
+ struct xdp_buff *xdp,
+ struct rx_cmp_ext *rxcmp1);
#endif
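
bnxt_xdp_xmit() above only takes txr->xdp_tx_lock when the bnxt_xdp_locking_key static key is enabled, presumably only when XDP TX rings can be shared between CPUs; the code that flips the key is outside this section. A minimal sketch of that conditional-locking pattern:

/* Sketch of the static-key-gated locking used by the XDP transmit path:
 * the spinlock cost is avoided entirely when the key is disabled.
 */
static void example_xdp_tx_locked(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
	if (static_branch_unlikely(&bnxt_xdp_locking_key))
		spin_lock(&txr->xdp_tx_lock);

	/* ... queue frames and ring the TX doorbell here ... */

	if (static_branch_unlikely(&bnxt_xdp_locking_key))
		spin_unlock(&txr->xdp_tx_lock);
}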