Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3')
 drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h            |   9
 drivers/net/ethernet/hisilicon/hns3/hnae3.h                |  41
 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c           |  12
 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c            | 485
 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h            |  49
 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c         |  19
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c     |  11
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h     |   7
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c     |  13
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c    | 668
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h    |  34
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c     |  84
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c    |  13
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c   | 154
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h   |   9
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c  | 614
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h  |  42
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c   |  18
 18 files changed, 1820 insertions(+), 462 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 038326cfda93..4d9cf39da48c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -36,6 +36,9 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */
HCLGE_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */
HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */
+ HCLGE_MBX_KEEP_ALIVE, /* (VF -> PF) send keep alive cmd */
+ HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */
+ HCLGE_MBX_SET_MTU, /* (VF -> PF) set mtu */
};
/* below are per-VF mac-vlan subcodes */
@@ -85,6 +88,12 @@ struct hclge_mbx_pf_to_vf_cmd {
u16 msg[8];
};
+struct hclge_vf_rst_cmd {
+ u8 dest_vfid;
+ u8 vf_rst;
+ u8 rsv[22];
+};
+
/* used by VF to store the received Async responses from PF */
struct hclgevf_mbx_arq_ring {
#define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8
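For context, the VF drives the new HCLGE_MBX_KEEP_ALIVE opcode through the
driver's existing mailbox helper. A minimal sketch, assuming the
hclgevf_send_mbx_msg() signature used by this generation of the driver (the
helper itself is not part of this hunk, and hclgevf_keep_alive() here is
illustrative, not from this patch):

	/* VF -> PF heartbeat: no payload, and no response is acted upon */
	static int hclgevf_keep_alive(struct hclgevf_dev *hdev)
	{
		u8 respmsg;

		return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0,
					    NULL, 0, false, &respmsg,
					    sizeof(respmsg));
	}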
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 055b40606dbc..4a39feaba8b2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -52,6 +52,7 @@
#define HNAE3_UNIC_CLIENT_INITED_B 0x4
#define HNAE3_ROCE_CLIENT_INITED_B 0x5
#define HNAE3_DEV_SUPPORT_FD_B 0x6
+#define HNAE3_DEV_SUPPORT_GRO_B 0x7
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -65,6 +66,9 @@
#define hnae3_dev_fd_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
+#define hnae3_dev_gro_supported(hdev) \
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B)
+
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
#define ring_ptr_move_bw(ring, p) \
@@ -124,7 +128,10 @@ enum hnae3_reset_notify_type {
enum hnae3_reset_type {
HNAE3_VF_RESET,
+ HNAE3_VF_FUNC_RESET,
+ HNAE3_VF_PF_FUNC_RESET,
HNAE3_VF_FULL_RESET,
+ HNAE3_FLR_RESET,
HNAE3_FUNC_RESET,
HNAE3_CORE_RESET,
HNAE3_GLOBAL_RESET,
@@ -132,6 +139,11 @@ enum hnae3_reset_type {
HNAE3_NONE_RESET,
};
+enum hnae3_flr_state {
+ HNAE3_FLR_DOWN,
+ HNAE3_FLR_DONE,
+};
+
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -162,6 +174,7 @@ struct hnae3_client_ops {
int (*setup_tc)(struct hnae3_handle *handle, u8 tc);
int (*reset_notify)(struct hnae3_handle *handle,
enum hnae3_reset_notify_type type);
+ enum hnae3_reset_type (*process_hw_error)(struct hnae3_handle *handle);
};
#define HNAE3_CLIENT_NAME_LENGTH 16
@@ -197,6 +210,10 @@ struct hnae3_ae_dev {
* Enable the hardware
* stop()
* Disable the hardware
+ * client_start()
+ * Inform hclge that the client has been started
+ * client_stop()
+ * Inform hclge that the client has been stopped
* get_status()
* Get the carrier state of the back channel of the handle, 1 for ok, 0 for
* non-ok
@@ -292,17 +309,22 @@ struct hnae3_ae_dev {
* Set vlan filter config of vf
* enable_hw_strip_rxvtag()
* Enable/disable hardware strip vlan tag of packets received
+ * set_gro_en()
+ * Enable/disable HW GRO
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev);
-
+ void (*flr_prepare)(struct hnae3_ae_dev *ae_dev);
+ void (*flr_done)(struct hnae3_ae_dev *ae_dev);
int (*init_client_instance)(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev);
void (*uninit_client_instance)(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev);
int (*start)(struct hnae3_handle *handle);
void (*stop)(struct hnae3_handle *handle);
+ int (*client_start)(struct hnae3_handle *handle);
+ void (*client_stop)(struct hnae3_handle *handle);
int (*get_status)(struct hnae3_handle *handle);
void (*get_ksettings_an_result)(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed, u8 *duplex);
@@ -403,6 +425,8 @@ struct hnae3_ae_ops {
u16 vlan, u8 qos, __be16 proto);
int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle);
+ void (*set_default_reset_request)(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type);
void (*get_channels)(struct hnae3_handle *handle,
struct ethtool_channels *ch);
void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
@@ -430,6 +454,10 @@ struct hnae3_ae_ops {
int (*restore_fd_rules)(struct hnae3_handle *handle);
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
pci_ers_result_t (*process_hw_error)(struct hnae3_ae_dev *ae_dev);
+ bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
+ bool (*ae_dev_resetting)(struct hnae3_handle *handle);
+ unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle);
+ int (*set_gro_en)(struct hnae3_handle *handle, int enable);
};
struct hnae3_dcb_ops {
@@ -488,6 +516,14 @@ struct hnae3_roce_private_info {
void __iomem *roce_io_base;
int base_vector;
int num_vectors;
+
+ /* The attributes below are defined for the RoCE client; hnae3
+ * gives them initial values, and the RoCE client can modify
+ * and use them.
+ */
+ unsigned long reset_state;
+ unsigned long instance_state;
+ unsigned long state;
};
struct hnae3_unic_private_info {
@@ -520,9 +556,6 @@ struct hnae3_handle {
struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */
u64 flags; /* Indicate the capabilities for this handle*/
- unsigned long last_reset_time;
- enum hnae3_reset_type reset_level;
-
union {
struct net_device *netdev; /* first member */
struct hnae3_knic_private_info kinfo;
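The three new query ops (get_hw_reset_stat, ae_dev_resetting,
ae_dev_reset_cnt) give upper-layer clients a way to gate I/O around resets.
A hypothetical consumer, for illustration only (client_can_do_io() is not
part of this patch):

	static bool client_can_do_io(struct hnae3_handle *h)
	{
		const struct hnae3_ae_ops *ops = h->ae_algo->ops;

		/* back off while the ae_dev is mid-reset */
		if (ops->ae_dev_resetting && ops->ae_dev_resetting(h))
			return false;

		/* hardware reset is still asserted in the reset registers */
		if (ops->get_hw_reset_stat && ops->get_hw_reset_stat(h))
			return false;

		return true;
	}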
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index ea5f8a84070d..b6fabbbdfd5b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -9,6 +9,9 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_getets)
return h->kinfo.dcb_ops->ieee_getets(h, ets);
@@ -20,6 +23,9 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_setets)
return h->kinfo.dcb_ops->ieee_setets(h, ets);
@@ -31,6 +37,9 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_getpfc)
return h->kinfo.dcb_ops->ieee_getpfc(h, pfc);
@@ -42,6 +51,9 @@ int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_setpfc)
return h->kinfo.dcb_ops->ieee_setpfc(h, pfc);
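The hns3_nic_resetting() guard added to each dcbnl entry point above is the
same pattern applied to the ethtool paths later in this patch: any
configuration hook that would touch hardware while a reset is in flight now
returns -EBUSY instead of racing the reset task.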
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 20fcf0d1c2ce..7f81db3df041 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -15,6 +15,7 @@
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
+#include <net/tcp.h>
#include <net/vxlan.h>
#include "hnae3.h"
@@ -312,6 +313,24 @@ static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
return min_t(u16, rss_size, max_rss_size);
}
+static void hns3_tqp_enable(struct hnae3_queue *tqp)
+{
+ u32 rcb_reg;
+
+ rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+ rcb_reg |= BIT(HNS3_RING_EN_B);
+ hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
+static void hns3_tqp_disable(struct hnae3_queue *tqp)
+{
+ u32 rcb_reg;
+
+ rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+ rcb_reg &= ~BIT(HNS3_RING_EN_B);
+ hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -334,6 +353,10 @@ static int hns3_nic_net_up(struct net_device *netdev)
for (i = 0; i < priv->vector_num; i++)
hns3_vector_enable(&priv->tqp_vector[i]);
+ /* enable rcb */
+ for (j = 0; j < h->kinfo.num_tqps; j++)
+ hns3_tqp_enable(h->kinfo.tqp[j]);
+
/* start the ae_dev */
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
if (ret)
@@ -344,6 +367,9 @@ static int hns3_nic_net_up(struct net_device *netdev)
return 0;
out_start_err:
+ while (j--)
+ hns3_tqp_disable(h->kinfo.tqp[j]);
+
for (j = i - 1; j >= 0; j--)
hns3_vector_disable(&priv->tqp_vector[j]);
@@ -354,11 +380,13 @@ out_start_err:
static int hns3_nic_net_open(struct net_device *netdev)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo;
int i, ret;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
netif_carrier_off(netdev);
ret = hns3_nic_set_real_num_queue(netdev);
@@ -378,23 +406,24 @@ static int hns3_nic_net_open(struct net_device *netdev)
kinfo->prio_tc[i]);
}
- priv->ae_handle->last_reset_time = jiffies;
return 0;
}
static void hns3_nic_net_down(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops;
int i;
- if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
- return;
-
/* disable vectors */
for (i = 0; i < priv->vector_num; i++)
hns3_vector_disable(&priv->tqp_vector[i]);
+ /* disable rcb */
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
if (ops->stop)
@@ -408,6 +437,11 @@ static void hns3_nic_net_down(struct net_device *netdev)
static int hns3_nic_net_stop(struct net_device *netdev)
{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return 0;
+
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
@@ -1312,6 +1346,15 @@ static int hns3_nic_set_features(struct net_device *netdev,
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
+ if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
+ if (features & NETIF_F_GRO_HW)
+ ret = h->ae_algo->ops->set_gro_en(h, true);
+ else
+ ret = h->ae_algo->ops->set_gro_en(h, false);
+ if (ret)
+ return ret;
+ }
+
if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
h->ae_algo->ops->enable_vlan_filter) {
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -1530,18 +1573,11 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- bool if_running = netif_running(netdev);
int ret;
if (!h->ae_algo->ops->set_mtu)
return -EOPNOTSUPP;
- /* if this was called with netdev up then bring netdevice down */
- if (if_running) {
- (void)hns3_nic_net_stop(netdev);
- msleep(100);
- }
-
ret = h->ae_algo->ops->set_mtu(h, new_mtu);
if (ret)
netdev_err(netdev, "failed to change MTU in hardware %d\n",
@@ -1549,10 +1585,6 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
else
netdev->mtu = new_mtu;
- /* if the netdev was running earlier, bring it up again */
- if (if_running && hns3_nic_net_open(netdev))
- ret = -EINVAL;
-
return ret;
}
@@ -1615,10 +1647,9 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
priv->tx_timeout_count++;
- if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
- return;
-
- /* request the reset */
+ /* request the reset, and let hclge determine
+ * which reset level should be done
+ */
if (h->ae_algo->ops->reset_event)
h->ae_algo->ops->reset_event(h->pdev, h);
}
@@ -1682,8 +1713,10 @@ static void hns3_disable_sriov(struct pci_dev *pdev)
static void hns3_get_dev_capability(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev)
{
- if (pdev->revision >= 0x21)
+ if (pdev->revision >= 0x21) {
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
+ }
}
/* hns3_probe - Device initialization routine
@@ -1819,9 +1852,29 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
+static void hns3_reset_prepare(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "hns3 flr prepare\n");
+ if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
+ ae_dev->ops->flr_prepare(ae_dev);
+}
+
+static void hns3_reset_done(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "hns3 flr done\n");
+ if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
+ ae_dev->ops->flr_done(ae_dev);
+}
+
static const struct pci_error_handlers hns3_err_handler = {
.error_detected = hns3_error_detected,
.slot_reset = hns3_slot_reset,
+ .reset_prepare = hns3_reset_prepare,
+ .reset_done = hns3_reset_done,
};
static struct pci_driver hns3_driver = {
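These two callbacks hook the PCI core's Function Level Reset flow: the core
calls reset_prepare() before asserting FLR, letting flr_prepare() quiesce
the device, and reset_done() afterwards, letting flr_done() bring it back
up. Both are routed to the new flr_prepare/flr_done ae_dev ops declared in
hnae3.h above.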
@@ -1875,7 +1928,9 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
if (pdev->revision >= 0x21) {
- netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_GRO_HW;
+ netdev->features |= NETIF_F_GRO_HW;
if (!(h->flags & HNAE3_SUPPORT_VF)) {
netdev->hw_features |= NETIF_F_NTUPLE;
@@ -2253,6 +2308,12 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (!(netdev->features & NETIF_F_RXCSUM))
return;
+ /* We MUST enable hardware checksum before enabling hardware GRO */
+ if (skb_shinfo(skb)->gso_size) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
+ }
+
/* check if hardware has done checksum */
if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
return;
@@ -2296,6 +2357,9 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
+ if (skb_has_frag_list(skb))
+ napi_gro_flush(&ring->tqp_vector->napi, false);
+
napi_gro_receive(&ring->tqp_vector->napi, skb);
}
@@ -2329,6 +2393,153 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
}
}
+static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
+ unsigned char *va)
+{
+#define HNS3_NEED_ADD_FRAG 1
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+ struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct sk_buff *skb;
+
+ ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
+ skb = ring->skb;
+ if (unlikely(!skb)) {
+ netdev_err(netdev, "alloc rx skb fail\n");
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -ENOMEM;
+ }
+
+ prefetchw(skb->data);
+
+ ring->pending_buf = 1;
+ ring->frag_num = 0;
+ ring->tail_skb = NULL;
+ if (length <= HNS3_RX_HEAD_SIZE) {
+ memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+
+ /* We can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+ desc_cb->reuse_flag = 1;
+ else /* This page cannot be reused so discard it */
+ put_page(desc_cb->priv);
+
+ ring_ptr_move_fw(ring, next_to_clean);
+ return 0;
+ }
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.seg_pkt_cnt++;
+ u64_stats_update_end(&ring->syncp);
+
+ ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+ __skb_put(skb, ring->pull_len);
+ hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
+ desc_cb);
+ ring_ptr_move_fw(ring, next_to_clean);
+
+ return HNS3_NEED_ADD_FRAG;
+}
+
+static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
+ struct sk_buff **out_skb, bool pending)
+{
+ struct sk_buff *skb = *out_skb;
+ struct sk_buff *head_skb = *out_skb;
+ struct sk_buff *new_skb;
+ struct hns3_desc_cb *desc_cb;
+ struct hns3_desc *pre_desc;
+ u32 bd_base_info;
+ int pre_bd;
+
+ /* if there is a pending BD, the SW param next_to_clean has already
+ * moved past it, so the FE bit must be read from the previous BD
+ */
+ if (pending) {
+ pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
+ ring->desc_num;
+ pre_desc = &ring->desc[pre_bd];
+ bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
+ } else {
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ }
+
+ while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))
+ return -ENXIO;
+
+ if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
+ new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
+ HNS3_RX_HEAD_SIZE);
+ if (unlikely(!new_skb)) {
+ netdev_err(ring->tqp->handle->kinfo.netdev,
+ "alloc rx skb frag fail\n");
+ return -ENXIO;
+ }
+ ring->frag_num = 0;
+
+ if (ring->tail_skb) {
+ ring->tail_skb->next = new_skb;
+ ring->tail_skb = new_skb;
+ } else {
+ skb_shinfo(skb)->frag_list = new_skb;
+ ring->tail_skb = new_skb;
+ }
+ }
+
+ if (ring->tail_skb) {
+ head_skb->truesize += hnae3_buf_size(ring);
+ head_skb->data_len += le16_to_cpu(desc->rx.size);
+ head_skb->len += le16_to_cpu(desc->rx.size);
+ skb = ring->tail_skb;
+ }
+
+ hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
+ ring_ptr_move_fw(ring, next_to_clean);
+ ring->pending_buf++;
+ }
+
+ return 0;
+}
+
+static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
+ u32 bd_base_info)
+{
+ u16 gro_count;
+ u32 l3_type;
+
+ gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
+ HNS3_RXD_GRO_COUNT_S);
+ /* if there is no HW GRO, do not set gro params */
+ if (!gro_count)
+ return;
+
+ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+ * to skb_shinfo(skb)->gso_segs
+ */
+ NAPI_GRO_CB(skb)->count = gro_count;
+
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ if (l3_type == HNS3_L3_TYPE_IPV4)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ else if (l3_type == HNS3_L3_TYPE_IPV6)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ return;
+
+ skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
+ HNS3_RXD_GRO_SIZE_M,
+ HNS3_RXD_GRO_SIZE_S);
+ if (skb_shinfo(skb)->gso_size)
+ tcp_gro_complete(skb);
+}
+
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
struct sk_buff *skb)
{
@@ -2345,18 +2556,16 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
}
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
- struct sk_buff **out_skb, int *out_bnum)
+ struct sk_buff **out_skb)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *desc;
- struct sk_buff *skb;
- unsigned char *va;
u32 bd_base_info;
- int pull_len;
u32 l234info;
int length;
- int bnum;
+ int ret;
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
@@ -2368,9 +2577,10 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
/* Check valid BD */
if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
- return -EFAULT;
+ return -ENXIO;
- va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
+ if (!skb)
+ ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
/* Prefetch first cache line of first page
* Idea is to cache few bytes of the header of the packet. Our L1 Cache
@@ -2379,62 +2589,42 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
* lines. In such a case, single fetch would suffice to cache in the
* relevant part of the header.
*/
- prefetch(va);
+ prefetch(ring->va);
#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
+ prefetch(ring->va + L1_CACHE_BYTES);
#endif
- skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
- HNS3_RX_HEAD_SIZE);
- if (unlikely(!skb)) {
- netdev_err(netdev, "alloc rx skb fail\n");
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
-
- return -ENOMEM;
- }
-
- prefetchw(skb->data);
+ if (!skb) {
+ ret = hns3_alloc_skb(ring, length, ring->va);
+ *out_skb = skb = ring->skb;
- bnum = 1;
- if (length <= HNS3_RX_HEAD_SIZE) {
- memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
-
- /* We can reuse buffer as-is, just make sure it is local */
- if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
- desc_cb->reuse_flag = 1;
- else /* This page cannot be reused so discard it */
- put_page(desc_cb->priv);
+ if (ret < 0) /* alloc buffer fail */
+ return ret;
+ if (ret > 0) { /* need add frag */
+ ret = hns3_add_frag(ring, desc, &skb, false);
+ if (ret)
+ return ret;
- ring_ptr_move_fw(ring, next_to_clean);
+ /* As the head data may be changed when GRO is enabled, copy
+ * the head data in after the rest of the packet has been received
+ */
+ memcpy(skb->data, ring->va,
+ ALIGN(ring->pull_len, sizeof(long)));
+ }
} else {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.seg_pkt_cnt++;
- u64_stats_update_end(&ring->syncp);
-
- pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
-
- memcpy(__skb_put(skb, pull_len), va,
- ALIGN(pull_len, sizeof(long)));
-
- hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
- ring_ptr_move_fw(ring, next_to_clean);
+ ret = hns3_add_frag(ring, desc, &skb, true);
+ if (ret)
+ return ret;
- while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
- desc = &ring->desc[ring->next_to_clean];
- desc_cb = &ring->desc_cb[ring->next_to_clean];
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
- ring_ptr_move_fw(ring, next_to_clean);
- bnum++;
- }
+ /* As the head data may be changed when GRO is enabled, copy
+ * the head data in after the rest of the packet has been received
+ */
+ memcpy(skb->data, ring->va,
+ ALIGN(ring->pull_len, sizeof(long)));
}
- *out_bnum = bnum;
-
l234info = le32_to_cpu(desc->rx.l234_info);
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
@@ -2484,7 +2674,11 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ring->tqp_vector->rx_group.total_bytes += skb->len;
+ /* This is needed in order to enable forwarding support */
+ hns3_set_gro_param(skb, l234info, bd_base_info);
+
hns3_rx_checksum(ring, skb, desc);
+ *out_skb = skb;
hns3_set_rx_skb_rss_type(ring, skb);
return 0;
@@ -2497,9 +2691,9 @@ int hns3_clean_rx_ring(
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int recv_pkts, recv_bds, clean_count, err;
- int unused_count = hns3_desc_unused(ring);
- struct sk_buff *skb = NULL;
- int num, bnum = 0;
+ int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
+ struct sk_buff *skb = ring->skb;
+ int num;
num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
rmb(); /* Make sure num taken effect before the other data is touched */
@@ -2513,24 +2707,32 @@ int hns3_clean_rx_ring(
hns3_nic_alloc_rx_buffers(ring,
clean_count + unused_count);
clean_count = 0;
- unused_count = hns3_desc_unused(ring);
+ unused_count = hns3_desc_unused(ring) -
+ ring->pending_buf;
}
/* Poll one pkt */
- err = hns3_handle_rx_bd(ring, &skb, &bnum);
+ err = hns3_handle_rx_bd(ring, &skb);
if (unlikely(!skb)) /* This fault cannot be repaired */
goto out;
- recv_bds += bnum;
- clean_count += bnum;
- if (unlikely(err)) { /* Do jump the err */
- recv_pkts++;
+ if (err == -ENXIO) { /* Do not get FE for the packet */
+ goto out;
+ } else if (unlikely(err)) { /* Do jump the err */
+ recv_bds += ring->pending_buf;
+ clean_count += ring->pending_buf;
+ ring->skb = NULL;
+ ring->pending_buf = 0;
continue;
}
/* Do update ip stack process */
skb->protocol = eth_type_trans(skb, netdev);
rx_fn(ring, skb);
+ recv_bds += ring->pending_buf;
+ clean_count += ring->pending_buf;
+ ring->skb = NULL;
+ ring->pending_buf = 0;
recv_pkts++;
}
@@ -2669,6 +2871,7 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
+ struct hns3_nic_priv *priv = netdev_priv(napi->dev);
struct hns3_enet_ring *ring;
int rx_pkt_total = 0;
@@ -2677,6 +2880,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
bool clean_complete = true;
int rx_budget;
+ if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
+ napi_complete(napi);
+ return 0;
+ }
+
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
@@ -2701,9 +2909,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
- napi_complete(napi);
- hns3_update_new_int_gl(tqp_vector);
- hns3_mask_vector_irq(tqp_vector, 1);
+ if (likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) &&
+ napi_complete(napi)) {
+ hns3_update_new_int_gl(tqp_vector);
+ hns3_mask_vector_irq(tqp_vector, 1);
+ }
return rx_pkt_total;
}
@@ -3319,6 +3529,22 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev)
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
+static int hns3_client_start(struct hnae3_handle *handle)
+{
+ if (!handle->ae_algo->ops->client_start)
+ return 0;
+
+ return handle->ae_algo->ops->client_start(handle);
+}
+
+static void hns3_client_stop(struct hnae3_handle *handle)
+{
+ if (!handle->ae_algo->ops->client_stop)
+ return;
+
+ handle->ae_algo->ops->client_stop(handle);
+}
+
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
@@ -3337,7 +3563,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->dev = &pdev->dev;
priv->netdev = netdev;
priv->ae_handle = handle;
- priv->ae_handle->last_reset_time = jiffies;
priv->tx_timeout_count = 0;
handle->kinfo.netdev = netdev;
@@ -3357,11 +3582,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- if (handle->flags & HNAE3_SUPPORT_VF)
- handle->reset_level = HNAE3_VF_RESET;
- else
- handle->reset_level = HNAE3_FUNC_RESET;
-
ret = hns3_get_ring_config(priv);
if (ret) {
ret = -ENOMEM;
@@ -3392,10 +3612,18 @@ static int hns3_client_init(struct hnae3_handle *handle)
goto out_reg_netdev_fail;
}
+ ret = hns3_client_start(handle);
+ if (ret) {
+ dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
+ goto out_reg_netdev_fail;
+ }
+
hns3_dcbnl_setup(handle);
- /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
- netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+ /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
+ netdev->max_mtu = HNS3_MAX_MTU;
+
+ set_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
@@ -3418,11 +3646,18 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
+ hns3_client_stop(handle);
+
hns3_remove_hw_addr(netdev);
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
+ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ goto out_netdev_free;
+ }
+
hns3_del_all_fd_rules(netdev, true);
hns3_force_clear_all_rx_ring(handle);
@@ -3443,6 +3678,7 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
priv->ring_data = NULL;
+out_netdev_free:
free_netdev(netdev);
}
@@ -3708,8 +3944,22 @@ static void hns3_restore_coal(struct hns3_nic_priv *priv)
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct net_device *ndev = kinfo->netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+
+ if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return 0;
+
+ /* it is cumbersome for hardware to pick-and-choose entries for deletion
+ * from table space. Hence, for function reset, software intervention is
+ * required to delete the entries
+ */
+ if (hns3_dev_ongoing_func_reset(ae_dev)) {
+ hns3_remove_hw_addr(ndev);
+ hns3_del_all_fd_rules(ndev, false);
+ }
if (!netif_running(ndev))
return 0;
@@ -3720,6 +3970,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
int ret = 0;
if (netif_running(kinfo->netdev)) {
@@ -3729,9 +3980,10 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
"hns net up fail, ret=%d!\n", ret);
return ret;
}
- handle->last_reset_time = jiffies;
}
+ clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
+
return ret;
}
@@ -3771,28 +4023,44 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
+ ret = hns3_nic_alloc_vector_data(priv);
+ if (ret)
+ return ret;
+
hns3_restore_coal(priv);
ret = hns3_nic_init_vector_data(priv);
if (ret)
- return ret;
+ goto err_dealloc_vector;
ret = hns3_init_all_ring(priv);
- if (ret) {
- hns3_nic_uninit_vector_data(priv);
- priv->ring_data = NULL;
- }
+ if (ret)
+ goto err_uninit_vector;
+
+ set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+
+ return ret;
+
+err_uninit_vector:
+ hns3_nic_uninit_vector_data(priv);
+ priv->ring_data = NULL;
+err_dealloc_vector:
+ hns3_nic_dealloc_vector_data(priv);
return ret;
}
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ return 0;
+ }
+
hns3_force_clear_all_rx_ring(handle);
ret = hns3_nic_uninit_vector_data(priv);
@@ -3803,18 +4071,15 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_store_coal(priv);
+ ret = hns3_nic_dealloc_vector_data(priv);
+ if (ret)
+ netdev_err(netdev, "dealloc vector error\n");
+
ret = hns3_uninit_all_ring(priv);
if (ret)
netdev_err(netdev, "uninit ring error\n");
- /* it is cumbersome for hardware to pick-and-choose entries for deletion
- * from table space. Hence, for function reset software intervention is
- * required to delete the entries
- */
- if (hns3_dev_ongoing_func_reset(ae_dev)) {
- hns3_remove_hw_addr(netdev);
- hns3_del_all_fd_rules(netdev, false);
- }
+ clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
}
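With NETIF_F_GRO_HW now advertised and enabled by default on revision 0x21
hardware, hardware GRO can be toggled at runtime through the standard
feature string for NETIF_F_GRO_HW, e.g. "ethtool -K eth0 rx-gro-hw on"; the
toggle lands in hns3_nic_set_features() above, which programs the firmware
through the new set_gro_en() op.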
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index d3636d088aa3..bbf227ba30f8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -15,7 +15,7 @@ extern const char hns3_driver_version[];
enum hns3_nic_state {
HNS3_NIC_STATE_TESTING,
HNS3_NIC_STATE_RESETTING,
- HNS3_NIC_STATE_REINITING,
+ HNS3_NIC_STATE_INITED,
HNS3_NIC_STATE_DOWN,
HNS3_NIC_STATE_DISABLED,
HNS3_NIC_STATE_REMOVING,
@@ -47,7 +47,7 @@ enum hns3_nic_state {
#define HNS3_RING_PREFETCH_EN_REG 0x0007C
#define HNS3_RING_CFG_VF_NUM_REG 0x00080
#define HNS3_RING_ASID_REG 0x0008C
-#define HNS3_RING_RX_VM_REG 0x00090
+#define HNS3_RING_EN_REG 0x00090
#define HNS3_RING_T0_BE_RST 0x00094
#define HNS3_RING_COULD_BE_RST 0x00098
#define HNS3_RING_WRR_WEIGHT_REG 0x0009c
@@ -76,7 +76,10 @@ enum hns3_nic_state {
#define HNS3_RING_MAX_PENDING 32768
#define HNS3_RING_MIN_PENDING 8
#define HNS3_RING_BD_MULTIPLE 8
-#define HNS3_MAX_MTU 9728
+/* max frame size of mac */
+#define HNS3_MAC_MAX_FRAME 9728
+#define HNS3_MAX_MTU \
+ (HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
#define HNS3_BD_SIZE_512_TYPE 0
#define HNS3_BD_SIZE_1024_TYPE 1
@@ -109,6 +112,10 @@ enum hns3_nic_state {
#define HNS3_RXD_DOI_B 21
#define HNS3_RXD_OL3E_B 22
#define HNS3_RXD_OL4E_B 23
+#define HNS3_RXD_GRO_COUNT_S 24
+#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S)
+#define HNS3_RXD_GRO_FIXID_B 30
+#define HNS3_RXD_GRO_ECN_B 31
#define HNS3_RXD_ODMAC_S 0
#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
@@ -135,9 +142,8 @@ enum hns3_nic_state {
#define HNS3_RXD_TSIND_S 12
#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_LKBK_B 15
-#define HNS3_RXD_HDL_S 16
-#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S)
-#define HNS3_RXD_HSIND_B 31
+#define HNS3_RXD_GRO_SIZE_S 16
+#define HNS3_RXD_GRO_SIZE_M (0x3ff << HNS3_RXD_GRO_SIZE_S)
#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
@@ -194,6 +200,8 @@ enum hns3_nic_state {
#define HNS3_VECTOR_RL_OFFSET 0x900
#define HNS3_VECTOR_RL_EN_B 6
+#define HNS3_RING_EN_B 0
+
enum hns3_pkt_l3t_type {
HNS3_L3T_NONE,
HNS3_L3T_IPV6,
@@ -399,11 +407,19 @@ struct hns3_enet_ring {
*/
int next_to_clean;
+ int pull_len; /* head length for current packet */
+ u32 frag_num;
+ unsigned char *va; /* first buffer address for current packet */
+
u32 flag; /* ring attribute */
int irq_init_flag;
int numa_node;
cpumask_t affinity_mask;
+
+ int pending_buf;
+ struct sk_buff *skb;
+ struct sk_buff *tail_skb;
};
struct hns_queue;
@@ -577,6 +593,11 @@ static inline int is_ring_empty(struct hns3_enet_ring *ring)
return ring->next_to_use == ring->next_to_clean;
}
+static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
+{
+ return readl(base + reg);
+}
+
static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
{
u8 __iomem *reg_addr = READ_ONCE(base);
@@ -586,7 +607,21 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
{
- return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET));
+ return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET ||
+ ae_dev->reset_type == HNAE3_FLR_RESET ||
+ ae_dev->reset_type == HNAE3_VF_FUNC_RESET ||
+ ae_dev->reset_type == HNAE3_VF_FULL_RESET ||
+ ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET));
+}
+
+#define hns3_read_dev(a, reg) \
+ hns3_read_reg((a)->io_base, (reg))
+
+static inline bool hns3_nic_resetting(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
}
#define hns3_write_dev(a, reg, value) \
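For reference, the new bound works out as HNS3_MAC_MAX_FRAME (9728) minus
ETH_HLEN (14), ETH_FCS_LEN (4) and two VLAN tags (2 * 4 = 8), i.e.
9728 - 26 = 9702, which matches the updated "MTU range: ... 9702" comment
in hns3_client_init() above.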
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index a4762c2b8ba1..4563638367ac 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -291,6 +291,11 @@ static void hns3_self_test(struct net_device *ndev,
int test_index = 0;
u32 i;
+ if (hns3_nic_resetting(ndev)) {
+ netdev_err(ndev, "dev resetting!");
+ return;
+ }
+
/* Only do offline selftest, or pass by default */
if (eth_test->flags != ETH_TEST_FL_OFFLINE)
return;
@@ -530,6 +535,11 @@ static void hns3_get_ringparam(struct net_device *netdev,
struct hnae3_handle *h = priv->ae_handle;
int queue_num = h->kinfo.num_tqps;
+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting!");
+ return;
+ }
+
param->tx_max_pending = HNS3_RING_MAX_PENDING;
param->rx_max_pending = HNS3_RING_MAX_PENDING;
@@ -760,6 +770,9 @@ static int hns3_set_ringparam(struct net_device *ndev,
u32 old_desc_num, new_desc_num;
int ret;
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (param->rx_mini_pending || param->rx_jumbo_pending)
return -EINVAL;
@@ -872,6 +885,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
struct hnae3_handle *h = priv->ae_handle;
u16 queue_num = h->kinfo.num_tqps;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
if (queue >= queue_num) {
netdev_err(netdev,
"Invalid queue value %d! Queue max id=%d\n",
@@ -1033,6 +1049,9 @@ static int hns3_set_coalesce(struct net_device *netdev,
int ret;
int i;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
ret = hns3_check_coalesce_para(netdev, cmd);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 690f62ed87dc..8af0cef5609b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -350,11 +350,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
hdev->hw.cmq.crq.next_to_use = 0;
hclge_cmd_init_regs(&hdev->hw);
- clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
spin_unlock_bh(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+ clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+
+ /* Check if there is a new reset pending, because a higher-level
+ * reset may happen while a lower-level reset is being processed.
+ */
+ if (hclge_is_reset_pending(hdev)) {
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ return -EBUSY;
+ }
+
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
dev_err(&hdev->pdev->dev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 872cd4bdd70d..aef044d08b11 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -152,6 +152,7 @@ enum hclge_opcode_type {
/* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
+ HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS commands */
HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
@@ -758,6 +759,12 @@ struct hclge_cfg_tso_status_cmd {
u8 rsv[20];
};
+#define HCLGE_GRO_EN_B 0
+struct hclge_cfg_gro_status_cmd {
+ __le16 gro_en;
+ u8 rsv[22];
+};
+
#define HCLGE_TSO_MSS_MIN 256
#define HCLGE_TSO_MSS_MAX 9668
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 123c37e653f3..6da9e22d82d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -818,7 +818,6 @@ static void hclge_process_igu_egu_error(struct hclge_dev *hdev,
static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
enum hclge_err_int_type int_type)
{
- enum hnae3_reset_type reset_level = HNAE3_NONE_RESET;
struct device *dev = &hdev->pdev->dev;
const struct hclge_hw_error *hw_err_lst1, *hw_err_lst2, *hw_err_lst3;
struct hclge_desc desc[2];
@@ -848,23 +847,17 @@ static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
}
err_sts = le32_to_cpu(desc[0].data[2]);
- if (err_sts) {
+ if (err_sts)
hclge_log_error(dev, hw_err_lst1, err_sts);
- reset_level = HNAE3_FUNC_RESET;
- }
err_sts = le32_to_cpu(desc[0].data[3]);
- if (err_sts) {
+ if (err_sts)
hclge_log_error(dev, hw_err_lst2, err_sts);
- reset_level = HNAE3_FUNC_RESET;
- }
if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3;
- if (err_sts) {
+ if (err_sts)
hclge_log_error(dev, hw_err_lst3, err_sts);
- reset_level = HNAE3_FUNC_RESET;
- }
}
/* clear PPP INT */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ffdd96020860..f78b8e188443 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -26,7 +26,7 @@
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
-static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
@@ -921,6 +921,28 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static int hclge_config_gro(struct hclge_dev *hdev, bool en)
+{
+ struct hclge_cfg_gro_status_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (!hnae3_dev_gro_supported(hdev))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
+ req = (struct hclge_cfg_gro_status_cmd *)desc.data;
+
+ req->gro_en = cpu_to_le16(en ? 1 : 0);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "GRO hardware config cmd failed, ret = %d\n", ret);
+
+ return ret;
+}
+
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
struct hclge_tqp *tqp;
@@ -1144,6 +1166,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
for (i = 0; i < num_vport; i++) {
vport->back = hdev;
vport->vport_id = i;
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
if (i == 0)
ret = hclge_vport_setup(vport, tqp_main_vport);
@@ -1947,10 +1970,7 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
static int hclge_mac_init(struct hclge_dev *hdev)
{
- struct hnae3_handle *handle = &hdev->vport[0].nic;
- struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
- int mtu;
int ret;
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
@@ -1964,15 +1984,16 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mac->link = 0;
- if (netdev)
- mtu = netdev->mtu;
- else
- mtu = ETH_DATA_LEN;
+ ret = hclge_set_mac_mtu(hdev, hdev->mps);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
+ return ret;
+ }
- ret = hclge_set_mtu(handle, mtu);
+ ret = hclge_buffer_alloc(hdev);
if (ret)
dev_err(&hdev->pdev->dev,
- "set mtu failed ret=%d\n", ret);
+ "allocate buffer fail, ret=%d\n", ret);
return ret;
}
@@ -2144,7 +2165,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*/
/* check for vector0 reset event sources */
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+ dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
+ set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
@@ -2152,18 +2182,13 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
}
if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+ dev_info(&hdev->pdev->dev, "core reset interrupt\n");
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
return HCLGE_VECTOR0_EVENT_RST;
}
- if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
- set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
- *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
- return HCLGE_VECTOR0_EVENT_RST;
- }
-
/* check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
@@ -2308,21 +2333,56 @@ static int hclge_notify_client(struct hclge_dev *hdev,
int ret;
ret = client->ops->reset_notify(handle, type);
- if (ret)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "notify nic client failed %d(%d)\n", type, ret);
return ret;
+ }
}
return 0;
}
+static int hclge_notify_roce_client(struct hclge_dev *hdev,
+ enum hnae3_reset_notify_type type)
+{
+ struct hnae3_client *client = hdev->roce_client;
+ int ret = 0;
+ u16 i;
+
+ if (!client)
+ return 0;
+
+ if (!client->ops->reset_notify)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ struct hnae3_handle *handle = &hdev->vport[i].roce;
+
+ ret = client->ops->reset_notify(handle, type);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "notify roce client failed %d(%d)",
+ type, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS 100
-#define HCLGE_RESET_WAIT_CNT 5
+#define HCLGE_RESET_WAIT_CNT 200
u32 val, reg, reg_bit;
u32 cnt = 0;
switch (hdev->reset_type) {
+ case HNAE3_IMP_RESET:
+ reg = HCLGE_GLOBAL_RESET_REG;
+ reg_bit = HCLGE_IMP_RESET_BIT;
+ break;
case HNAE3_GLOBAL_RESET:
reg = HCLGE_GLOBAL_RESET_REG;
reg_bit = HCLGE_GLOBAL_RESET_BIT;
@@ -2335,6 +2395,8 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
reg = HCLGE_FUN_RST_ING;
reg_bit = HCLGE_FUN_RST_ING_B;
break;
+ case HNAE3_FLR_RESET:
+ break;
default:
dev_err(&hdev->pdev->dev,
"Wait for unsupported reset type: %d\n",
@@ -2342,6 +2404,20 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return -EINVAL;
}
+ if (hdev->reset_type == HNAE3_FLR_RESET) {
+ while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
+ cnt++ < HCLGE_RESET_WAIT_CNT)
+ msleep(HCLGE_RESET_WATI_MS);
+
+ if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
+ dev_err(&hdev->pdev->dev,
+ "flr wait timeout: %d\n", cnt);
+ return -EBUSY;
+ }
+
+ return 0;
+ }
+
val = hclge_read_dev(&hdev->hw, reg);
while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WATI_MS);
@@ -2358,6 +2434,55 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return 0;
}
+static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
+{
+ struct hclge_vf_rst_cmd *req;
+ struct hclge_desc desc;
+
+ req = (struct hclge_vf_rst_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
+ req->dest_vfid = func_id;
+
+ if (reset)
+ req->vf_rst = 0x1;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
+{
+ int i;
+
+ for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+ int ret;
+
+ /* Send cmd to set/clear VF's FUNC_RST_ING */
+ ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set vf(%d) rst failed %d!\n",
+ vport->vport_id, ret);
+ return ret;
+ }
+
+ if (!reset)
+ continue;
+
+ /* Inform VF to process the reset.
+ * hclge_inform_reset_assert_to_vf may fail if VF
+ * driver is not loaded.
+ */
+ ret = hclge_inform_reset_assert_to_vf(vport);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "inform reset to vf(%d) failed %d!\n",
+ vport->vport_id, ret);
+ }
+
+ return 0;
+}
+
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
struct hclge_desc desc;
@@ -2396,11 +2521,16 @@ static void hclge_do_reset(struct hclge_dev *hdev)
break;
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF Reset requested\n");
- hclge_func_reset_cmd(hdev, 0);
/* schedule again to check later */
set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
break;
+ case HNAE3_FLR_RESET:
+ dev_info(&pdev->dev, "FLR requested\n");
+ /* schedule again to check later */
+ set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
+ hclge_reset_task_schedule(hdev);
+ break;
default:
dev_warn(&pdev->dev,
"Unsupported reset type: %d\n", hdev->reset_type);
@@ -2414,20 +2544,28 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
/* return the highest priority reset level amongst all */
- if (test_bit(HNAE3_GLOBAL_RESET, addr))
+ if (test_bit(HNAE3_IMP_RESET, addr)) {
+ rst_level = HNAE3_IMP_RESET;
+ clear_bit(HNAE3_IMP_RESET, addr);
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
rst_level = HNAE3_GLOBAL_RESET;
- else if (test_bit(HNAE3_CORE_RESET, addr))
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_CORE_RESET, addr)) {
rst_level = HNAE3_CORE_RESET;
- else if (test_bit(HNAE3_IMP_RESET, addr))
- rst_level = HNAE3_IMP_RESET;
- else if (test_bit(HNAE3_FUNC_RESET, addr))
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
rst_level = HNAE3_FUNC_RESET;
-
- /* now, clear all other resets */
- clear_bit(HNAE3_GLOBAL_RESET, addr);
- clear_bit(HNAE3_CORE_RESET, addr);
- clear_bit(HNAE3_IMP_RESET, addr);
- clear_bit(HNAE3_FUNC_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FLR_RESET, addr)) {
+ rst_level = HNAE3_FLR_RESET;
+ clear_bit(HNAE3_FLR_RESET, addr);
+ }
return rst_level;
}
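The rewritten hclge_get_reset_level() encodes a strict priority order
(IMP > global > core > function), and returning a given level now clears
the pending bits of that level and every level it subsumes; the FLR request
bit is tracked separately.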
@@ -2457,39 +2595,206 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
hclge_enable_vector(&hdev->misc_vector, true);
}
+static int hclge_reset_prepare_down(struct hclge_dev *hdev)
+{
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ /* fall through */
+ case HNAE3_FLR_RESET:
+ ret = hclge_set_all_vf_rst(hdev, true);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+{
+ u32 reg_val;
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ /* There is no mechanism for the PF to know if the VF has stopped
+ * IO; for now, just wait 100 ms for the VF to stop IO
+ */
+ msleep(100);
+ ret = hclge_func_reset_cmd(hdev, 0);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "asserting function reset fail %d!\n", ret);
+ return ret;
+ }
+
+ /* After performing PF reset, it is not necessary to do the
+ * mailbox handling or send any command to firmware, because
+ * any mailbox handling or command to firmware is only valid
+ * after hclge_cmd_init is called.
+ */
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ break;
+ case HNAE3_FLR_RESET:
+ /* There is no mechanism for the PF to know if the VF has stopped
+ * IO; for now, just wait 100 ms for the VF to stop IO
+ */
+ msleep(100);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ break;
+ case HNAE3_IMP_RESET:
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
+ BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
+ break;
+ default:
+ break;
+ }
+
+ dev_info(&hdev->pdev->dev, "prepare wait ok\n");
+
+ return ret;
+}
+
+static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
+{
+#define MAX_RESET_FAIL_CNT 5
+#define RESET_UPGRADE_DELAY_SEC 10
+
+ if (hdev->reset_pending) {
+ dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
+ hdev->reset_pending);
+ return true;
+ } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
+ (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
+ BIT(HCLGE_IMP_RESET_BIT))) {
+ dev_info(&hdev->pdev->dev,
+ "reset failed because IMP Reset is pending\n");
+ hclge_clear_reset_cause(hdev);
+ return false;
+ } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
+ hdev->reset_fail_cnt++;
+ if (is_timeout) {
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+ dev_info(&hdev->pdev->dev,
+ "re-schedule to wait for hw reset done\n");
+ return true;
+ }
+
+ dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
+ hclge_clear_reset_cause(hdev);
+ mod_timer(&hdev->reset_timer,
+ jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
+
+ return false;
+ }
+
+ hclge_clear_reset_cause(hdev);
+ dev_err(&hdev->pdev->dev, "Reset fail!\n");
+ return false;
+}
+
+static int hclge_reset_prepare_up(struct hclge_dev *hdev)
+{
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ /* fall through */
+ case HNAE3_FLR_RESET:
+ ret = hclge_set_all_vf_rst(hdev, false);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static void hclge_reset(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- struct hnae3_handle *handle;
+ bool is_timeout = false;
+ int ret;
/* Initialize ae_dev reset status as well, in case enet layer wants to
* know if device is undergoing reset
*/
ae_dev->reset_type = hdev->reset_type;
+ hdev->reset_count++;
+ hdev->last_reset_time = jiffies;
/* perform reset of the stack & ae device for a client */
- handle = &hdev->vport[0].nic;
+ ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ ret = hclge_reset_prepare_down(hdev);
+ if (ret)
+ goto err_reset;
+
rtnl_lock();
- hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
rtnl_unlock();
- if (!hclge_reset_wait(hdev)) {
- rtnl_lock();
- hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
- hclge_reset_ae_dev(hdev->ae_dev);
- hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ ret = hclge_reset_prepare_wait(hdev);
+ if (ret)
+ goto err_reset;
- hclge_clear_reset_cause(hdev);
- } else {
- rtnl_lock();
- /* schedule again to check pending resets later */
- set_bit(hdev->reset_type, &hdev->reset_pending);
- hclge_reset_task_schedule(hdev);
+ if (hclge_reset_wait(hdev)) {
+ is_timeout = true;
+ goto err_reset;
}
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
- handle->last_reset_time = jiffies;
+ ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ rtnl_lock();
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_reset_ae_dev(hdev->ae_dev);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ hclge_clear_reset_cause(hdev);
+
+ ret = hclge_reset_prepare_up(hdev);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ rtnl_unlock();
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ return;
+
+err_reset_lock:
rtnl_unlock();
- ae_dev->reset_type = HNAE3_NONE_RESET;
+err_reset:
+ if (hclge_reset_err_handle(hdev, is_timeout))
+ hclge_reset_task_schedule(hdev);
}
static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
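The reworked hclge_reset() above now runs a fully error-checked sequence:
RoCE client down -> prepare_down (assert VF resets) -> NIC client down ->
prepare_wait (assert the HW reset or FLR handshake) -> wait for hardware ->
RoCE uninit -> NIC uninit -> re-init ae_dev -> NIC init -> prepare_up
(clear VF resets) -> NIC up -> RoCE init -> RoCE up. Any failure or timeout
funnels into hclge_reset_err_handle(), which either re-schedules the reset
task or arms reset_timer to escalate to a global reset.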
@@ -2515,20 +2820,42 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
if (!handle)
handle = &hdev->vport[0].nic;
- if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
+ if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
return;
- else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
- handle->reset_level = HNAE3_FUNC_RESET;
+ else if (hdev->default_reset_request)
+ hdev->reset_level =
+ hclge_get_reset_level(hdev,
+ &hdev->default_reset_request);
+ else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
+ hdev->reset_level = HNAE3_FUNC_RESET;
dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
- handle->reset_level);
+ hdev->reset_level);
/* request reset & schedule reset task */
- set_bit(handle->reset_level, &hdev->reset_request);
+ set_bit(hdev->reset_level, &hdev->reset_request);
hclge_reset_task_schedule(hdev);
- if (handle->reset_level < HNAE3_GLOBAL_RESET)
- handle->reset_level++;
+ if (hdev->reset_level < HNAE3_GLOBAL_RESET)
+ hdev->reset_level++;
+}
+
+static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+
+ set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclge_reset_timer(struct timer_list *t)
+{
+ struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+
+ dev_info(&hdev->pdev->dev,
+ "triggering global reset in reset timer\n");
+ set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
+ hclge_reset_event(hdev->pdev, NULL);
}
static void hclge_reset_subtask(struct hclge_dev *hdev)
@@ -2542,6 +2869,7 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
* b. else, we can come back later to check this status so re-sched
* now.
*/
+ hdev->last_reset_time = jiffies;
hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
if (hdev->reset_type != HNAE3_NONE_RESET)
hclge_reset(hdev);
@@ -2584,6 +2912,23 @@ static void hclge_mailbox_service_task(struct work_struct *work)
clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
+static void hclge_update_vport_alive(struct hclge_dev *hdev)
+{
+ int i;
+
+ /* start from vport 1; vport 0 is the PF and is always alive */
+ for (i = 1; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+
+ /* if the VF is not alive, reset its MPS to the default value */
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+ }
+}
+
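
The 8 * HZ staleness check above relies on time_after(), which stays correct even when jiffies wraps around because the comparison is done as a signed difference rather than a direct unsigned compare. A small demonstration of why the naive compare breaks at the wrap while the signed-difference form does not:

    #include <stdio.h>

    /* same trick as the kernel's time_after(): signed difference */
    #define time_after(a, b)  ((long)((b) - (a)) < 0)

    int main(void)
    {
        unsigned long last = (unsigned long)-11; /* close to wraparound */
        unsigned long deadline = last + 8;       /* still before the wrap */
        unsigned long now = 3;                   /* counter wrapped: 14 ticks later */

        printf("naive now > deadline: %d\n", now > deadline);            /* 0: wrong */
        printf("time_after:           %d\n", time_after(now, deadline)); /* 1: right */
        return 0;
    }
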
static void hclge_service_task(struct work_struct *work)
{
struct hclge_dev *hdev =
@@ -2596,6 +2941,7 @@ static void hclge_service_task(struct work_struct *work)
hclge_update_speed_duplex(hdev);
hclge_update_link_status(hdev);
+ hclge_update_vport_alive(hdev);
hclge_service_complete(hdev);
}
@@ -4336,8 +4682,12 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
struct hlist_node *node;
int ret;
+ /* Return ok here; the reset error handling checks this return
+ * value, and returning an error here would make the whole reset
+ * process fail.
+ */
if (!hnae3_dev_fd_supported(hdev))
- return -EOPNOTSUPP;
+ return 0;
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
@@ -4592,6 +4942,31 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
return 0;
}
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
+ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
+}
+
+static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hdev->reset_count;
+}
+
static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4805,10 +5180,6 @@ static int hclge_ae_start(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int i;
-
- for (i = 0; i < vport->alloc_tqps; i++)
- hclge_tqp_enable(hdev, i, 0, true);
/* mac enable */
hclge_cfg_mac_mode(hdev, true);
@@ -4828,7 +5199,6 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int i;
set_bit(HCLGE_STATE_DOWN, &hdev->state);
@@ -4836,14 +5206,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
cancel_work_sync(&hdev->service_task);
clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+ /* If it is not a PF reset, the firmware will disable the MAC,
+ * so we only need to stop the PHY here.
+ */
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
+ hdev->reset_type != HNAE3_FUNC_RESET) {
hclge_mac_stop_phy(hdev);
return;
}
- for (i = 0; i < vport->alloc_tqps; i++)
- hclge_tqp_enable(hdev, i, 0, false);
-
/* Mac disable */
hclge_cfg_mac_mode(hdev, false);
@@ -4856,6 +5227,32 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
hclge_update_link_status(hdev);
}
+int hclge_vport_start(struct hclge_vport *vport)
+{
+ set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ vport->last_active_jiffies = jiffies;
+ return 0;
+}
+
+void hclge_vport_stop(struct hclge_vport *vport)
+{
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+}
+
+static int hclge_client_start(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_vport_start(vport);
+}
+
+static void hclge_client_stop(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ hclge_vport_stop(vport);
+}
+
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
u16 cmdq_resp, u8 resp_code,
enum hclge_mac_vlan_tbl_opcode op)
@@ -6003,54 +6400,76 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclge_set_vlan_rx_offload_cfg(vport);
}
-static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
struct hclge_config_max_frm_size_cmd *req;
struct hclge_desc desc;
- int max_frm_size;
- int ret;
-
- max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-
- if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
- max_frm_size > HCLGE_MAC_MAX_FRAME)
- return -EINVAL;
-
- max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- req->max_frm_size = cpu_to_le16(max_frm_size);
+ req->max_frm_size = cpu_to_le16(new_mps);
req->min_frm_size = HCLGE_MAC_MIN_FRAME;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
- else
- hdev->mps = max_frm_size;
-
- return ret;
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_set_vport_mtu(vport, new_mtu);
+}
+
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
+{
struct hclge_dev *hdev = vport->back;
- int ret;
+ int i, max_frm_size, ret = 0;
+
+ max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+ if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+ max_frm_size > HCLGE_MAC_MAX_FRAME)
+ return -EINVAL;
+
+ max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+ mutex_lock(&hdev->vport_lock);
+ /* VF's mps must fit within hdev->mps */
+ if (vport->vport_id && max_frm_size > hdev->mps) {
+ mutex_unlock(&hdev->vport_lock);
+ return -EINVAL;
+ } else if (vport->vport_id) {
+ vport->mps = max_frm_size;
+ mutex_unlock(&hdev->vport_lock);
+ return 0;
+ }
+
+ /* PF's mps must be greater than or equal to every VF's mps */
+ for (i = 1; i < hdev->num_alloc_vport; i++)
+ if (max_frm_size < hdev->vport[i].mps) {
+ mutex_unlock(&hdev->vport_lock);
+ return -EINVAL;
+ }
+
+ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
- ret = hclge_set_mac_mtu(hdev, new_mtu);
+ ret = hclge_set_mac_mtu(hdev, max_frm_size);
if (ret) {
dev_err(&hdev->pdev->dev,
"Change mtu fail, ret =%d\n", ret);
- return ret;
+ goto out;
}
+ hdev->mps = max_frm_size;
+ vport->mps = max_frm_size;
+
ret = hclge_buffer_alloc(hdev);
if (ret)
dev_err(&hdev->pdev->dev,
"Allocate buffer fail, ret =%d\n", ret);
+out:
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ mutex_unlock(&hdev->vport_lock);
return ret;
}
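
hclge_set_vport_mtu() converts the requested MTU into an on-wire frame budget before validating it: MTU plus a 14-byte Ethernet header, a 4-byte FCS, and two 4-byte VLAN tags (the new 2 * VLAN_HLEN covers stacked QinQ tags). A quick standalone check of that arithmetic and the bounds logic, using the standard sizes; the helper name is hypothetical:

    #include <stdio.h>

    #define ETH_HLEN    14  /* Ethernet header */
    #define ETH_FCS_LEN  4  /* frame check sequence */
    #define VLAN_HLEN    4  /* one 802.1Q tag */
    #define MIN_FRAME   64
    #define MAX_FRAME 9728

    /* mirrors the driver's mtu -> max frame size conversion */
    static int mtu_to_frame(int mtu)
    {
        int frame = mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

        if (frame < MIN_FRAME || frame > MAX_FRAME)
            return -1;  /* -EINVAL in the driver */
        return frame;
    }

    int main(void)
    {
        printf("mtu 1500 -> %d\n", mtu_to_frame(1500)); /* 1526, the default frame */
        printf("mtu 9702 -> %d\n", mtu_to_frame(9702)); /* 9728, the maximum */
        printf("mtu 9703 -> %d\n", mtu_to_frame(9703)); /* rejected */
        return 0;
    }
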
@@ -6250,7 +6669,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev)
if (!phydev->link || !phydev->autoneg)
return 0;
- local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+ local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
if (phydev->pause)
remote_advertising = LPA_PAUSE_CAP;
@@ -6612,6 +7031,8 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
if (hdev->service_timer.function)
del_timer_sync(&hdev->service_timer);
+ if (hdev->reset_timer.function)
+ del_timer_sync(&hdev->reset_timer);
if (hdev->service_task.func)
cancel_work_sync(&hdev->service_task);
if (hdev->rst_service_task.func)
@@ -6620,6 +7041,34 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
cancel_work_sync(&hdev->mbx_service_task);
}
+static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGE_FLR_WAIT_MS 100
+#define HCLGE_FLR_WAIT_CNT 50
+ struct hclge_dev *hdev = ae_dev->priv;
+ int cnt = 0;
+
+ clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+ set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
+ hclge_reset_event(hdev->pdev, NULL);
+
+ while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
+ cnt++ < HCLGE_FLR_WAIT_CNT)
+ msleep(HCLGE_FLR_WAIT_MS);
+
+ if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
+ dev_err(&hdev->pdev->dev,
+ "flr wait down timeout: %d\n", cnt);
+}
+
+static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+
+ set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+}
+
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -6635,7 +7084,11 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
hdev->reset_type = HNAE3_NONE_RESET;
+ hdev->reset_level = HNAE3_FUNC_RESET;
ae_dev->priv = hdev;
+ hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+
+ mutex_init(&hdev->vport_lock);
ret = hclge_pci_init(hdev);
if (ret) {
@@ -6727,6 +7180,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
+ ret = hclge_config_gro(hdev, true);
+ if (ret)
+ goto err_mdiobus_unreg;
+
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -6769,6 +7226,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_dcb_ops_set(hdev);
timer_setup(&hdev->service_timer, hclge_service_timer, 0);
+ timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
INIT_WORK(&hdev->service_task, hclge_service_task);
INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
@@ -6779,6 +7237,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_enable_vector(&hdev->misc_vector, true);
hclge_state_init(hdev);
+ hdev->last_reset_time = jiffies;
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
return 0;
@@ -6806,6 +7265,17 @@ static void hclge_stats_clear(struct hclge_dev *hdev)
memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}
+static void hclge_reset_vport_state(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ hclge_vport_start(vport);
+ vport++;
+ }
+}
+
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
@@ -6856,6 +7326,10 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = hclge_config_gro(hdev, true);
+ if (ret)
+ return ret;
+
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -6887,6 +7361,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
if (hclge_enable_tm_hw_error(hdev, true))
dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n");
+ hclge_reset_vport_state(hdev);
+
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -6913,6 +7389,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_destroy_cmd_queue(&hdev->hw);
hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
+ mutex_destroy(&hdev->vport_lock);
ae_dev->priv = NULL;
}
@@ -7272,9 +7749,19 @@ static void hclge_get_link_mode(struct hnae3_handle *handle,
}
}
+static int hclge_gro_en(struct hnae3_handle *handle, int enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_config_gro(hdev, enable);
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
+ .flr_prepare = hclge_flr_prepare,
+ .flr_done = hclge_flr_done,
.init_client_instance = hclge_init_client_instance,
.uninit_client_instance = hclge_uninit_client_instance,
.map_ring_to_vector = hclge_map_ring_to_vector,
@@ -7285,6 +7772,8 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_loopback = hclge_set_loopback,
.start = hclge_ae_start,
.stop = hclge_ae_stop,
+ .client_start = hclge_client_start,
+ .client_stop = hclge_client_stop,
.get_status = hclge_get_status,
.get_ksettings_an_result = hclge_get_ksettings_an_result,
.update_speed_duplex_h = hclge_update_speed_duplex_h,
@@ -7321,6 +7810,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
.reset_event = hclge_reset_event,
+ .set_default_reset_request = hclge_set_def_reset_request,
.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
.set_channels = hclge_set_channels,
.get_channels = hclge_get_channels,
@@ -7337,6 +7827,10 @@ static const struct hnae3_ae_ops hclge_ops = {
.restore_fd_rules = hclge_restore_fd_entries,
.enable_fd = hclge_enable_fd,
.process_hw_error = hclge_process_ras_hw_error,
+ .get_hw_reset_stat = hclge_get_hw_reset_stat,
+ .ae_dev_resetting = hclge_ae_dev_resetting,
+ .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
+ .set_gro_en = hclge_gro_en,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 0d9215404269..5f24dd41d7eb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -97,11 +97,13 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)
/* Reset related Registers */
+#define HCLGE_PF_OTHER_INT_REG 0x20600
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_MISC_VECTOR_INT_STS 0x20800
#define HCLGE_GLOBAL_RESET_REG 0x20A00
#define HCLGE_GLOBAL_RESET_BIT 0
#define HCLGE_CORE_RESET_BIT 1
+#define HCLGE_IMP_RESET_BIT 2
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0
@@ -115,8 +117,10 @@ enum HLCGE_PORT_TYPE {
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
+#define HCLGE_VECTOR0_IMP_RESET_INT_B 1
+
#define HCLGE_MAC_DEFAULT_FRAME \
- (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+ (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME 64
#define HCLGE_MAC_MAX_FRAME 9728
@@ -593,10 +597,16 @@ struct hclge_dev {
struct hclge_misc_vector misc_vector;
struct hclge_hw_stats hw_stats;
unsigned long state;
+ unsigned long flr_state;
+ unsigned long last_reset_time;
enum hnae3_reset_type reset_type;
+ enum hnae3_reset_type reset_level;
+ unsigned long default_reset_request;
unsigned long reset_request; /* reset has been requested */
unsigned long reset_pending; /* client rst is pending to be served */
+ unsigned long reset_count; /* the number of reset has been done */
+ u32 reset_fail_cnt;
u32 fw_version;
u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
u16 num_tqps; /* Num task queue pairs of this PF */
@@ -644,6 +654,7 @@ struct hclge_dev {
unsigned long service_timer_period;
unsigned long service_timer_previous;
struct timer_list service_timer;
+ struct timer_list reset_timer;
struct work_struct service_task;
struct work_struct rst_service_task;
struct work_struct mbx_service_task;
@@ -667,6 +678,8 @@ struct hclge_dev {
u32 pkt_buf_size; /* Total pf buf size for tx/rx */
u32 mps; /* Max packet size */
+ /* vport_lock protect resource shared by vports */
+ struct mutex vport_lock;
struct hclge_vlan_type_cfg vlan_type_cfg;
@@ -717,6 +730,11 @@ struct hclge_rss_tuple_cfg {
u8 ipv6_fragment_en;
};
+enum HCLGE_VPORT_STATE {
+ HCLGE_VPORT_STATE_ALIVE,
+ HCLGE_VPORT_STATE_MAX
+};
+
struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */
@@ -742,6 +760,10 @@ struct hclge_vport {
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
struct hnae3_handle roce;
+
+ unsigned long state;
+ unsigned long last_active_jiffies;
+ u32 mps; /* Max packet size */
};
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -768,6 +790,12 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
return tqp->index;
}
+static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
+{
+ return !!hdev->reset_pending;
+}
+
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
@@ -777,9 +805,12 @@ int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
+int hclge_vport_start(struct hclge_vport *vport);
+void hclge_vport_stop(struct hclge_vport *vport);
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index f890022938d9..e16a730a5f54 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -79,15 +79,26 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
return status;
}
-static int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
+ struct hclge_dev *hdev = vport->back;
+ enum hnae3_reset_type reset_type;
u8 msg_data[2];
u8 dest_vfid;
dest_vfid = (u8)vport->vport_id;
+ if (hdev->reset_type == HNAE3_FUNC_RESET)
+ reset_type = HNAE3_VF_PF_FUNC_RESET;
+ else if (hdev->reset_type == HNAE3_FLR_RESET)
+ reset_type = HNAE3_VF_FULL_RESET;
+ else
+ return -EINVAL;
+
+ memcpy(&msg_data[0], &reset_type, sizeof(u16));
+
/* send this requested info to VF */
- return hclge_send_mbx_msg(vport, msg_data, sizeof(u8),
+ return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}
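
hclge_inform_reset_assert_to_vf() serializes the reset type into the mailbox payload with a plain memcpy rather than an explicit endian conversion; that is workable here because the PF and VF mailbox endpoints live on the same SoC and therefore share byte order. A sketch of that encode/decode round trip under the same same-host assumption (the enum values are hypothetical placeholders):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    enum reset_type { VF_PF_FUNC_RESET = 2, VF_FULL_RESET = 3 };

    int main(void)
    {
        uint8_t msg_data[2];
        uint16_t wire = VF_FULL_RESET;
        uint16_t decoded;

        /* sender side: place the 16-bit code into the message buffer */
        memcpy(msg_data, &wire, sizeof(wire));

        /* receiver side: pull it back out; both ends share one SoC,
         * so host byte order matches and no conversion is needed
         */
        memcpy(&decoded, msg_data, sizeof(decoded));
        printf("decoded reset type: %u\n", decoded);
        return 0;
    }
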
@@ -290,6 +301,21 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
return status;
}
+static int hclge_set_vf_alive(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ bool alive = !!mbx_req->msg[2];
+ int ret = 0;
+
+ if (alive)
+ ret = hclge_vport_start(vport);
+ else
+ hclge_vport_stop(vport);
+
+ return ret;
+}
+
static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
@@ -363,24 +389,28 @@ static void hclge_reset_vf(struct hclge_vport *vport,
int ret;
dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!",
- mbx_req->mbx_src_vfid);
-
- /* Acknowledge VF that PF is now about to assert the reset for the VF.
- * On receiving this message VF will get into pending state and will
- * start polling for the hardware reset completion status.
- */
- ret = hclge_inform_reset_assert_to_vf(vport);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to inform VF(%d)of reset, reset failed!\n",
- ret, vport->vport_id);
- return;
- }
+ vport->vport_id);
- dev_warn(&hdev->pdev->dev, "PF is now resetting VF %d.\n",
- mbx_req->mbx_src_vfid);
- /* reset this virtual function */
- hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid);
+ ret = hclge_func_reset_cmd(hdev, vport->vport_id);
+ hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
+}
+
+static void hclge_vf_keep_alive(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ vport->last_active_jiffies = jiffies;
+}
+
+static int hclge_set_vf_mtu(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ int ret;
+ u32 mtu;
+
+ memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu));
+ ret = hclge_set_vport_mtu(vport, mtu);
+
+ return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
}
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
@@ -460,6 +490,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
"PF failed(%d) to config VF's VLAN\n",
ret);
break;
+ case HCLGE_MBX_SET_ALIVE:
+ ret = hclge_set_vf_alive(vport, req, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to set VF's ALIVE\n",
+ ret);
+ break;
case HCLGE_MBX_GET_QINFO:
ret = hclge_get_vf_queue_info(vport, req, true);
if (ret)
@@ -487,6 +524,15 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
case HCLGE_MBX_RESET:
hclge_reset_vf(vport, req);
break;
+ case HCLGE_MBX_KEEP_ALIVE:
+ hclge_vf_keep_alive(vport, req);
+ break;
+ case HCLGE_MBX_SET_MTU:
+ ret = hclge_set_vf_mtu(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF fail(%d) to set mtu\n", ret);
+ break;
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 03018638f701..741cb3b9519d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -195,12 +195,13 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
{
struct net_device *netdev = hdev->vport[0].nic.netdev;
struct phy_device *phydev = hdev->hw.mac.phydev;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
int ret;
if (!phydev)
return 0;
- phydev->supported &= ~SUPPORTED_FIBRE;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
ret = phy_connect_direct(netdev, phydev,
hclge_mac_adjust_link,
@@ -210,7 +211,15 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
return ret;
}
- phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask);
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ mask);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ mask);
+ linkmode_and(phydev->supported, phydev->supported, mask);
phy_support_asym_pause(phydev);
return 0;
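
The mdio hunk above replaces direct bitmask operations on phydev->supported with the linkmode bitmap API: build a mask of the modes the MAC can actually drive, then AND it into the PHY's supported set. A standalone sketch of the bitmap-AND mechanics behind linkmode_and(), with arbitrary placeholder bit numbers:

    #include <stdio.h>

    #define BITS_PER_LONG   (8 * (int)sizeof(unsigned long))
    #define BITMAP_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
    #define NBITS 96    /* stand-in for __ETHTOOL_LINK_MODE_MASK_NBITS */

    static void set_bit(int nr, unsigned long *map)
    {
        map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
    }

    static int test_bit(int nr, const unsigned long *map)
    {
        return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
    }

    int main(void)
    {
        unsigned long supported[BITMAP_LONGS(NBITS)] = { 0 };
        unsigned long mask[BITMAP_LONGS(NBITS)] = { 0 };
        int i;

        set_bit(0, supported);   /* e.g. a 10/100 mode the PHY offers */
        set_bit(5, supported);   /* e.g. a gigabit mode the PHY offers */
        set_bit(70, supported);  /* a mode the MAC cannot drive */

        set_bit(0, mask);
        set_bit(5, mask);

        /* linkmode_and(): keep only the modes present in both maps */
        for (i = 0; i < BITMAP_LONGS(NBITS); i++)
            supported[i] &= mask[i];

        printf("bit 5:  %d\n", test_bit(5, supported));  /* kept */
        printf("bit 70: %d\n", test_bit(70, supported)); /* cleared */
        return 0;
    }
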
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 0d3b445f6799..d5765c8cf3a3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -72,6 +72,45 @@ static bool hclgevf_is_special_opcode(u16 opcode)
return false;
}
+static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
+{
+ struct hclgevf_dev *hdev = ring->dev;
+ struct hclgevf_hw *hw = &hdev->hw;
+ u32 reg_val;
+
+ if (ring->flag == HCLGEVF_TYPE_CSQ) {
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+ } else {
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+ }
+}
+
+static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
+{
+ hclgevf_cmd_config_regs(&hw->cmq.csq);
+ hclgevf_cmd_config_regs(&hw->cmq.crq);
+}
+
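
When programming the ring base address above, the high word is extracted as (addr >> 31) >> 1 rather than addr >> 32. Splitting the shift keeps the expression well-defined even on configurations where the DMA address type is only 32 bits wide, since shifting a value by its full bit width is undefined behaviour in C. A small demonstration of the split:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t dma = 0x0000001234abcd00ULL;

        /* low register takes the truncated low 32 bits */
        uint32_t lo = (uint32_t)dma;

        /* high register: shift in two steps so the expression stays
         * defined even if the address type were only 32 bits wide
         * (a single >> 32 on a 32-bit value is undefined behaviour)
         */
        uint32_t hi = (uint32_t)((dma >> 31) >> 1);

        printf("lo = 0x%08" PRIx32 ", hi = 0x%08" PRIx32 "\n", lo, hi);
        return 0;
    }
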
static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclgevf_desc);
@@ -96,61 +135,23 @@ static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
}
}
-static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
- struct hclgevf_cmq_ring *ring)
+static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
{
struct hclgevf_hw *hw = &hdev->hw;
- int ring_type = ring->flag;
- u32 reg_val;
+ struct hclgevf_cmq_ring *ring =
+ (ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
int ret;
- ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
- spin_lock_init(&ring->lock);
- ring->next_to_clean = 0;
- ring->next_to_use = 0;
ring->dev = hdev;
+ ring->flag = ring_type;
/* allocate CSQ/CRQ descriptor */
ret = hclgevf_alloc_cmd_desc(ring);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
(ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
- return ret;
- }
- /* initialize the hardware registers with csq/crq dma-address,
- * descriptor number, head & tail pointers
- */
- switch (ring_type) {
- case HCLGEVF_TYPE_CSQ:
- reg_val = (u32)ring->desc_dma_addr;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
- reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
-
- reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
-
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
- return 0;
- case HCLGEVF_TYPE_CRQ:
- reg_val = (u32)ring->desc_dma_addr;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
- reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
-
- reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
-
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
- return 0;
- default:
- return -EINVAL;
- }
+ return ret;
}
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
@@ -188,7 +189,8 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
spin_lock_bh(&hw->cmq.csq.lock);
- if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+ if (num > hclgevf_ring_space(&hw->cmq.csq) ||
+ test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
spin_unlock_bh(&hw->cmq.csq.lock);
return -EBUSY;
}
@@ -282,55 +284,83 @@ static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
return status;
}
-int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
{
- u32 version;
int ret;
- /* setup Tx write back timeout */
+ /* Set up the locks for the command queues */
+ spin_lock_init(&hdev->hw.cmq.csq.lock);
+ spin_lock_init(&hdev->hw.cmq.crq.lock);
+
hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+ hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+ hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
- /* setup queue CSQ/CRQ rings */
- hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
- ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
+ ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
if (ret) {
dev_err(&hdev->pdev->dev,
- "failed(%d) to initialize CSQ ring\n", ret);
+ "CSQ ring setup error %d\n", ret);
return ret;
}
- hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
- ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
+ ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
if (ret) {
dev_err(&hdev->pdev->dev,
- "failed(%d) to initialize CRQ ring\n", ret);
+ "CRQ ring setup error %d\n", ret);
goto err_csq;
}
+ return 0;
+err_csq:
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+ return ret;
+}
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+{
+ u32 version;
+ int ret;
+
+ spin_lock_bh(&hdev->hw.cmq.csq.lock);
+ spin_lock_bh(&hdev->hw.cmq.crq.lock);
+
/* initialize the pointers of async rx queue of mailbox */
hdev->arq.hdev = hdev;
hdev->arq.head = 0;
hdev->arq.tail = 0;
hdev->arq.count = 0;
+ hdev->hw.cmq.csq.next_to_clean = 0;
+ hdev->hw.cmq.csq.next_to_use = 0;
+ hdev->hw.cmq.crq.next_to_clean = 0;
+ hdev->hw.cmq.crq.next_to_use = 0;
+
+ hclgevf_cmd_init_regs(&hdev->hw);
+
+ spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+ spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+ clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+ /* Check if there is a new reset pending, because a higher level
+ * reset may happen while a lower level reset is being processed.
+ */
+ if (hclgevf_is_reset_pending(hdev)) {
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ return -EBUSY;
+ }
/* get firmware version */
ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to query firmware version\n", ret);
- goto err_crq;
+ return ret;
}
hdev->fw_version = version;
dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
return 0;
-err_crq:
- hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
-err_csq:
- hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
-
- return ret;
}
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index bc294b0c8b62..47030b42341f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -87,6 +87,8 @@ enum hclgevf_opcode_type {
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+ /* GRO command */
+ HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS cmd */
HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02,
@@ -149,6 +151,12 @@ struct hclgevf_query_res_cmd {
__le16 rsv[7];
};
+#define HCLGEVF_GRO_EN_B 0
+struct hclgevf_cfg_gro_status_cmd {
+ __le16 gro_en;
+ u8 rsv[22];
+};
+
#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
@@ -256,6 +264,7 @@ static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
int hclgevf_cmd_init(struct hclgevf_dev *hdev);
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev);
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 085edb945389..efec1b7a6a64 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2,6 +2,7 @@
// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
@@ -10,8 +11,7 @@
#define HCLGEVF_NAME "hclgevf"
-static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
-static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;
static const struct pci_device_id ae_algovf_pci_tbl[] = {
@@ -209,12 +209,6 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
struct hclgevf_tqp *tqp;
int i;
- /* if this is on going reset then we need to re-allocate the TPQs
- * since we cannot assume we would get same number of TPQs back from PF
- */
- if (hclgevf_dev_ongoing_reset(hdev))
- devm_kfree(&hdev->pdev->dev, hdev->htqp);
-
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
sizeof(struct hclgevf_tqp), GFP_KERNEL);
if (!hdev->htqp)
@@ -258,12 +252,6 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
new_tqps = kinfo->rss_size * kinfo->num_tc;
kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
- /* if this is on going reset then we need to re-allocate the hnae queues
- * as well since number of TPQs from PF might have changed.
- */
- if (hclgevf_dev_ongoing_reset(hdev))
- devm_kfree(&hdev->pdev->dev, kinfo->tqp);
-
kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
sizeof(struct hnae3_queue *), GFP_KERNEL);
if (!kinfo->tqp)
@@ -868,6 +856,9 @@ static int hclgevf_unmap_ring_from_vector(
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret, vector_id;
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ return 0;
+
vector_id = hclgevf_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&handle->pdev->dev,
@@ -956,13 +947,6 @@ static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
return status;
}
-static int hclgevf_get_queue_id(struct hnae3_queue *queue)
-{
- struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
-
- return tqp->index;
-}
-
static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -1097,38 +1081,87 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
2, true, NULL, 0);
}
+static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
+ sizeof(new_mtu), true, NULL, 0);
+}
+
static int hclgevf_notify_client(struct hclgevf_dev *hdev,
enum hnae3_reset_notify_type type)
{
struct hnae3_client *client = hdev->nic_client;
struct hnae3_handle *handle = &hdev->nic;
+ int ret;
if (!client->ops->reset_notify)
return -EOPNOTSUPP;
- return client->ops->reset_notify(handle, type);
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
+ type, ret);
+
+ return ret;
+}
+
+static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+}
+
+static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
+ unsigned long delay_us,
+ unsigned long wait_cnt)
+{
+ unsigned long cnt = 0;
+
+ while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
+ cnt++ < wait_cnt)
+ usleep_range(delay_us, delay_us * 2);
+
+ if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
+ dev_err(&hdev->pdev->dev,
+ "flr wait timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
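
hclgevf_flr_poll_timeout() is the usual bounded-poll shape: re-check a completion flag up to wait_cnt times with a sleep between attempts, then report a timeout. The readl_poll_timeout() call used further down is the generic register-read variant of the same loop. A compact userland sketch of the pattern; the completion check is a hypothetical stand-in for the hardware flag read:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static int polls;

    /* hypothetical stand-in for reading a "reset done" hardware bit */
    static bool hw_reset_done(void)
    {
        return ++polls >= 3;    /* pretend it completes on the third read */
    }

    /* same shape as the driver's wait loop: bounded polls with a sleep */
    static int poll_timeout(unsigned long delay_us, unsigned long wait_cnt)
    {
        unsigned long cnt = 0;
        bool done;

        while (!(done = hw_reset_done()) && cnt++ < wait_cnt)
            usleep(delay_us);

        return done ? 0 : -1;   /* -ETIMEDOUT in the driver */
    }

    int main(void)
    {
        printf("result %d after %d polls\n", poll_timeout(1000, 10), polls);
        return 0;
    }
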
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
-#define HCLGEVF_RESET_WAIT_MS 500
-#define HCLGEVF_RESET_WAIT_CNT 20
- u32 val, cnt = 0;
+#define HCLGEVF_RESET_WAIT_US 20000
+#define HCLGEVF_RESET_WAIT_CNT 2000
+#define HCLGEVF_RESET_WAIT_TIMEOUT_US \
+ (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
+
+ u32 val;
+ int ret;
/* wait to check the hardware reset completion status */
- val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
- while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
- (cnt < HCLGEVF_RESET_WAIT_CNT)) {
- msleep(HCLGEVF_RESET_WAIT_MS);
- val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
- cnt++;
- }
+ val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+ dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);
+
+ if (hdev->reset_type == HNAE3_FLR_RESET)
+ return hclgevf_flr_poll_timeout(hdev,
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_CNT);
+
+ ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
+ !(val & HCLGEVF_RST_ING_BITS),
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
/* hardware completion status should be available by this time */
- if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
- dev_warn(&hdev->pdev->dev,
- "could'nt get reset done status from h/w, timeout!\n");
- return -EBUSY;
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could'nt get reset done status from h/w, timeout!\n");
+ return ret;
}
/* we will wait a bit more to let reset of the stack to complete. This
@@ -1145,10 +1178,12 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
int ret;
/* uninitialize the nic client */
- hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
/* re-initialize the hclge device */
- ret = hclgevf_init_hdev(hdev);
+ ret = hclgevf_reset_hdev(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
"hclge device re-init failed, VF is disabled!\n");
@@ -1156,22 +1191,60 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
}
/* bring up the nic client again */
- hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
return 0;
}
+static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
+{
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_VF_FUNC_RESET:
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
+ 0, true, NULL, sizeof(u8));
+ break;
+ case HNAE3_FLR_RESET:
+ set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ break;
+ default:
+ break;
+ }
+
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+ dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
+ hdev->reset_type, ret);
+
+ return ret;
+}
+
static int hclgevf_reset(struct hclgevf_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
+ /* Initialize the ae_dev reset status as well, in case the enet
+ * layer wants to know if the device is undergoing reset
+ */
+ ae_dev->reset_type = hdev->reset_type;
+ hdev->reset_count++;
rtnl_lock();
/* bring down the nic to stop any ongoing TX/RX */
- hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset_lock;
rtnl_unlock();
+ ret = hclgevf_reset_prepare_wait(hdev);
+ if (ret)
+ goto err_reset;
+
/* check if VF could successfully fetch the hardware reset completion
* status from the hardware
*/
@@ -1181,58 +1254,118 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
dev_err(&hdev->pdev->dev,
"VF failed(=%d) to fetch H/W reset completion status\n",
ret);
-
- dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
- rtnl_lock();
- hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
-
- rtnl_unlock();
- return ret;
+ goto err_reset;
}
rtnl_lock();
/* now, re-initialize the nic client and ae device*/
ret = hclgevf_reset_stack(hdev);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
+ goto err_reset_lock;
+ }
/* bring up the nic to enable TX/RX again */
- hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset_lock;
rtnl_unlock();
return ret;
-}
+err_reset_lock:
+ rtnl_unlock();
+err_reset:
+ /* When the VF reset fails, only a higher level reset asserted by
+ * the PF can recover it, so re-initialize the command queue to be
+ * able to receive that higher level reset event.
+ */
+ hclgevf_cmd_init(hdev);
+ dev_err(&hdev->pdev->dev, "failed to reset VF\n");
-static int hclgevf_do_reset(struct hclgevf_dev *hdev)
-{
- int status;
- u8 respmsg;
+ return ret;
+}
- status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
- 0, false, &respmsg, sizeof(u8));
- if (status)
- dev_err(&hdev->pdev->dev,
- "VF reset request to PF failed(=%d)\n", status);
+static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
+ unsigned long *addr)
+{
+ enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
+
+ /* return the highest priority reset level amongst all */
+ if (test_bit(HNAE3_VF_RESET, addr)) {
+ rst_level = HNAE3_VF_RESET;
+ clear_bit(HNAE3_VF_RESET, addr);
+ clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
+ rst_level = HNAE3_VF_FULL_RESET;
+ clear_bit(HNAE3_VF_FULL_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
+ rst_level = HNAE3_VF_PF_FUNC_RESET;
+ clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
+ rst_level = HNAE3_VF_FUNC_RESET;
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FLR_RESET, addr)) {
+ rst_level = HNAE3_FLR_RESET;
+ clear_bit(HNAE3_FLR_RESET, addr);
+ }
- return status;
+ return rst_level;
}
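
hclgevf_get_reset_level() walks the pending bitmap from the most severe reset down and, when it picks a level, also clears the bits of every lower level it subsumes, so a queued function reset is silently absorbed by a pending full VF reset. A trimmed standalone model of that priority scheme; the enum values and bit helpers are simplified stand-ins:

    #include <stdio.h>

    enum reset { VF_RESET, VF_FUNC_RESET, VF_PF_FUNC_RESET, NONE_RESET };

    #define test_bit(nr, addr)  ((*(addr) >> (nr)) & 1UL)
    #define set_bit(nr, addr)   (*(addr) |= 1UL << (nr))
    #define clear_bit(nr, addr) (*(addr) &= ~(1UL << (nr)))

    /* pick the most severe pending reset; a bigger reset implies the
     * smaller ones, so their pending bits are cleared together
     */
    static enum reset get_reset_level(unsigned long *addr)
    {
        if (test_bit(VF_RESET, addr)) {
            clear_bit(VF_RESET, addr);
            clear_bit(VF_PF_FUNC_RESET, addr);
            clear_bit(VF_FUNC_RESET, addr);
            return VF_RESET;
        }
        if (test_bit(VF_PF_FUNC_RESET, addr)) {
            clear_bit(VF_PF_FUNC_RESET, addr);
            clear_bit(VF_FUNC_RESET, addr);
            return VF_PF_FUNC_RESET;
        }
        if (test_bit(VF_FUNC_RESET, addr)) {
            clear_bit(VF_FUNC_RESET, addr);
            return VF_FUNC_RESET;
        }
        return NONE_RESET;
    }

    int main(void)
    {
        unsigned long pending = 0;
        enum reset level;

        set_bit(VF_FUNC_RESET, &pending);
        set_bit(VF_RESET, &pending);

        /* both were requested, only the bigger one is served */
        level = get_reset_level(&pending);
        printf("level: %d, remaining: 0x%lx\n", (int)level, pending);
        return 0;
    }
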
static void hclgevf_reset_event(struct pci_dev *pdev,
struct hnae3_handle *handle)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ struct hclgevf_dev *hdev = ae_dev->priv;
dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
- handle->reset_level = HNAE3_VF_RESET;
+ if (hdev->default_reset_request)
+ hdev->reset_level =
+ hclgevf_get_reset_level(hdev,
+ &hdev->default_reset_request);
+ else
+ hdev->reset_level = HNAE3_VF_FUNC_RESET;
/* reset of this VF requested */
set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
- handle->last_reset_time = jiffies;
+ hdev->last_reset_time = jiffies;
+}
+
+static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGEVF_FLR_WAIT_MS 100
+#define HCLGEVF_FLR_WAIT_CNT 50
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int cnt = 0;
+
+ clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+ set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
+ hclgevf_reset_event(hdev->pdev, NULL);
+
+ while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
+ cnt++ < HCLGEVF_FLR_WAIT_CNT)
+ msleep(HCLGEVF_FLR_WAIT_MS);
+
+ if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
+ dev_err(&hdev->pdev->dev,
+ "flr wait down timeout: %d\n", cnt);
}
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
@@ -1321,9 +1454,15 @@ static void hclgevf_reset_service_task(struct work_struct *work)
*/
hdev->reset_attempts = 0;
- ret = hclgevf_reset(hdev);
- if (ret)
- dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
+ hdev->last_reset_time = jiffies;
+ while ((hdev->reset_type =
+ hclgevf_get_reset_level(hdev, &hdev->reset_pending))
+ != HNAE3_NONE_RESET) {
+ ret = hclgevf_reset(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF stack reset failed %d.\n", ret);
+ }
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {
/* we could be here when either of below happens:
@@ -1352,19 +1491,17 @@ static void hclgevf_reset_service_task(struct work_struct *work)
*/
if (hdev->reset_attempts > 3) {
/* prepare for full reset of stack + pcie interface */
- hdev->nic.reset_level = HNAE3_VF_FULL_RESET;
+ set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
/* "defer" schedule the reset task again */
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
} else {
hdev->reset_attempts++;
- /* request PF for resetting this VF via mailbox */
- ret = hclgevf_do_reset(hdev);
- if (ret)
- dev_warn(&hdev->pdev->dev,
- "VF rst fail, stack will call\n");
+ set_bit(hdev->reset_level, &hdev->reset_pending);
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
}
+ hclgevf_reset_task_schedule(hdev);
}
clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
@@ -1386,6 +1523,28 @@ static void hclgevf_mailbox_service_task(struct work_struct *work)
clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}
+static void hclgevf_keep_alive_timer(struct timer_list *t)
+{
+ struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
+
+ schedule_work(&hdev->keep_alive_task);
+ mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+}
+
+static void hclgevf_keep_alive_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev;
+ u8 respmsg;
+ int ret;
+
+ hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
+ 0, false, &respmsg, sizeof(u8));
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF sends keep alive cmd failed(=%d)\n", ret);
+}
+
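
The keep-alive cadence pairs with the PF-side check added earlier in this patch: the VF re-arms its keep-alive every 2 * HZ, while hclge_update_vport_alive() only declares a vport dead after 8 * HZ of silence, leaving room for roughly three lost or delayed messages. A toy simulation of that margin, with all timing in abstract seconds:

    #include <stdio.h>

    #define SEND_PERIOD 2   /* VF keep-alive period */
    #define DEAD_AFTER  8   /* PF aliveness timeout */

    int main(void)
    {
        int last_rx = 0, t;

        for (t = 0; t <= 20; t++) {
            /* VF transmits every SEND_PERIOD; pretend t=10..19 are lost */
            int delivered = (t % SEND_PERIOD == 0) && (t < 10 || t >= 20);

            if (delivered)
                last_rx = t;
            else if (t - last_rx > DEAD_AFTER)
                printf("t=%2d: vport would be marked dead\n", t);
        }
        return 0;
    }
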
static void hclgevf_service_task(struct work_struct *work)
{
struct hclgevf_dev *hdev;
@@ -1407,24 +1566,37 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}
-static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
+static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
+ u32 *clearval)
{
- u32 cmdq_src_reg;
+ u32 cmdq_src_reg, rst_ing_reg;
/* fetch the events from their corresponding regs */
cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
HCLGEVF_VECTOR0_CMDQ_SRC_REG);
+ if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
+ rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+ dev_info(&hdev->pdev->dev,
+ "receive reset interrupt 0x%x!\n", rst_ing_reg);
+ set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
+ *clearval = cmdq_src_reg;
+ return HCLGEVF_VECTOR0_EVENT_RST;
+ }
+
/* check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
*clearval = cmdq_src_reg;
- return true;
+ return HCLGEVF_VECTOR0_EVENT_MBX;
}
dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
- return false;
+ return HCLGEVF_VECTOR0_EVENT_OTHER;
}
static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
@@ -1434,19 +1606,28 @@ static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
+ enum hclgevf_evt_cause event_cause;
struct hclgevf_dev *hdev = data;
u32 clearval;
hclgevf_enable_vector(&hdev->misc_vector, false);
- if (!hclgevf_check_event_cause(hdev, &clearval))
- goto skip_sched;
+ event_cause = hclgevf_check_evt_cause(hdev, &clearval);
- hclgevf_mbx_handler(hdev);
-
- hclgevf_clear_event_cause(hdev, clearval);
+ switch (event_cause) {
+ case HCLGEVF_VECTOR0_EVENT_RST:
+ hclgevf_reset_task_schedule(hdev);
+ break;
+ case HCLGEVF_VECTOR0_EVENT_MBX:
+ hclgevf_mbx_handler(hdev);
+ break;
+ default:
+ break;
+ }
-skip_sched:
- hclgevf_enable_vector(&hdev->misc_vector, true);
+ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
+ hclgevf_clear_event_cause(hdev, clearval);
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+ }
return IRQ_HANDLED;
}
@@ -1504,6 +1685,29 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
return 0;
}
+static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
+{
+ struct hclgevf_cfg_gro_status_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ if (!hnae3_dev_gro_supported(hdev))
+ return 0;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
+ false);
+ req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
+
+ req->gro_en = cpu_to_le16(en ? 1 : 0);
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF GRO hardware config cmd failed, ret = %d.\n", ret);
+
+ return ret;
+}
+
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
@@ -1566,21 +1770,7 @@ static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
static int hclgevf_ae_start(struct hnae3_handle *handle)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- int i, queue_id;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- /* ring enable */
- queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
- if (queue_id < 0) {
- dev_warn(&hdev->pdev->dev,
- "Get invalid queue id, ignore it\n");
- continue;
- }
-
- hclgevf_tqp_enable(hdev, queue_id, 0, true);
- }
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
@@ -1595,24 +1785,10 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- int i, queue_id;
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
- for (i = 0; i < kinfo->num_tqps; i++) {
- /* Ring disable */
- queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
- if (queue_id < 0) {
- dev_warn(&hdev->pdev->dev,
- "Get invalid queue id, ignore it\n");
- continue;
- }
-
- hclgevf_tqp_enable(hdev, queue_id, 0, false);
- }
-
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
del_timer_sync(&hdev->service_timer);
@@ -1621,12 +1797,40 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
hclgevf_update_link_status(hdev, 0);
}
-static void hclgevf_state_init(struct hclgevf_dev *hdev)
+static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
- /* if this is on going reset then skip this initialization */
- if (hclgevf_dev_ongoing_reset(hdev))
- return;
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data;
+
+ msg_data = alive ? 1 : 0;
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
+ 0, &msg_data, 1, false, NULL, 0);
+}
+
+static int hclgevf_client_start(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+ return hclgevf_set_alive(handle, true);
+}
+
+static void hclgevf_client_stop(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+ ret = hclgevf_set_alive(handle, false);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "%s failed %d\n", __func__, ret);
+
+ del_timer_sync(&hdev->keep_alive_timer);
+ cancel_work_sync(&hdev->keep_alive_task);
+}
+
+static void hclgevf_state_init(struct hclgevf_dev *hdev)
+{
/* setup tasks for the MBX */
INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
@@ -1668,10 +1872,6 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
int vectors;
int i;
- /* if this is on going reset then skip this initialization */
- if (hclgevf_dev_ongoing_reset(hdev))
- return 0;
-
if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
vectors = pci_alloc_irq_vectors(pdev,
hdev->roce_base_msix_offset + 1,
@@ -1710,6 +1910,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(int), GFP_KERNEL);
if (!hdev->vector_irq) {
+ devm_kfree(&pdev->dev, hdev->vector_status);
pci_free_irq_vectors(pdev);
return -ENOMEM;
}
@@ -1721,6 +1922,8 @@ static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
+ devm_kfree(&pdev->dev, hdev->vector_status);
+ devm_kfree(&pdev->dev, hdev->vector_irq);
pci_free_irq_vectors(pdev);
}
@@ -1728,10 +1931,6 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
int ret = 0;
- /* if this is on going reset then skip this initialization */
- if (hclgevf_dev_ongoing_reset(hdev))
- return 0;
-
hclgevf_get_misc_vector(hdev);
ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
@@ -1861,14 +2060,6 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
struct hclgevf_hw *hw;
int ret;
- /* check if we need to skip initialization of pci. This will happen if
- * device is undergoing VF reset. Otherwise, we would need to
- * re-initialize pci interface again i.e. when device is not going
- * through *any* reset or actually undergoing full reset.
- */
- if (hclgevf_dev_ongoing_reset(hdev))
- return 0;
-
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "failed to enable PCI device\n");
@@ -1957,23 +2148,98 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
return 0;
}
-static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret = 0;
+
+ if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
+ test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ }
+
+ if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ pci_set_master(pdev);
+ ret = hclgevf_init_msi(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed(%d) to init MSI/MSI-X\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_misc_irq_init(hdev);
+ if (ret) {
+ hclgevf_uninit_msi(hdev);
+ dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
+ ret);
+ return ret;
+ }
+
+ set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ }
+
+ return ret;
+}
+
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
int ret;
- /* check if device is on-going full reset(i.e. pcie as well) */
- if (hclgevf_dev_ongoing_full_reset(hdev)) {
- dev_warn(&pdev->dev, "device is going full reset\n");
- hclgevf_uninit_hdev(hdev);
+ ret = hclgevf_pci_reset(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci reset failed %d\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_cmd_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "cmd failed %d\n", ret);
+ return ret;
}
+ ret = hclgevf_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize RSS\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_config_gro(hdev, true);
+ if (ret)
+ return ret;
+
+ ret = hclgevf_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize VLAN config\n", ret);
+ return ret;
+ }
+
+ dev_info(&hdev->pdev->dev, "Reset done\n");
+
+ return 0;
+}
+
+static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
ret = hclgevf_pci_init(hdev);
if (ret) {
dev_err(&pdev->dev, "PCI initialization failed\n");
return ret;
}
+ ret = hclgevf_cmd_queue_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
+ goto err_cmd_queue_init;
+ }
+
ret = hclgevf_cmd_init(hdev);
if (ret)
goto err_cmd_init;
@@ -1983,16 +2249,17 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"Query vf status error, ret = %d.\n", ret);
- goto err_query_vf;
+ goto err_cmd_init;
}
ret = hclgevf_init_msi(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
- goto err_query_vf;
+ goto err_cmd_init;
}
hclgevf_state_init(hdev);
+ hdev->reset_level = HNAE3_VF_FUNC_RESET;
ret = hclgevf_misc_irq_init(hdev);
if (ret) {
@@ -2001,6 +2268,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_misc_irq_init;
}
+ set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+
ret = hclgevf_configure(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
@@ -2019,6 +2288,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
+ ret = hclgevf_config_gro(hdev, true);
+ if (ret)
+ goto err_config;
+
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
@@ -2034,6 +2307,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
+ hdev->last_reset_time = jiffies;
pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
return 0;
@@ -2043,25 +2317,31 @@ err_config:
err_misc_irq_init:
hclgevf_state_uninit(hdev);
hclgevf_uninit_msi(hdev);
-err_query_vf:
- hclgevf_cmd_uninit(hdev);
err_cmd_init:
+ hclgevf_cmd_uninit(hdev);
+err_cmd_queue_init:
hclgevf_pci_uninit(hdev);
+ clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
return ret;
}
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
hclgevf_state_uninit(hdev);
- hclgevf_misc_irq_uninit(hdev);
+
+ if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ hclgevf_pci_uninit(hdev);
+ }
+
hclgevf_cmd_uninit(hdev);
- hclgevf_uninit_msi(hdev);
- hclgevf_pci_uninit(hdev);
}
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
+ struct hclgevf_dev *hdev;
int ret;
ret = hclgevf_alloc_hdev(ae_dev);
@@ -2071,10 +2351,16 @@ static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
}
ret = hclgevf_init_hdev(ae_dev->priv);
- if (ret)
+ if (ret) {
dev_err(&pdev->dev, "hclge device initialization failed\n");
+ return ret;
+ }
- return ret;
+ hdev = ae_dev->priv;
+ timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
+ INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);
+
+ return 0;
}
static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
@@ -2151,6 +2437,13 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
hdev->hw.mac.duplex = duplex;
}
+static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_config_gro(hdev, enable);
+}
+
static void hclgevf_get_media_type(struct hnae3_handle *handle,
u8 *media_type)
{
@@ -2159,13 +2452,38 @@ static void hclgevf_get_media_type(struct hnae3_handle *handle,
*media_type = hdev->hw.mac.media_type;
}
+static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+}
+
+static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->reset_count;
+}
+
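These three read-only ops let the enet/client layer observe reset progress without reaching into VF private state. A sketch of the sort of guard a caller could build from them (hns3_is_resetting is a hypothetical helper, shown only to illustrate the intended use):

static bool hns3_is_resetting(struct hnae3_handle *h)
{
	const struct hnae3_ae_ops *ops = h->ae_algo->ops;

	/* is a driver-level reset currently being handled? */
	if (ops->ae_dev_resetting && ops->ae_dev_resetting(h))
		return true;

	/* hardware may report a reset the driver has not picked up yet */
	return ops->get_hw_reset_stat && ops->get_hw_reset_stat(h);
}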
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
+ .flr_prepare = hclgevf_flr_prepare,
+ .flr_done = hclgevf_flr_done,
.init_client_instance = hclgevf_init_client_instance,
.uninit_client_instance = hclgevf_uninit_client_instance,
.start = hclgevf_ae_start,
.stop = hclgevf_ae_stop,
+ .client_start = hclgevf_client_start,
+ .client_stop = hclgevf_client_stop,
.map_ring_to_vector = hclgevf_map_ring_to_vector,
.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
.get_vector = hclgevf_get_vector,
@@ -2193,11 +2511,17 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.set_vlan_filter = hclgevf_set_vlan_filter,
.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
.reset_event = hclgevf_reset_event,
+ .set_default_reset_request = hclgevf_set_def_reset_request,
.get_channels = hclgevf_get_channels,
.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
.get_status = hclgevf_get_status,
.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
.get_media_type = hclgevf_get_media_type,
+ .get_hw_reset_stat = hclgevf_get_hw_reset_stat,
+ .ae_dev_resetting = hclgevf_ae_dev_resetting,
+ .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
+ .set_gro_en = hclgevf_gro_en,
+ .set_mtu = hclgevf_set_mtu,
};
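Of the new ops, set_mtu is the only one whose implementation is not visible in this hunk. Given the HCLGE_MBX_SET_MTU opcode this series introduces, the VF side is presumably a thin mailbox wrapper; a sketch under that assumption (the payload layout is assumed, not confirmed here):

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	/* the PF validates and programs the MTU on the VF's behalf */
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0,
				    (u8 *)&new_mtu, sizeof(new_mtu),
				    true, NULL, 0);
}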
static struct hnae3_ae_algo ae_algovf = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index aed241e8ffab..4517b7ea5817 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -31,11 +31,19 @@
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
+/* RST register bits for RESET event */
+#define HCLGEVF_VECTOR0_RST_INT_B 2
#define HCLGEVF_TQP_RESET_TRY_TIMES 10
/* Reset related Registers */
-#define HCLGEVF_FUN_RST_ING 0x20C00
-#define HCLGEVF_FUN_RST_ING_B 0
+#define HCLGEVF_RST_ING 0x20C00
+#define HCLGEVF_FUN_RST_ING_BIT BIT(0)
+#define HCLGEVF_GLOBAL_RST_ING_BIT BIT(5)
+#define HCLGEVF_CORE_RST_ING_BIT BIT(6)
+#define HCLGEVF_IMP_RST_ING_BIT BIT(7)
+#define HCLGEVF_RST_ING_BITS \
+ (HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
+ HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)
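HCLGEVF_RST_ING now carries one status bit per reset source instead of a single function-reset flag, and HCLGEVF_RST_ING_BITS folds the sources the VF must wait out into one mask. The intended check is a single masked read, as in this sketch (hclgevf_hw_reset_ongoing is an illustrative name):

static bool hclgevf_hw_reset_ongoing(struct hclgevf_dev *hdev)
{
	/* any of function/global/core/IMP reset still in flight? */
	u32 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);

	return !!(val & HCLGEVF_RST_ING_BITS);
}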
#define HCLGEVF_RSS_IND_TBL_SIZE 512
#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
@@ -54,17 +62,25 @@
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
+enum hclgevf_evt_cause {
+ HCLGEVF_VECTOR0_EVENT_RST,
+ HCLGEVF_VECTOR0_EVENT_MBX,
+ HCLGEVF_VECTOR0_EVENT_OTHER,
+};
+
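The misc vector0 interrupt now has to separate reset interrupts from mailbox interrupts, which is what this enum encodes. A hedged sketch of the classifier the IRQ handler would use, built only from the register and bit names defined above (the exact clearing and latching details in this patch may differ):

static enum hclgevf_evt_cause
hclgevf_check_evt_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* reset outranks mailbox traffic, so test it first */
	if (cmdq_src & BIT(HCLGEVF_VECTOR0_RST_INT_B)) {
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		*clearval = cmdq_src & ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	if (cmdq_src & BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B)) {
		*clearval = cmdq_src & ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}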
/* states of hclgevf device & tasks */
enum hclgevf_states {
/* device states */
HCLGEVF_STATE_DOWN,
HCLGEVF_STATE_DISABLED,
+ HCLGEVF_STATE_IRQ_INITED,
/* task states */
HCLGEVF_STATE_SERVICE_SCHED,
HCLGEVF_STATE_RST_SERVICE_SCHED,
HCLGEVF_STATE_RST_HANDLING,
HCLGEVF_STATE_MBX_SERVICE_SCHED,
HCLGEVF_STATE_MBX_HANDLING,
+ HCLGEVF_STATE_CMD_DISABLE,
};
#define HCLGEVF_MPF_ENBALE 1
@@ -145,10 +161,17 @@ struct hclgevf_dev {
struct hclgevf_misc_vector misc_vector;
struct hclgevf_rss_cfg rss_cfg;
unsigned long state;
+ unsigned long flr_state;
+ unsigned long default_reset_request;
+ unsigned long last_reset_time;
+ enum hnae3_reset_type reset_level;
+ unsigned long reset_pending;
+ enum hnae3_reset_type reset_type;
#define HCLGEVF_RESET_REQUESTED 0
#define HCLGEVF_RESET_PENDING 1
unsigned long reset_state; /* requested, pending */
	unsigned long reset_count; /* the number of resets that have been done */
u32 reset_attempts;
u32 fw_version;
@@ -178,7 +201,9 @@ struct hclgevf_dev {
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
struct timer_list service_timer;
+ struct timer_list keep_alive_timer;
struct work_struct service_task;
+ struct work_struct keep_alive_task;
struct work_struct rst_service_task;
struct work_struct mbx_service_task;
@@ -192,18 +217,9 @@ struct hclgevf_dev {
u32 flag;
};
-static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev)
-{
- return (hdev &&
- (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
- (hdev->nic.reset_level == HNAE3_VF_RESET));
-}
-
-static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev)
+static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
{
- return (hdev &&
- (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
- (hdev->nic.reset_level == HNAE3_VF_FULL_RESET));
+ return !!hdev->reset_pending;
}
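With reset_pending acting as a bitmap of hnae3_reset_type values, one helper covers what previously needed a per-level check. A typical guard at a mailbox call site looks roughly like this (hclgevf_try_send is a hypothetical wrapper):

static int hclgevf_try_send(struct hclgevf_dev *hdev, u16 code, u16 subcode)
{
	/* don't queue new mailbox traffic while a reset is queued up */
	if (hclgevf_is_reset_pending(hdev))
		return -EBUSY;

	return hclgevf_send_mbx_msg(hdev, code, subcode, NULL, 0,
				    false, NULL, 0);
}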
int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index e9d5a4f96304..ef9c8e6eca28 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -40,6 +40,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
}
while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+ return -EIO;
+
udelay(HCLGEVF_SLEEP_USCOEND);
i++;
}
@@ -148,6 +151,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
crq = &hdev->hw.cmq.crq;
while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ dev_info(&hdev->pdev->dev, "vf crq needs init\n");
+ return;
+ }
+
desc = &crq->desc[crq->next_to_use];
req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
@@ -233,6 +241,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
+ enum hnae3_reset_type reset_type;
u16 link_status;
u16 *msg_q;
u8 duplex;
@@ -248,6 +257,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
/* process all the async queue messages */
while (tail != hdev->arq.head) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ dev_info(&hdev->pdev->dev,
+ "vf crq need init in async\n");
+ return;
+ }
+
msg_q = hdev->arq.msg_q[hdev->arq.head];
switch (msg_q[0]) {
@@ -267,7 +282,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
* has been completely reset. After this, the stack should
* eventually be re-initialized.
*/
- hdev->nic.reset_level = HNAE3_VF_RESET;
+ reset_type = le16_to_cpu(msg_q[1]);
+ set_bit(reset_type, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);