Diffstat:
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 326
 1 file changed, 211 insertions(+), 115 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 48549db23c52..34b744df6709 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
+#include <net/ipv6.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
@@ -24,7 +25,7 @@
#include "hnae3.h"
#define HCLGE_NAME "hclge"
-#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
+#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_BUF_SIZE_UNIT 256U
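The macro change above only drops a redundant pair of parentheses; the offset-based read itself is unchanged. A minimal standalone sketch of that pattern (the struct and macro names here are hypothetical, not from the driver):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stats block standing in for struct hclge_mac_stats. */
    struct demo_mac_stats {
        uint64_t tx_pkts;
        uint64_t rx_pkts;
    };

    /* Read a u64 counter at a byte offset into the block, mirroring
     * HCLGE_STATS_READ() plus HCLGE_MAC_STATS_FIELD_OFF(). */
    #define DEMO_STATS_READ(p, offset) (*(uint64_t *)((uint8_t *)(p) + (offset)))
    #define DEMO_STATS_FIELD_OFF(f) (offsetof(struct demo_mac_stats, f))

    int main(void)
    {
        struct demo_mac_stats stats = { .tx_pkts = 42, .rx_pkts = 7 };

        printf("%llu\n", (unsigned long long)
               DEMO_STATS_READ(&stats, DEMO_STATS_FIELD_OFF(rx_pkts)));
        return 0;
    }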
@@ -55,8 +56,6 @@
#define HCLGE_LINK_STATUS_MS 10
-#define HCLGE_VF_VPORT_START_NUM 1
-
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
@@ -628,7 +627,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -636,7 +635,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -930,7 +929,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
return 0;
}
-static int hclge_parse_speed(int speed_cmd, int *speed)
+static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
switch (speed_cmd) {
case 6:
@@ -1373,6 +1372,8 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
+ ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
+ ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
@@ -1391,7 +1392,9 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
+ ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
+ ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@ -1406,8 +1409,12 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev)
dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
if (!dev_specs->max_tm_rate)
dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
+ if (!dev_specs->max_qset_num)
+ dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
if (!dev_specs->max_int_gl)
dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
+ if (!dev_specs->max_frm_size)
+ dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
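Context for the two hunks above: newer firmware reports max_qset_num and max_frm_size through the query-dev-specs command, and hclge_check_dev_specs() falls back to the driver defaults whenever firmware returns zero (i.e. is too old to know the field). A minimal sketch of that fallback pattern, assuming a default value (HCLGE_MAC_MAX_FRAME plays this role in the driver):

    #include <stdint.h>

    /* Assumed default frame size for the sketch. */
    #define DEMO_DEF_MAX_FRM_SIZE 9728U

    /* Use the firmware-reported value unless it is zero, in which
     * case fall back to the compiled-in default. */
    static uint32_t demo_check_spec(uint32_t reported)
    {
        return reported ? reported : DEMO_DEF_MAX_FRM_SIZE;
    }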
@@ -4237,11 +4244,6 @@ static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
return HCLGE_RSS_KEY_SIZE;
}
-static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
-{
- return HCLGE_RSS_IND_TBL_SIZE;
-}
-
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
const u8 hfunc, const u8 *key)
{
@@ -4283,6 +4285,7 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
{
struct hclge_rss_indirection_table_cmd *req;
struct hclge_desc desc;
+ int rss_cfg_tbl_num;
u8 rss_msb_oft;
u8 rss_msb_val;
int ret;
@@ -4291,8 +4294,10 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
u32 j;
req = (struct hclge_rss_indirection_table_cmd *)desc.data;
+ rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
+ HCLGE_RSS_CFG_TBL_SIZE;
- for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
+ for (i = 0; i < rss_cfg_tbl_num; i++) {
hclge_cmd_setup_basic_desc
(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
@@ -4398,6 +4403,7 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
u8 *key, u8 *hfunc)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
int i;
@@ -4422,7 +4428,7 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
/* Get indirect table */
if (indir)
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
indir[i] = vport->rss_indirection_tbl[i];
return 0;
@@ -4431,6 +4437,7 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u8 hash_algo;
@@ -4462,7 +4469,7 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
/* Update the shadow RSS table with user specified qids */
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
vport->rss_indirection_tbl[i] = indir[i];
/* Update the hardware */
@@ -4494,22 +4501,12 @@ static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
return hash_sets;
}
-static int hclge_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
+ struct ethtool_rxnfc *nfc,
+ struct hclge_rss_input_tuple_cmd *req)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_rss_input_tuple_cmd *req;
- struct hclge_desc desc;
u8 tuple_sets;
- int ret;
-
- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- req = (struct hclge_rss_input_tuple_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
@@ -4554,6 +4551,32 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
return -EINVAL;
}
+ return 0;
+}
+
+static int hclge_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclge_rss_input_tuple_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
+
+ ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init rss tuple cmd, ret = %d\n", ret);
+ return ret;
+ }
+
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -4573,52 +4596,69 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
return 0;
}
-static int hclge_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
+ u8 *tuple_sets)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
- u8 tuple_sets;
-
- nfc->data = 0;
-
- switch (nfc->flow_type) {
+ switch (flow_type) {
case TCP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
break;
case UDP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
break;
case TCP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
break;
case UDP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
break;
case SCTP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
break;
case SCTP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
break;
case IPV4_FLOW:
case IPV6_FLOW:
- tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
+ *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
break;
default:
return -EINVAL;
}
- if (!tuple_sets)
- return 0;
+ return 0;
+}
+
+static u64 hclge_convert_rss_tuple(u8 tuple_sets)
+{
+ u64 tuple_data = 0;
if (tuple_sets & HCLGE_D_PORT_BIT)
- nfc->data |= RXH_L4_B_2_3;
+ tuple_data |= RXH_L4_B_2_3;
if (tuple_sets & HCLGE_S_PORT_BIT)
- nfc->data |= RXH_L4_B_0_1;
+ tuple_data |= RXH_L4_B_0_1;
if (tuple_sets & HCLGE_D_IP_BIT)
- nfc->data |= RXH_IP_DST;
+ tuple_data |= RXH_IP_DST;
if (tuple_sets & HCLGE_S_IP_BIT)
- nfc->data |= RXH_IP_SRC;
+ tuple_data |= RXH_IP_SRC;
+
+ return tuple_data;
+}
+
+static int hclge_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ u8 tuple_sets;
+ int ret;
+
+ nfc->data = 0;
+
+ ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
+ if (ret || !tuple_sets)
+ return ret;
+
+ nfc->data = hclge_convert_rss_tuple(tuple_sets);
return 0;
}
@@ -4703,14 +4743,15 @@ void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
int i, j;
for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
vport[j].rss_indirection_tbl[i] =
i % vport[j].alloc_rss_size;
}
}
-static void hclge_rss_init_cfg(struct hclge_dev *hdev)
+static int hclge_rss_init_cfg(struct hclge_dev *hdev)
{
+ u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
struct hclge_vport *vport = hdev->vport;
@@ -4718,6 +4759,8 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ u16 *rss_ind_tbl;
+
vport[i].rss_tuple_sets.ipv4_tcp_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
vport[i].rss_tuple_sets.ipv4_udp_en =
@@ -4739,11 +4782,19 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
vport[i].rss_algo = rss_algo;
+ rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
+ sizeof(*rss_ind_tbl), GFP_KERNEL);
+ if (!rss_ind_tbl)
+ return -ENOMEM;
+
+ vport[i].rss_indirection_tbl = rss_ind_tbl;
memcpy(vport[i].rss_hash_key, hclge_hash_key,
HCLGE_RSS_KEY_SIZE);
}
hclge_rss_indir_init_cfg(hdev);
+
+ return 0;
}
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
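The hunks above replace the compile-time HCLGE_RSS_IND_TBL_SIZE with the per-device rss_ind_tbl_size from dev_specs, and hclge_rss_init_cfg() now allocates each vport's indirection table dynamically (hence the new int return for the -ENOMEM path). The table itself is still filled round-robin; a minimal sketch of that initialization, independent of the driver types:

    #include <stdint.h>

    /* Spread hash buckets over the allocated RSS queues round-robin,
     * as hclge_rss_indir_init_cfg() does with i % alloc_rss_size. */
    static void demo_rss_indir_init(uint16_t *tbl, uint16_t tbl_size,
                                    uint16_t rss_size)
    {
        uint16_t i;

        for (i = 0; i < tbl_size; i++)
            tbl[i] = i % rss_size; /* queue index for bucket i */
    }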
@@ -5491,12 +5542,10 @@ static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
BIT(INNER_IP_TOS);
/* check whether src/dst ip address used */
- if (!spec->ip6src[0] && !spec->ip6src[1] &&
- !spec->ip6src[2] && !spec->ip6src[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
*unused_tuple |= BIT(INNER_SRC_IP);
- if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
- !spec->ip6dst[2] && !spec->ip6dst[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
*unused_tuple |= BIT(INNER_DST_IP);
if (!spec->psrc)
@@ -5521,12 +5570,10 @@ static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
/* check whether src/dst ip address used */
- if (!spec->ip6src[0] && !spec->ip6src[1] &&
- !spec->ip6src[2] && !spec->ip6src[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
*unused_tuple |= BIT(INNER_SRC_IP);
- if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
- !spec->ip6dst[2] && !spec->ip6dst[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
*unused_tuple |= BIT(INNER_DST_IP);
if (!spec->l4_proto)
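Both hunks replace an open-coded four-word comparison with ipv6_addr_any(), which tests whether all 128 bits of the address are zero. A user-space equivalent of the check being replaced (the helper name is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* An IPv6 address is the unspecified ("any") address iff every
     * 32-bit word is zero; ORing the words avoids four branches. */
    static bool demo_ipv6_addr_any(const uint32_t addr[4])
    {
        return (addr[0] | addr[1] | addr[2] | addr[3]) == 0;
    }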
@@ -5578,7 +5625,7 @@ static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
if (fs->m_ext.vlan_tci &&
be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
dev_err(&hdev->pdev->dev,
- "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
+ "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
return -EINVAL;
}
@@ -8306,36 +8353,18 @@ static void hclge_sync_mac_table(struct hclge_dev *hdev)
}
}
-void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
- enum HCLGE_MAC_ADDR_TYPE mac_type)
+static void hclge_build_del_list(struct list_head *list,
+ bool is_del_list,
+ struct list_head *tmp_del_list)
{
- int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
struct hclge_mac_node *mac_cfg, *tmp;
- struct hclge_dev *hdev = vport->back;
- struct list_head tmp_del_list, *list;
- int ret;
-
- if (mac_type == HCLGE_MAC_ADDR_UC) {
- list = &vport->uc_mac_list;
- unsync = hclge_rm_uc_addr_common;
- } else {
- list = &vport->mc_mac_list;
- unsync = hclge_rm_mc_addr_common;
- }
-
- INIT_LIST_HEAD(&tmp_del_list);
-
- if (!is_del_list)
- set_bit(vport->vport_id, hdev->vport_config_block);
-
- spin_lock_bh(&vport->mac_list_lock);
list_for_each_entry_safe(mac_cfg, tmp, list, node) {
switch (mac_cfg->state) {
case HCLGE_MAC_TO_DEL:
case HCLGE_MAC_ACTIVE:
list_del(&mac_cfg->node);
- list_add_tail(&mac_cfg->node, &tmp_del_list);
+ list_add_tail(&mac_cfg->node, tmp_del_list);
break;
case HCLGE_MAC_TO_ADD:
if (is_del_list) {
@@ -8345,10 +8374,18 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
break;
}
}
+}
- spin_unlock_bh(&vport->mac_list_lock);
+static void hclge_unsync_del_list(struct hclge_vport *vport,
+ int (*unsync)(struct hclge_vport *vport,
+ const unsigned char *addr),
+ bool is_del_list,
+ struct list_head *tmp_del_list)
+{
+ struct hclge_mac_node *mac_cfg, *tmp;
+ int ret;
- list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
+ list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
ret = unsync(vport, mac_cfg->mac_addr);
if (!ret || ret == -ENOENT) {
/* clear all mac addr from hardware, but remain these
@@ -8366,6 +8403,35 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
mac_cfg->state = HCLGE_MAC_TO_DEL;
}
}
+}
+
+void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+ struct hclge_dev *hdev = vport->back;
+ struct list_head tmp_del_list, *list;
+
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ list = &vport->uc_mac_list;
+ unsync = hclge_rm_uc_addr_common;
+ } else {
+ list = &vport->mc_mac_list;
+ unsync = hclge_rm_mc_addr_common;
+ }
+
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ if (!is_del_list)
+ set_bit(vport->vport_id, hdev->vport_config_block);
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_build_del_list(list, is_del_list, &tmp_del_list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
spin_lock_bh(&vport->mac_list_lock);
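The refactor above splits hclge_rm_vport_all_mac_table() so that list manipulation happens under mac_list_lock (hclge_build_del_list()) while the hardware unsync commands run with the lock dropped (hclge_unsync_del_list()). A compact sketch of that build-under-lock, drain-outside-lock shape, using POSIX threads and a singly linked list (all names hypothetical; the real code moves selected nodes rather than the whole chain):

    #include <pthread.h>
    #include <stddef.h>

    struct demo_node {
        struct demo_node *next;
    };

    /* Detach the list under the lock; the caller then walks the
     * detached chain and issues slow (e.g. firmware) commands
     * without holding the lock. */
    static struct demo_node *demo_build_del_list(pthread_mutex_t *lock,
                                                 struct demo_node **list)
    {
        struct demo_node *del_list;

        pthread_mutex_lock(lock);
        del_list = *list;
        *list = NULL;
        pthread_mutex_unlock(lock);

        return del_list;
    }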
@@ -8772,32 +8838,16 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
-static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
- bool is_kill, u16 vlan,
- __be16 proto)
+static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, u16 vlan,
+ struct hclge_desc *desc)
{
- struct hclge_vport *vport = &hdev->vport[vfid];
struct hclge_vlan_filter_vf_cfg_cmd *req0;
struct hclge_vlan_filter_vf_cfg_cmd *req1;
- struct hclge_desc desc[2];
u8 vf_byte_val;
u8 vf_byte_off;
int ret;
- /* if vf vlan table is full, firmware will close vf vlan filter, it
- * is unable and unnecessary to add new vlan id to vf vlan filter.
- * If spoof check is enable, and vf vlan is full, it shouldn't add
- * new vlan, because tx packets with these vlan id will be dropped.
- */
- if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
- if (vport->vf_info.spoofchk && vlan) {
- dev_err(&hdev->pdev->dev,
- "Can't add vlan due to spoof check is on and vf vlan table is full\n");
- return -EPERM;
- }
- return 0;
- }
-
hclge_cmd_setup_basic_desc(&desc[0],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
hclge_cmd_setup_basic_desc(&desc[1],
@@ -8827,12 +8877,22 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
return ret;
}
+ return 0;
+}
+
+static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, struct hclge_desc *desc)
+{
+ struct hclge_vlan_filter_vf_cfg_cmd *req;
+
+ req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
+
if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY 2
- if (!req0->resp_code || req0->resp_code == 1)
+ if (!req->resp_code || req->resp_code == 1)
return 0;
- if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
+ if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
set_bit(vfid, hdev->vf_vlan_full);
dev_warn(&hdev->pdev->dev,
"vf vlan table is full, vf vlan filter is disabled\n");
@@ -8841,10 +8901,10 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
dev_err(&hdev->pdev->dev,
"Add vf vlan filter fail, ret =%u.\n",
- req0->resp_code);
+ req->resp_code);
} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
- if (!req0->resp_code)
+ if (!req->resp_code)
return 0;
/* vf vlan filter is disabled when vf vlan table is full,
@@ -8852,17 +8912,46 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
* Just return 0 without warning, avoid massive verbose
* print logs when unload.
*/
- if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
+ if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
return 0;
dev_err(&hdev->pdev->dev,
"Kill vf vlan filter fail, ret =%u.\n",
- req0->resp_code);
+ req->resp_code);
}
return -EIO;
}
+static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, u16 vlan,
+ __be16 proto)
+{
+ struct hclge_vport *vport = &hdev->vport[vfid];
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* if the vf vlan table is full, firmware will close the vf vlan
+ * filter; it is then both futile and unnecessary to add a new vlan
+ * id to it. If spoof check is enabled and the vf vlan table is
+ * full, no new vlan should be added, because tx packets with that
+ * vlan id would be dropped.
+ */
+ if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
+ if (vport->vf_info.spoofchk && vlan) {
+ dev_err(&hdev->pdev->dev,
+ "Can't add vlan: spoof check is on and vf vlan table is full\n");
+ return -EPERM;
+ }
+ return 0;
+ }
+
+ ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
+ if (ret)
+ return ret;
+
+ return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
+}
+
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
u16 vlan_id, bool is_kill)
{
@@ -9664,7 +9753,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* HW supports 2 layer vlan */
max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
- max_frm_size > HCLGE_MAC_MAX_FRAME)
+ max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
return -EINVAL;
max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
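For reference, the check above derives max_frm_size from the requested MTU plus the L2 overhead the hardware must carry (Ethernet header, FCS, and two stacked VLAN tags), and the upper bound is now the device-specific dev_specs.max_frm_size rather than the fixed HCLGE_MAC_MAX_FRAME. The arithmetic, worked for a standard 1500-byte MTU:

    #include <stdio.h>

    #define DEMO_ETH_HLEN     14 /* ETH_HLEN */
    #define DEMO_ETH_FCS_LEN   4 /* ETH_FCS_LEN */
    #define DEMO_VLAN_HLEN     4 /* VLAN_HLEN */

    int main(void)
    {
        int new_mtu = 1500;
        /* hardware carries two stacked VLAN tags, hence 2 * VLAN_HLEN */
        int max_frm_size = new_mtu + DEMO_ETH_HLEN + DEMO_ETH_FCS_LEN +
                           2 * DEMO_VLAN_HLEN;

        printf("%d\n", max_frm_size); /* prints 1526 */
        return 0;
    }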
@@ -10588,7 +10677,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
- hclge_rss_init_cfg(hdev);
+ ret = hclge_rss_init_cfg(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
ret = hclge_rss_init_hw(hdev);
if (ret) {
dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
@@ -10816,7 +10910,7 @@ static void hclge_reset_vf_rate(struct hclge_dev *hdev)
}
}
-static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
+static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
int min_tx_rate, int max_tx_rate)
{
if (min_tx_rate != 0 ||
@@ -10837,7 +10931,7 @@ static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
struct hclge_dev *hdev = vport->back;
int ret;
- ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
+ ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
if (ret)
return ret;
@@ -11079,6 +11173,7 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
bool rxfh_configured)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
@@ -11122,11 +11217,12 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
goto out;
/* Reinitializes the rss indirect table according to the new RSS size */
- rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+ rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
+ GFP_KERNEL);
if (!rss_indir)
return -ENOMEM;
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_indir[i] = i % kinfo->rss_size;
ret = hclge_set_rss(handle, rss_indir, NULL, 0);
@@ -11806,7 +11902,6 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fec = hclge_get_fec,
.set_fec = hclge_set_fec,
.get_rss_key_size = hclge_get_rss_key_size,
- .get_rss_indir_size = hclge_get_rss_indir_size,
.get_rss = hclge_get_rss,
.set_rss = hclge_set_rss,
.set_rss_tuple = hclge_set_rss_tuple,
@@ -11857,6 +11952,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.enable_fd = hclge_enable_fd,
.add_arfs_entry = hclge_add_fd_entry_by_arfs,
.dbg_run_cmd = hclge_dbg_run_cmd,
+ .dbg_read_cmd = hclge_dbg_read_cmd,
.handle_hw_ras_error = hclge_handle_hw_ras_error,
.get_hw_reset_stat = hclge_get_hw_reset_stat,
.ae_dev_resetting = hclge_ae_dev_resetting,