Diffstat:
 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 953 +++++++++++++----------
 1 file changed, 558 insertions(+), 395 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 162cb9afa0e7..18711e0f9bdf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -35,6 +35,13 @@ static const char hns3_driver_string[] =
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, " Network interface message level setting");
+
+#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
/* hns3_pci_tbl - PCI Device ID Table
*
* Last entry must be all 0s
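As an illustration of how the new debug parameter above is consumed (later in this patch it is passed to netif_msg_init() together with DEFAULT_MSG_LEVEL), here is a minimal user-space sketch of that resolution logic; the MSG_* values are stand-ins mirroring the corresponding NETIF_MSG_* bits, and the helper name is hypothetical:

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>

#define MSG_PROBE	0x0002u		/* stand-in for NETIF_MSG_PROBE */
#define MSG_LINK	0x0004u		/* stand-in for NETIF_MSG_LINK */
#define MSG_IFDOWN	0x0010u		/* stand-in for NETIF_MSG_IFDOWN */
#define MSG_IFUP	0x0020u		/* stand-in for NETIF_MSG_IFUP */
#define DEFAULT_LEVEL	(MSG_PROBE | MSG_LINK | MSG_IFDOWN | MSG_IFUP)

/* Resolve a "debug" module parameter the way netif_msg_init() does:
 * negative means "use the driver default", zero silences the driver,
 * and a small positive N enables the low N message bits.
 */
static unsigned int msg_init(int debug_value, unsigned int default_bits)
{
	if (debug_value < 0 || debug_value >= 32)
		return default_bits;
	if (debug_value == 0)
		return 0;
	return (1u << debug_value) - 1;
}

int main(void)
{
	printf("debug=-1 -> 0x%x\n", msg_init(-1, DEFAULT_LEVEL));	/* 0x36 */
	printf("debug=5  -> 0x%x\n", msg_init(5, DEFAULT_LEVEL));	/* 0x1f */
	return 0;
}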
@@ -67,7 +74,7 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
struct hns3_enet_tqp_vector *tqp_vector = vector;
- napi_schedule(&tqp_vector->napi);
+ napi_schedule_irqoff(&tqp_vector->napi);
return IRQ_HANDLED;
}
@@ -730,95 +737,6 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
return 0;
}
-static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
- u8 il4_proto, u32 *type_cs_vlan_tso,
- u32 *ol_type_vlan_len_msec)
-{
- union l3_hdr_info l3;
- union l4_hdr_info l4;
- unsigned char *l2_hdr;
- u8 l4_proto = ol4_proto;
- u32 ol2_len;
- u32 ol3_len;
- u32 ol4_len;
- u32 l2_len;
- u32 l3_len;
-
- l3.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
-
- /* compute L2 header size for normal packet, defined in 2 Bytes */
- l2_len = l3.hdr - skb->data;
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
-
- /* tunnel packet*/
- if (skb->encapsulation) {
- /* compute OL2 header size, defined in 2 Bytes */
- ol2_len = l2_len;
- hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_L2LEN_S, ol2_len >> 1);
-
- /* compute OL3 header size, defined in 4 Bytes */
- ol3_len = l4.hdr - l3.hdr;
- hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S,
- ol3_len >> 2);
-
- /* MAC in UDP, MAC in GRE (0x6558)*/
- if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
- /* switch MAC header ptr from outer to inner header.*/
- l2_hdr = skb_inner_mac_header(skb);
-
- /* compute OL4 header size, defined in 4 Bytes. */
- ol4_len = l2_hdr - l4.hdr;
- hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_L4LEN_S, ol4_len >> 2);
-
- /* switch IP header ptr from outer to inner header */
- l3.hdr = skb_inner_network_header(skb);
-
- /* compute inner l2 header size, defined in 2 Bytes. */
- l2_len = l3.hdr - l2_hdr;
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S,
- l2_len >> 1);
- } else {
- /* skb packet types not supported by hardware,
- * txbd len fild doesn't be filled.
- */
- return;
- }
-
- /* switch L4 header pointer from outer to inner */
- l4.hdr = skb_inner_transport_header(skb);
-
- l4_proto = il4_proto;
- }
-
- /* compute inner(/normal) L3 header size, defined in 4 Bytes */
- l3_len = l4.hdr - l3.hdr;
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
-
- /* compute inner(/normal) L4 header size, defined in 4 Bytes */
- switch (l4_proto) {
- case IPPROTO_TCP:
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
- l4.tcp->doff);
- break;
- case IPPROTO_SCTP:
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
- (sizeof(struct sctphdr) >> 2));
- break;
- case IPPROTO_UDP:
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
- (sizeof(struct udphdr) >> 2));
- break;
- default:
- /* skb packet types not supported by hardware,
- * txbd len fild doesn't be filled.
- */
- return;
- }
-}
-
/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
* and it is udp packet, which has a dest port as the IANA assigned.
* the hardware is expected to do the checksum offload, but the
@@ -827,12 +745,12 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
*/
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
-#define IANA_VXLAN_PORT 4789
union l4_hdr_info l4;
l4.hdr = skb_transport_header(skb);
- if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
+ if (!(!skb->encapsulation &&
+ l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
return false;
skb_checksum_help(skb);
@@ -840,46 +758,71 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
return true;
}
-static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
- u8 il4_proto, u32 *type_cs_vlan_tso,
- u32 *ol_type_vlan_len_msec)
+static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ u32 *ol_type_vlan_len_msec)
{
+ u32 l2_len, l3_len, l4_len;
+ unsigned char *il2_hdr;
union l3_hdr_info l3;
- u32 l4_proto = ol4_proto;
+ union l4_hdr_info l4;
l3.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
- /* define OL3 type and tunnel type(OL4).*/
- if (skb->encapsulation) {
- /* define outer network header type.*/
- if (skb->protocol == htons(ETH_P_IP)) {
- if (skb_is_gso(skb))
- hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_CSUM);
- else
- hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_NO_CSUM);
-
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
- hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV6);
- }
+ /* compute OL2 header size, defined in 2 Bytes */
+ l2_len = l3.hdr - skb->data;
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);
- /* define tunnel type(OL4).*/
- switch (l4_proto) {
- case IPPROTO_UDP:
+ /* compute OL3 header size, defined in 4 Bytes */
+ l3_len = l4.hdr - l3.hdr;
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
+
+ il2_hdr = skb_inner_mac_header(skb);
+ /* compute OL4 header size, defined in 4 Bytes. */
+ l4_len = il2_hdr - l4.hdr;
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
+
+ /* define outer network header type */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb_is_gso(skb))
hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_MAC_IN_UDP);
- break;
- case IPPROTO_GRE:
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_CSUM);
+ else
hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_NVGRE);
- break;
- default:
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_NO_CSUM);
+
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV6);
+ }
+
+ if (ol4_proto == IPPROTO_UDP)
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_MAC_IN_UDP);
+ else if (ol4_proto == IPPROTO_GRE)
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_NVGRE);
+}
+
+static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ u8 il4_proto, u32 *type_cs_vlan_tso,
+ u32 *ol_type_vlan_len_msec)
+{
+ unsigned char *l2_hdr = skb->data;
+ u32 l4_proto = ol4_proto;
+ union l4_hdr_info l4;
+ union l3_hdr_info l3;
+ u32 l2_len, l3_len;
+
+ l4.hdr = skb_transport_header(skb);
+ l3.hdr = skb_network_header(skb);
+
+ /* handle encapsulation skb */
+ if (skb->encapsulation) {
+ /* If this is not a UDP/GRE encapsulation skb */
+ if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
/* drop the skb tunnel packet if hardware don't support,
* because hardware can't calculate csum when TSO.
*/
@@ -893,7 +836,12 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
return 0;
}
+ hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
+
+ /* switch to inner header */
+ l2_hdr = skb_inner_mac_header(skb);
l3.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
l4_proto = il4_proto;
}
@@ -911,11 +859,22 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
HNS3_L3T_IPV6);
}
+ /* compute inner(/normal) L2 header size, defined in 2 Bytes */
+ l2_len = l3.hdr - l2_hdr;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
+
+ /* compute inner(/normal) L3 header size, defined in 4 Bytes */
+ l3_len = l4.hdr - l3.hdr;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
+
+ /* compute inner(/normal) L4 header size, defined in 4 Bytes */
switch (l4_proto) {
case IPPROTO_TCP:
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
HNS3_L4T_TCP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ l4.tcp->doff);
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
@@ -924,11 +883,15 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
HNS3_L4T_UDP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ (sizeof(struct udphdr) >> 2));
break;
case IPPROTO_SCTP:
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
HNS3_L4T_SCTP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ (sizeof(struct sctphdr) >> 2));
break;
default:
/* drop the skb tunnel packet if hardware don't support,
@@ -963,6 +926,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
{
#define HNS3_TX_VLAN_PRIO_SHIFT 13
+ struct hnae3_handle *handle = tx_ring->tqp->handle;
+
+ /* Due to a HW limitation, if port based insert VLAN is enabled, only one
+ * VLAN header is allowed in skb, otherwise it will cause RAS error.
+ */
+ if (unlikely(skb_vlan_tagged_multi(skb) &&
+ handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_ENABLE))
+ return -EINVAL;
+
if (skb->protocol == htons(ETH_P_8021Q) &&
!(tx_ring->tqp->handle->kinfo.netdev->features &
NETIF_F_HW_VLAN_CTAG_TX)) {
@@ -984,8 +957,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
* and use inner_vtag in one tag case.
*/
if (skb->protocol == htons(ETH_P_8021Q)) {
- hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
- *out_vtag = vlan_tag;
+ if (handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_DISABLE) {
+ hns3_set_field(*out_vlan_flag,
+ HNS3_TXD_OVLAN_B, 1);
+ *out_vtag = vlan_tag;
+ } else {
+ hns3_set_field(*inner_vlan_flag,
+ HNS3_TXD_VLAN_B, 1);
+ *inner_vtag = vlan_tag;
+ }
} else {
hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
*inner_vtag = vlan_tag;
@@ -1012,7 +993,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
struct device *dev = ring_to_dev(ring);
- u16 bdtp_fe_sc_vld_ra_ri = 0;
struct skb_frag_struct *frag;
unsigned int frag_buf_num;
int k, sizeoflast;
@@ -1042,12 +1022,10 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
if (unlikely(ret))
return ret;
- hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
- &type_cs_vlan_tso,
- &ol_type_vlan_len_msec);
- ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
- &type_cs_vlan_tso,
- &ol_type_vlan_len_msec);
+
+ ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
+ &type_cs_vlan_tso,
+ &ol_type_vlan_len_msec);
if (unlikely(ret))
return ret;
@@ -1073,19 +1051,37 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
}
- if (unlikely(dma_mapping_error(ring->dev, dma))) {
+ if (unlikely(dma_mapping_error(dev, dma))) {
ring->stats.sw_err_cnt++;
return -ENOMEM;
}
desc_cb->length = size;
+ if (likely(size <= HNS3_MAX_BD_SIZE)) {
+ u16 bdtp_fe_sc_vld_ra_ri = 0;
+
+ desc_cb->priv = priv;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+ desc->addr = cpu_to_le64(dma);
+ desc->tx.send_size = cpu_to_le16(size);
+ hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
+ desc->tx.bdtp_fe_sc_vld_ra_ri =
+ cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+
+ ring_ptr_move_fw(ring, next_to_use);
+ return 0;
+ }
+
frag_buf_num = hns3_tx_bd_count(size);
sizeoflast = size & HNS3_TX_LAST_SIZE_M;
sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
/* When frag size is bigger than hardware limit, split this frag */
for (k = 0; k < frag_buf_num; k++) {
+ u16 bdtp_fe_sc_vld_ra_ri = 0;
+
/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
desc_cb->priv = priv;
desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
@@ -1112,64 +1108,92 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
return 0;
}
-static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
- struct hns3_enet_ring *ring)
+static int hns3_nic_bd_num(struct sk_buff *skb)
{
- struct sk_buff *skb = *out_skb;
- struct sk_buff *new_skb = NULL;
- struct skb_frag_struct *frag;
- int bdnum_for_frag;
- int frag_num;
- int buf_num;
- int size;
- int i;
+ int size = skb_headlen(skb);
+ int i, bd_num;
- size = skb_headlen(skb);
- buf_num = hns3_tx_bd_count(size);
+ /* if the total len is within the max bd limit */
+ if (likely(skb->len <= HNS3_MAX_BD_SIZE))
+ return skb_shinfo(skb)->nr_frags + 1;
+
+ bd_num = hns3_tx_bd_count(size);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ int frag_bd_num;
- frag_num = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < frag_num; i++) {
- frag = &skb_shinfo(skb)->frags[i];
size = skb_frag_size(frag);
- bdnum_for_frag = hns3_tx_bd_count(size);
- if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
+ frag_bd_num = hns3_tx_bd_count(size);
+
+ if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG))
return -ENOMEM;
- buf_num += bdnum_for_frag;
+ bd_num += frag_bd_num;
}
- if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
- buf_num = hns3_tx_bd_count(skb->len);
- if (ring_space(ring) < buf_num)
- return -EBUSY;
- /* manual split the send packet */
- new_skb = skb_copy(skb, GFP_ATOMIC);
- if (!new_skb)
- return -ENOMEM;
- dev_kfree_skb_any(skb);
- *out_skb = new_skb;
- }
+ return bd_num;
+}
- if (unlikely(ring_space(ring) < buf_num))
- return -EBUSY;
+static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
+{
+ if (!skb->encapsulation)
+ return skb_transport_offset(skb) + tcp_hdrlen(skb);
- *bnum = buf_num;
- return 0;
+ return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}
-static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
- struct hns3_enet_ring *ring)
+/* HW needs every continuous 8 buffers' data to be larger than MSS,
+ * we simplify it by ensuring skb_headlen + the first continuous
+ * 7 frags to be larger than gso header len + mss, and the remaining
+ * continuous 7 frags to be larger than MSS except the last 7 frags.
+ */
+static bool hns3_skb_need_linearized(struct sk_buff *skb)
+{
+ int bd_limit = HNS3_MAX_BD_PER_FRAG - 1;
+ unsigned int tot_len = 0;
+ int i;
+
+ for (i = 0; i < bd_limit; i++)
+ tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ /* ensure headlen + the first 7 frags is greater than mss + header
+ * and the first 7 frags is greater than mss.
+ */
+ if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
+ hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
+ return true;
+
+ /* ensure the remaining continuous 7 buffers are greater than mss */
+ for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
+ tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
+
+ if (tot_len < skb_shinfo(skb)->gso_size)
+ return true;
+ }
+
+ return false;
+}
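The comment above describes the 8-BD rule that hns3_skb_need_linearized() enforces. As a standalone sketch of the same sliding-window check, with the skb frag sizes reduced to a plain array (the function name and BD_LIMIT constant below are illustrative, not the driver's):

/* Illustrative sketch only -- not part of the patch. */
#include <stdbool.h>
#include <stddef.h>

#define BD_LIMIT 7	/* i.e. HNS3_MAX_BD_PER_FRAG - 1 */

static bool need_linearize(size_t headlen, const size_t *frag, int nr_frags,
			   size_t gso_size, size_t hdr_len)
{
	size_t tot_len = 0;
	int i;

	/* sum of the first 7 frag sizes */
	for (i = 0; i < BD_LIMIT && i < nr_frags; i++)
		tot_len += frag[i];

	/* headlen + first 7 frags must hold header + one MSS,
	 * and the first 7 frags alone must hold one MSS
	 */
	if (tot_len + headlen < gso_size + hdr_len || tot_len < gso_size)
		return true;

	/* slide the 7-frag window; the window ending at the very last
	 * frag is deliberately not checked, matching the comment above
	 */
	for (i = 0; i < nr_frags - BD_LIMIT - 1; i++) {
		tot_len -= frag[i];
		tot_len += frag[i + BD_LIMIT];
		if (tot_len < gso_size)
			return true;
	}

	return false;
}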
+
+static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
+ struct sk_buff **out_skb)
{
struct sk_buff *skb = *out_skb;
- struct sk_buff *new_skb = NULL;
- int buf_num;
+ int bd_num;
- /* No. of segments (plus a header) */
- buf_num = skb_shinfo(skb)->nr_frags + 1;
+ bd_num = hns3_nic_bd_num(skb);
+ if (bd_num < 0)
+ return bd_num;
- if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
- buf_num = hns3_tx_bd_count(skb->len);
- if (ring_space(ring) < buf_num)
+ if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
+ struct sk_buff *new_skb;
+
+ if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb))
+ goto out;
+
+ bd_num = hns3_tx_bd_count(skb->len);
+ if (unlikely(ring_space(ring) < bd_num))
return -EBUSY;
/* manual split the send packet */
new_skb = skb_copy(skb, GFP_ATOMIC);
@@ -1177,14 +1201,17 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
return -ENOMEM;
dev_kfree_skb_any(skb);
*out_skb = new_skb;
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_copy++;
+ u64_stats_update_end(&ring->syncp);
}
- if (unlikely(ring_space(ring) < buf_num))
+out:
+ if (unlikely(ring_space(ring) < bd_num))
return -EBUSY;
- *bnum = buf_num;
-
- return 0;
+ return bd_num;
}
static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
@@ -1197,6 +1224,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
if (ring->next_to_use == next_to_use_orig)
break;
+ /* rollback one */
+ ring_ptr_move_bw(ring, next_to_use);
+
/* unmap the descriptor dma address */
if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
dma_unmap_single(dev,
@@ -1210,9 +1240,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
DMA_TO_DEVICE);
ring->desc_cb[ring->next_to_use].length = 0;
-
- /* rollback one */
- ring_ptr_move_bw(ring, next_to_use);
+ ring->desc_cb[ring->next_to_use].dma = 0;
}
}
@@ -1225,7 +1253,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
struct netdev_queue *dev_queue;
struct skb_frag_struct *frag;
int next_to_use_head;
- int next_to_use_frag;
int buf_num;
int seg_num;
int size;
@@ -1235,22 +1262,23 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Prefetch the data used later */
prefetch(skb->data);
- switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
- case -EBUSY:
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_busy++;
- u64_stats_update_end(&ring->syncp);
+ buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
+ if (unlikely(buf_num <= 0)) {
+ if (buf_num == -EBUSY) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_busy++;
+ u64_stats_update_end(&ring->syncp);
+ goto out_net_tx_busy;
+ } else if (buf_num == -ENOMEM) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ }
- goto out_net_tx_busy;
- case -ENOMEM:
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
- netdev_err(netdev, "no memory to xmit!\n");
+ if (net_ratelimit())
+ netdev_err(netdev, "xmit error: %d!\n", buf_num);
goto out_err_tx_ok;
- default:
- break;
}
/* No. of segments (plus a header) */
@@ -1263,9 +1291,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
DESC_TYPE_SKB);
if (unlikely(ret))
- goto head_fill_err;
+ goto fill_err;
- next_to_use_frag = ring->next_to_use;
/* Fill the fragments */
for (i = 1; i < seg_num; i++) {
frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1276,7 +1303,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
DESC_TYPE_PAGE);
if (unlikely(ret))
- goto frag_fill_err;
+ goto fill_err;
}
/* Complete translate all packets */
@@ -1289,10 +1316,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
-frag_fill_err:
- hns3_clear_desc(ring, next_to_use_frag);
-
-head_fill_err:
+fill_err:
hns3_clear_desc(ring, next_to_use_head);
out_err_tx_ok:
@@ -1355,13 +1379,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
bool enable;
int ret;
- if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
- if (features & (NETIF_F_TSO | NETIF_F_TSO6))
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- else
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
- }
-
if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
enable = !!(features & NETIF_F_GRO_HW);
ret = h->ae_algo->ops->set_gro_en(h, enable);
@@ -1574,6 +1591,9 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
struct hnae3_handle *h = hns3_get_handle(netdev);
int ret;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
if (!h->ae_algo->ops->set_mtu)
return -EOPNOTSUPP;
@@ -1590,13 +1610,19 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = hns3_get_handle(ndev);
struct hns3_enet_ring *tx_ring = NULL;
+ struct napi_struct *napi;
int timeout_queue = 0;
int hw_head, hw_tail;
+ int fbd_num, fbd_oft;
+ int ebd_num, ebd_oft;
+ int bd_num, bd_err;
+ int ring_en, tc;
int i;
/* Find the stopped queue the same way the stack does */
- for (i = 0; i < ndev->real_num_tx_queues; i++) {
+ for (i = 0; i < ndev->num_tx_queues; i++) {
struct netdev_queue *q;
unsigned long trans_start;
@@ -1617,21 +1643,66 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
return false;
}
+ priv->tx_timeout_count++;
+
tx_ring = priv->ring_data[timeout_queue].ring;
+ napi = &tx_ring->tqp_vector->napi;
+
+ netdev_info(ndev,
+ "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
+ priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
+ tx_ring->next_to_clean, napi->state);
+
+ netdev_info(ndev,
+ "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
+ tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
+ tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
+
+ netdev_info(ndev,
+ "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
+ tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
+ tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
+
+ /* When the mac receives many pause frames continuously, it is unable to
+ * send packets, which may cause tx timeout
+ */
+ if (h->ae_algo->ops->update_stats &&
+ h->ae_algo->ops->get_mac_pause_stats) {
+ u64 tx_pause_cnt, rx_pause_cnt;
+
+ h->ae_algo->ops->update_stats(h, &ndev->stats);
+ h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
+ &rx_pause_cnt);
+ netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
+ tx_pause_cnt, rx_pause_cnt);
+ }
hw_head = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_HEAD_REG);
hw_tail = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_TAIL_REG);
+ fbd_num = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_FBDNUM_REG);
+ fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_OFFSET_REG);
+ ebd_num = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_EBDNUM_REG);
+ ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_EBD_OFFSET_REG);
+ bd_num = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_BD_NUM_REG);
+ bd_err = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_BD_ERR_REG);
+ ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
+ tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
+
netdev_info(ndev,
- "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
- priv->tx_timeout_count,
- timeout_queue,
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- hw_head,
- hw_tail,
+ "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
+ bd_num, hw_head, hw_tail, bd_err,
readl(tx_ring->tqp_vector->mask_addr));
+ netdev_info(ndev,
+ "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
+ ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
return true;
}
@@ -1644,8 +1715,6 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
if (!hns3_get_tx_timeo_queue_info(ndev))
return;
- priv->tx_timeout_count++;
-
/* request the reset, and let the hclge to determine
* which reset level should be done
*/
@@ -1670,7 +1739,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
};
-static bool hns3_is_phys_func(struct pci_dev *pdev)
+bool hns3_is_phys_func(struct pci_dev *pdev)
{
u32 dev_id = pdev->device;
@@ -2117,17 +2186,30 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
ring->desc[i].rx.bd_base_info = 0;
}
-static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
- int *pkts)
+static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
+ int *bytes, int *pkts)
{
- struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+ int ntc = ring->next_to_clean;
+ struct hns3_desc_cb *desc_cb;
- (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
- (*bytes) += desc_cb->length;
- /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/
- hns3_free_buffer_detach(ring, ring->next_to_clean);
+ while (head != ntc) {
+ desc_cb = &ring->desc_cb[ntc];
+ (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
+ (*bytes) += desc_cb->length;
+ /* desc_cb will be cleaned, after hnae3_free_buffer_detach */
+ hns3_free_buffer_detach(ring, ntc);
- ring_ptr_move_fw(ring, next_to_clean);
+ if (++ntc == ring->desc_num)
+ ntc = 0;
+
+ /* Issue prefetch for next Tx descriptor */
+ prefetch(&ring->desc_cb[ntc]);
+ }
+
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * ring_space called by hns3_nic_net_xmit.
+ */
+ smp_store_release(&ring->next_to_clean, ntc);
}
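The comment above relies on a release/acquire pairing between the clean path and the xmit path. A standalone C11 sketch of that pairing is given below; struct ring_idx, publish_clean() and ring_space_sketch() are illustrative stand-ins, not the driver's actual ring bookkeeping:

/* Illustrative sketch only -- not part of the patch. */
#include <stdatomic.h>

struct ring_idx {
	_Atomic int next_to_clean;	/* written by the clean/consumer path */
	int next_to_use;		/* owned by the xmit/producer path */
	int desc_num;
};

/* clean path: publish the new index only after the buffers were freed */
static void publish_clean(struct ring_idx *r, int ntc)
{
	atomic_store_explicit(&r->next_to_clean, ntc, memory_order_release);
}

/* xmit path: an acquire load guarantees that if the new index is seen,
 * the frees that preceded the release store are seen as well
 */
static int ring_space_sketch(struct ring_idx *r)
{
	int ntc = atomic_load_explicit(&r->next_to_clean,
				       memory_order_acquire);

	/* one slot is kept empty so "full" and "empty" stay distinct */
	return (ntc - r->next_to_use - 1 + r->desc_num) % r->desc_num;
}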
static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
@@ -2167,11 +2249,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
bytes = 0;
pkts = 0;
- while (head != ring->next_to_clean) {
- hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
- /* Issue prefetch for next Tx descriptor */
- prefetch(&ring->desc_cb[ring->next_to_clean]);
- }
+ hns3_nic_reclaim_desc(ring, head, &bytes, &pkts);
ring->tqp_vector->tx_group.total_bytes += bytes;
ring->tqp_vector->tx_group.total_packets += pkts;
@@ -2233,6 +2311,10 @@ hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
break;
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.non_reuse_pg++;
+ u64_stats_update_end(&ring->syncp);
}
ring_ptr_move_fw(ring, next_to_use);
@@ -2246,64 +2328,78 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_enet_ring *ring, int pull_len,
struct hns3_desc_cb *desc_cb)
{
- struct hns3_desc *desc;
- u32 truesize;
- int size;
- int last_offset;
- bool twobufs;
-
- twobufs = ((PAGE_SIZE < 8192) &&
- hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
-
- desc = &ring->desc[ring->next_to_clean];
- size = le16_to_cpu(desc->rx.size);
-
- truesize = hnae3_buf_size(ring);
-
- if (!twobufs)
- last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
+ struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+ int size = le16_to_cpu(desc->rx.size);
+ u32 truesize = hnae3_buf_size(ring);
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
- /* Avoid re-using remote pages,flag default unreuse */
- if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
- return;
-
- if (twobufs) {
- /* If we are only owner of page we can reuse it */
- if (likely(page_count(desc_cb->priv) == 1)) {
- /* Flip page offset to other buffer */
- desc_cb->page_offset ^= truesize;
-
- desc_cb->reuse_flag = 1;
- /* bump ref count on page before it is given*/
- get_page(desc_cb->priv);
- }
+ /* Avoid re-using remote pages, or the stack is still using the page
+ * when page_offset rolls back to zero, flag default unreuse
+ */
+ if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()) ||
+ (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
return;
- }
/* Move offset up to the next cache line */
desc_cb->page_offset += truesize;
- if (desc_cb->page_offset <= last_offset) {
+ if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
desc_cb->reuse_flag = 1;
/* Bump ref count on page before it is given*/
get_page(desc_cb->priv);
+ } else if (page_count(desc_cb->priv) == 1) {
+ desc_cb->reuse_flag = 1;
+ desc_cb->page_offset = 0;
+ get_page(desc_cb->priv);
}
}
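The reuse conditions above can be summarized as a small decision routine. The sketch below reduces the descriptor callback to a few fields (struct rx_buf and decide_reuse() are illustrative names; page_refcount and page_is_local stand in for page_count() and the NUMA-node check):

/* Illustrative sketch only -- not part of the patch. */
#include <stdbool.h>

struct rx_buf {
	unsigned int page_offset;	/* start of the next rx buffer in the page */
	unsigned int buf_size;		/* per-descriptor buffer size */
	unsigned int page_size;
	int page_refcount;		/* stand-in for page_count() */
	bool page_is_local;		/* stand-in for page_to_nid() == numa_node_id() */
	bool reuse;
};

static void decide_reuse(struct rx_buf *b)
{
	b->reuse = false;

	/* remote page, or the stack still holds it while offset is back at 0 */
	if (!b->page_is_local ||
	    (!b->page_offset && b->page_refcount > 1))
		return;

	b->page_offset += b->buf_size;		/* move to the next chunk */

	if (b->page_offset + b->buf_size <= b->page_size) {
		b->reuse = true;		/* room left in this page */
		b->page_refcount++;		/* get_page() stand-in */
	} else if (b->page_refcount == 1) {
		b->reuse = true;		/* sole owner: wrap to offset 0 */
		b->page_offset = 0;
		b->page_refcount++;
	}
}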
+static int hns3_gro_complete(struct sk_buff *skb)
+{
+ __be16 type = skb->protocol;
+ struct tcphdr *th;
+ int depth = 0;
+
+ while (type == htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vh;
+
+ if ((depth + VLAN_HLEN) > skb_headlen(skb))
+ return -EFAULT;
+
+ vh = (struct vlan_hdr *)(skb->data + depth);
+ type = vh->h_vlan_encapsulated_proto;
+ depth += VLAN_HLEN;
+ }
+
+ if (type == htons(ETH_P_IP)) {
+ depth += sizeof(struct iphdr);
+ } else if (type == htons(ETH_P_IPV6)) {
+ depth += sizeof(struct ipv6hdr);
+ } else {
+ netdev_err(skb->dev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
+ be16_to_cpu(type), depth);
+ return -EFAULT;
+ }
+
+ th = (struct tcphdr *)(skb->data + depth);
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+ if (th->cwr)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return 0;
+}
+
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
- struct hns3_desc *desc)
+ u32 l234info, u32 bd_base_info, u32 ol_info)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int l3_type, l4_type;
- u32 bd_base_info;
int ol4_type;
- u32 l234info;
-
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- l234info = le32_to_cpu(desc->rx.l234_info);
skb->ip_summed = CHECKSUM_NONE;
@@ -2312,12 +2408,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (!(netdev->features & NETIF_F_RXCSUM))
return;
- /* We MUST enable hardware checksum before enabling hardware GRO */
- if (skb_shinfo(skb)->gso_size) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- return;
- }
-
/* check if hardware has done checksum */
if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
return;
@@ -2332,7 +2422,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
return;
}
- ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
+ ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
HNS3_RXD_OL4ID_S);
switch (ol4_type) {
case HNS3_OL4_TYPE_MAC_IN_UDP:
@@ -2370,6 +2460,7 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
struct hns3_desc *desc, u32 l234info,
u16 *vlan_tag)
{
+ struct hnae3_handle *handle = ring->tqp->handle;
struct pci_dev *pdev = ring->tqp->handle->pdev;
if (pdev->revision == 0x20) {
@@ -2382,15 +2473,36 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
#define HNS3_STRP_OUTER_VLAN 0x1
#define HNS3_STRP_INNER_VLAN 0x2
+#define HNS3_STRP_BOTH 0x3
+ /* Hardware always inserts a VLAN tag into the RX descriptor when
+ * stripping the tag from the packet, so the driver needs to determine
+ * which tag to report to the stack.
+ */
switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
HNS3_RXD_STRP_TAGP_S)) {
case HNS3_STRP_OUTER_VLAN:
+ if (handle->port_base_vlan_state !=
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ return false;
+
*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
return true;
case HNS3_STRP_INNER_VLAN:
+ if (handle->port_base_vlan_state !=
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ return false;
+
*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
return true;
+ case HNS3_STRP_BOTH:
+ if (handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ else
+ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+
+ return true;
default:
return false;
}
@@ -2437,7 +2549,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
ring->stats.seg_pkt_cnt++;
u64_stats_update_end(&ring->syncp);
- ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+ ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
__skb_put(skb, ring->pull_len);
hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
desc_cb);
@@ -2512,8 +2624,9 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
return 0;
}
-static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
- u32 bd_base_info)
+static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, u32 l234info,
+ u32 bd_base_info, u32 ol_info)
{
u16 gro_count;
u32 l3_type;
@@ -2521,12 +2634,11 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
HNS3_RXD_GRO_COUNT_S);
/* if there is no HW GRO, do not set gro params */
- if (!gro_count)
- return;
+ if (!gro_count) {
+ hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
+ return 0;
+ }
- /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
- * to skb_shinfo(skb)->gso_segs
- */
NAPI_GRO_CB(skb)->count = gro_count;
l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
@@ -2536,47 +2648,121 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
else if (l3_type == HNS3_L3_TYPE_IPV6)
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
else
- return;
+ return -EFAULT;
skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
HNS3_RXD_GRO_SIZE_M,
HNS3_RXD_GRO_SIZE_S);
- if (skb_shinfo(skb)->gso_size)
- tcp_gro_complete(skb);
+
+ return hns3_gro_complete(skb);
}
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 rss_hash)
{
struct hnae3_handle *handle = ring->tqp->handle;
enum pkt_hash_types rss_type;
- struct hns3_desc *desc;
- int last_bd;
-
- /* When driver handle the rss type, ring->next_to_clean indicates the
- * first descriptor of next packet, need -1 here.
- */
- last_bd = (ring->next_to_clean - 1 + ring->desc_num) % ring->desc_num;
- desc = &ring->desc[last_bd];
- if (le32_to_cpu(desc->rx.rss_hash))
+ if (rss_hash)
rss_type = handle->kinfo.rss_type;
else
rss_type = PKT_HASH_TYPE_NONE;
- skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
+ skb_set_hash(skb, rss_hash, rss_type);
}
-static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
- struct sk_buff **out_skb)
+static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
enum hns3_pkt_l2t_type l2_frame_type;
+ u32 bd_base_info, l234info, ol_info;
+ struct hns3_desc *desc;
+ unsigned int len;
+ int pre_ntc, ret;
+
+ /* bdinfo handled below is only valid on the last BD of the
+ * current packet, and ring->next_to_clean indicates the first
+ * descriptor of next packet, so need - 1 below.
+ */
+ pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
+ (ring->desc_num - 1);
+ desc = &ring->desc[pre_ntc];
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ l234info = le32_to_cpu(desc->rx.l234_info);
+ ol_info = le32_to_cpu(desc->rx.ol_info);
+
+ /* Based on hw strategy, the tag offloaded will be stored at
+ * ot_vlan_tag in two layer tag case, and stored at vlan_tag
+ * in one layer tag case.
+ */
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ u16 vlan_tag;
+
+ if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+
+ if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.non_vld_descs++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -EINVAL;
+ }
+
+ if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
+ BIT(HNS3_RXD_L2E_B))))) {
+ u64_stats_update_begin(&ring->syncp);
+ if (l234info & BIT(HNS3_RXD_L2E_B))
+ ring->stats.l2_err++;
+ else
+ ring->stats.err_pkt_len++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -EFAULT;
+ }
+
+ len = skb->len;
+
+ /* Do update ip stack process */
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* This is needed in order to enable forwarding support */
+ ret = hns3_set_gro_and_checksum(ring, skb, l234info,
+ bd_base_info, ol_info);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.rx_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+
+ l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
+ HNS3_RXD_DMAC_S);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.rx_pkts++;
+ ring->stats.rx_bytes += len;
+
+ if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
+ ring->stats.rx_multicast++;
+
+ u64_stats_update_end(&ring->syncp);
+
+ ring->tqp_vector->rx_group.total_bytes += len;
+
+ hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
+ return 0;
+}
+
+static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
+ struct sk_buff **out_skb)
+{
struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *desc;
u32 bd_base_info;
- u32 l234info;
int length;
int ret;
@@ -2636,64 +2822,13 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ALIGN(ring->pull_len, sizeof(long)));
}
- l234info = le32_to_cpu(desc->rx.l234_info);
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
-
- /* Based on hw strategy, the tag offloaded will be stored at
- * ot_vlan_tag in two layer tag case, and stored at vlan_tag
- * in one layer tag case.
- */
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
- u16 vlan_tag;
-
- if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
- __vlan_hwaccel_put_tag(skb,
- htons(ETH_P_8021Q),
- vlan_tag);
- }
-
- if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.non_vld_descs++;
- u64_stats_update_end(&ring->syncp);
-
- dev_kfree_skb_any(skb);
- return -EINVAL;
- }
-
- if (unlikely((!desc->rx.pkt_len) ||
- (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
- BIT(HNS3_RXD_L2E_B))))) {
- u64_stats_update_begin(&ring->syncp);
- if (l234info & BIT(HNS3_RXD_L2E_B))
- ring->stats.l2_err++;
- else
- ring->stats.err_pkt_len++;
- u64_stats_update_end(&ring->syncp);
-
+ ret = hns3_handle_bdinfo(ring, skb);
+ if (unlikely(ret)) {
dev_kfree_skb_any(skb);
- return -EFAULT;
+ return ret;
}
-
- l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
- HNS3_RXD_DMAC_S);
- u64_stats_update_begin(&ring->syncp);
- if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
- ring->stats.rx_multicast++;
-
- ring->stats.rx_pkts++;
- ring->stats.rx_bytes += skb->len;
- u64_stats_update_end(&ring->syncp);
-
- ring->tqp_vector->rx_group.total_bytes += skb->len;
-
- /* This is needed in order to enable forwarding support */
- hns3_set_gro_param(skb, l234info, bd_base_info);
-
- hns3_rx_checksum(ring, skb, desc);
*out_skb = skb;
- hns3_set_rx_skb_rss_type(ring, skb);
return 0;
}
@@ -2703,9 +2838,8 @@ int hns3_clean_rx_ring(
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int recv_pkts, recv_bds, clean_count, err;
- int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
+ int unused_count = hns3_desc_unused(ring);
struct sk_buff *skb = ring->skb;
int num;
@@ -2714,6 +2848,7 @@ int hns3_clean_rx_ring(
recv_pkts = 0, recv_bds = 0, clean_count = 0;
num -= unused_count;
+ unused_count -= ring->pending_buf;
while (recv_pkts < budget && recv_bds < num) {
/* Reuse or realloc buffers */
@@ -2740,8 +2875,6 @@ int hns3_clean_rx_ring(
continue;
}
- /* Do update ip stack process */
- skb->protocol = eth_type_trans(skb, netdev);
rx_fn(ring, skb);
recv_bds += ring->pending_buf;
clean_count += ring->pending_buf;
@@ -2891,7 +3024,7 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
struct hns3_enet_tqp_vector *tqp_vector =
container_of(napi, struct hns3_enet_tqp_vector, napi);
bool clean_complete = true;
- int rx_budget;
+ int rx_budget = budget;
if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
napi_complete(napi);
@@ -2905,7 +3038,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
hns3_clean_tx_ring(ring);
/* make sure rx ring budget not smaller than 1 */
- rx_budget = max(budget / tqp_vector->num_tqps, 1);
+ if (tqp_vector->num_tqps > 1)
+ rx_budget = max(budget / tqp_vector->num_tqps, 1);
hns3_for_each_ring(ring, tqp_vector->rx_group) {
int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
@@ -3316,6 +3450,7 @@ err:
}
devm_kfree(&pdev->dev, priv->ring_data);
+ priv->ring_data = NULL;
return ret;
}
@@ -3324,12 +3459,16 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv)
struct hnae3_handle *h = priv->ae_handle;
int i;
+ if (!priv->ring_data)
+ return;
+
for (i = 0; i < h->kinfo.num_tqps; i++) {
devm_kfree(priv->dev, priv->ring_data[i].ring);
devm_kfree(priv->dev,
priv->ring_data[i + h->kinfo.num_tqps].ring);
}
devm_kfree(priv->dev, priv->ring_data);
+ priv->ring_data = NULL;
}
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
@@ -3339,8 +3478,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
if (ring->desc_num <= 0 || ring->buf_size <= 0)
return -EINVAL;
- ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
- GFP_KERNEL);
+ ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
+ sizeof(ring->desc_cb[0]), GFP_KERNEL);
if (!ring->desc_cb) {
ret = -ENOMEM;
goto out;
@@ -3361,7 +3500,7 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
out_with_desc:
hns3_free_desc(ring);
out_with_desc_cb:
- kfree(ring->desc_cb);
+ devm_kfree(ring_to_dev(ring), ring->desc_cb);
ring->desc_cb = NULL;
out:
return ret;
@@ -3370,7 +3509,7 @@ out:
static void hns3_fini_ring(struct hns3_enet_ring *ring)
{
hns3_free_desc(ring);
- kfree(ring->desc_cb);
+ devm_kfree(ring_to_dev(ring), ring->desc_cb);
ring->desc_cb = NULL;
ring->next_to_clean = 0;
ring->next_to_use = 0;
@@ -3557,17 +3696,6 @@ static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
h->ae_algo->ops->del_all_fd_entries(h, clear_list);
}
-static void hns3_nic_set_priv_ops(struct net_device *netdev)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
-
- if ((netdev->features & NETIF_F_TSO) ||
- (netdev->features & NETIF_F_TSO6))
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- else
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
-}
-
static int hns3_client_start(struct hnae3_handle *handle)
{
if (!handle->ae_algo->ops->client_start)
@@ -3584,6 +3712,21 @@ static void hns3_client_stop(struct hnae3_handle *handle)
handle->ae_algo->ops->client_stop(handle);
}
+static void hns3_info_show(struct hns3_nic_priv *priv)
+{
+ struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+
+ dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
+ dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps);
+ dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
+ dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
+ dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
+ dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
+ dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
+ dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
+ dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
+}
+
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
@@ -3605,6 +3748,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->tx_timeout_count = 0;
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+ handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
+
handle->kinfo.netdev = netdev;
handle->priv = (void *)priv;
@@ -3617,7 +3762,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
netdev->netdev_ops = &hns3_nic_netdev_ops;
SET_NETDEV_DEV(netdev, &pdev->dev);
hns3_ethtool_set_ops(netdev);
- hns3_nic_set_priv_ops(netdev);
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
@@ -3671,6 +3815,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+ if (netif_msg_drv(handle))
+ hns3_info_show(priv);
+
return ret;
out_client_start:
@@ -3697,13 +3844,13 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
- hns3_client_stop(handle);
-
hns3_remove_hw_addr(netdev);
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
+ hns3_client_stop(handle);
+
if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
goto out_netdev_free;
@@ -3729,8 +3876,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_dbg_uninit(handle);
- priv->ring_data = NULL;
-
out_netdev_free:
free_netdev(netdev);
}
@@ -3745,11 +3890,13 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
if (linkup) {
netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
- netdev_info(netdev, "link up\n");
+ if (netif_msg_link(handle))
+ netdev_info(netdev, "link up\n");
} else {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
- netdev_info(netdev, "link down\n");
+ if (netif_msg_link(handle))
+ netdev_info(netdev, "link down\n");
}
}
@@ -3773,12 +3920,13 @@ static int hns3_recover_hw_addr(struct net_device *ndev)
struct netdev_hw_addr *ha, *tmp;
int ret = 0;
+ netif_addr_lock_bh(ndev);
/* go through and sync uc_addr entries to the device */
list = &ndev->uc;
list_for_each_entry_safe(ha, tmp, &list->list, list) {
ret = hns3_nic_uc_sync(ndev, ha->addr);
if (ret)
- return ret;
+ goto out;
}
/* go through and sync mc_addr entries to the device */
@@ -3786,9 +3934,11 @@ static int hns3_recover_hw_addr(struct net_device *ndev)
list_for_each_entry_safe(ha, tmp, &list->list, list) {
ret = hns3_nic_mc_sync(ndev, ha->addr);
if (ret)
- return ret;
+ goto out;
}
+out:
+ netif_addr_unlock_bh(ndev);
return ret;
}
@@ -3799,6 +3949,7 @@ static void hns3_remove_hw_addr(struct net_device *netdev)
hns3_nic_uc_unsync(netdev, netdev->dev_addr);
+ netif_addr_lock_bh(netdev);
/* go through and unsync uc_addr entries to the device */
list = &netdev->uc;
list_for_each_entry_safe(ha, tmp, &list->list, list)
@@ -3809,6 +3960,8 @@ static void hns3_remove_hw_addr(struct net_device *netdev)
list_for_each_entry_safe(ha, tmp, &list->list, list)
if (ha->refcount > 1)
hns3_nic_mc_unsync(netdev, ha->addr);
+
+ netif_addr_unlock_bh(netdev);
}
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
@@ -3850,6 +4003,13 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
ring_ptr_move_fw(ring, next_to_use);
}
+ /* Free the pending skb in rx ring */
+ if (ring->skb) {
+ dev_kfree_skb_any(ring->skb);
+ ring->skb = NULL;
+ ring->pending_buf = 0;
+ }
+
return 0;
}
@@ -4048,18 +4208,24 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
if (ret)
goto err_uninit_vector;
+ ret = hns3_client_start(handle);
+ if (ret) {
+ dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
+ goto err_uninit_ring;
+ }
+
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
+err_uninit_ring:
+ hns3_uninit_all_ring(priv);
err_uninit_vector:
hns3_nic_uninit_vector_data(priv);
- priv->ring_data = NULL;
err_dealloc_vector:
hns3_nic_dealloc_vector_data(priv);
err_put_ring:
hns3_put_ring_config(priv);
- priv->ring_data = NULL;
return ret;
}
@@ -4101,7 +4267,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
- if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
return 0;
}
@@ -4121,9 +4287,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
netdev_err(netdev, "uninit ring error\n");
hns3_put_ring_config(priv);
- priv->ring_data = NULL;
-
- clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
}