Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3/hns3_enet.c')
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 244
1 file changed, 221 insertions(+), 23 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index babc5d7a3b52..4cb2421e71a7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1028,46 +1028,56 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
 
 static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
 {
+	u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
 	struct hns3_tx_spare *tx_spare;
 	struct page *page;
-	u32 alloc_size;
 	dma_addr_t dma;
 	int order;
 
-	alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
 	if (!alloc_size)
 		return;
 
 	order = get_order(alloc_size);
+	if (order >= MAX_ORDER) {
+		if (net_ratelimit())
+			dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n");
+		return;
+	}
+
 	tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
 				GFP_KERNEL);
 	if (!tx_spare) {
 		/* The driver still work without the tx spare buffer */
 		dev_warn(ring_to_dev(ring),
 			 "failed to allocate hns3_tx_spare\n");
-		return;
+		goto devm_kzalloc_error;
 	}
 
 	page = alloc_pages_node(dev_to_node(ring_to_dev(ring)),
 				GFP_KERNEL, order);
 	if (!page) {
 		dev_warn(ring_to_dev(ring),
 			 "failed to allocate tx spare pages\n");
-		devm_kfree(ring_to_dev(ring), tx_spare);
-		return;
+		goto alloc_pages_error;
 	}
 
 	dma = dma_map_page(ring_to_dev(ring), page, 0,
 			   PAGE_SIZE << order, DMA_TO_DEVICE);
 	if (dma_mapping_error(ring_to_dev(ring), dma)) {
 		dev_warn(ring_to_dev(ring),
 			 "failed to map pages for tx spare\n");
-		put_page(page);
-		devm_kfree(ring_to_dev(ring), tx_spare);
-		return;
+		goto dma_mapping_error;
 	}
 
 	tx_spare->dma = dma;
 	tx_spare->buf = page_address(page);
 	tx_spare->len = PAGE_SIZE << order;
 	ring->tx_spare = tx_spare;
+	return;
+
+dma_mapping_error:
+	put_page(page);
+alloc_pages_error:
+	devm_kfree(ring_to_dev(ring), tx_spare);
+devm_kzalloc_error:
+	ring->tqp->handle->kinfo.tx_spare_buf_size = 0;
 }
 
 /* Use hns3_tx_spare_space() to make sure there is enough buffer
@@ -1828,9 +1838,9 @@ static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
 static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
 {
 	if (!skb->encapsulation)
-		return skb_transport_offset(skb) + tcp_hdrlen(skb);
+		return skb_tcp_all_headers(skb);
 
-	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+	return skb_inner_tcp_all_headers(skb);
 }
 
 /* HW need every continuous max_non_tso_bd_num buffer data to be larger
@@ -2028,9 +2038,73 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
 	return bd_num;
 }
 
+static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num)
+{
+#define HNS3_BYTES_PER_64BIT		8
+
+	struct hns3_desc desc[HNS3_MAX_PUSH_BD_NUM] = {};
+	int offset = 0;
+
+	/* make sure everything is visible to device before
+	 * executing tx push or updating doorbell
+	 */
+	dma_wmb();
+
+	do {
+		int idx = (ring->next_to_use - num + ring->desc_num) %
+			  ring->desc_num;
+
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.tx_push++;
+		u64_stats_update_end(&ring->syncp);
+		memcpy(&desc[offset], &ring->desc[idx],
+		       sizeof(struct hns3_desc));
+		offset++;
+	} while (--num);
+
+	__iowrite64_copy(ring->tqp->mem_base, desc,
+			 (sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) /
+			 HNS3_BYTES_PER_64BIT);
+
+	io_stop_wc();
+}
+
+static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
+{
+#define HNS3_MEM_DOORBELL_OFFSET	64
+
+	__le64 bd_num = cpu_to_le64((u64)ring->pending_buf);
+
+	/* make sure everything is visible to device before
+	 * executing tx push or updating doorbell
+	 */
+	dma_wmb();
+
+	__iowrite64_copy(ring->tqp->mem_base + HNS3_MEM_DOORBELL_OFFSET,
+			 &bd_num, 1);
+	u64_stats_update_begin(&ring->syncp);
+	ring->stats.tx_mem_doorbell += ring->pending_buf;
+	u64_stats_update_end(&ring->syncp);
+
+	io_stop_wc();
+}
+
 static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
 			     bool doorbell)
 {
+	struct net_device *netdev = ring_to_netdev(ring);
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+	/* when tx push is enabled, packets whose BD number is below
+	 * HNS3_MAX_PUSH_BD_NUM can be pushed directly.
+	 */
+	if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
+	    !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
+		hns3_tx_push_bd(ring, num);
+		WRITE_ONCE(ring->last_to_use, ring->next_to_use);
+		return;
+	}
+
 	ring->pending_buf += num;
 
 	if (!doorbell) {
@@ -2038,11 +2112,12 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
 		return;
 	}
 
-	if (!ring->pending_buf)
-		return;
+	if (ring->tqp->mem_base)
+		hns3_tx_mem_doorbell(ring);
+	else
+		writel(ring->pending_buf,
+		       ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
 
-	writel(ring->pending_buf,
-	       ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
 	ring->pending_buf = 0;
 	WRITE_ONCE(ring->last_to_use, ring->next_to_use);
 }
@@ -2732,6 +2807,9 @@ static void hns3_dump_queue_stats(struct net_device *ndev,
 		    "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
 		    tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
 		    tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
+
+	netdev_info(ndev, "tx_push: %llu, tx_mem_doorbell: %llu\n",
+		    tx_ring->stats.tx_push, tx_ring->stats.tx_mem_doorbell);
 }
 
 static void hns3_dump_queue_reg(struct net_device *ndev,
@@ -2885,6 +2963,48 @@ static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 	return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
 }
 
+#define HNS3_INVALID_DSCP		0xff
+#define HNS3_DSCP_SHIFT			2
+
+static u8 hns3_get_skb_dscp(struct sk_buff *skb)
+{
+	__be16 protocol = skb->protocol;
+	u8 dscp = HNS3_INVALID_DSCP;
+
+	if (protocol == htons(ETH_P_8021Q))
+		protocol = vlan_get_protocol(skb);
+
+	if (protocol == htons(ETH_P_IP))
+		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> HNS3_DSCP_SHIFT;
+	else if (protocol == htons(ETH_P_IPV6))
+		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> HNS3_DSCP_SHIFT;
+
+	return dscp;
+}
+
+static u16 hns3_nic_select_queue(struct net_device *netdev,
+				 struct sk_buff *skb,
+				 struct net_device *sb_dev)
+{
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+	u8 dscp;
+
+	if (h->kinfo.tc_map_mode != HNAE3_TC_MAP_MODE_DSCP ||
+	    !h->ae_algo->ops->get_dscp_prio)
+		goto out;
+
+	dscp = hns3_get_skb_dscp(skb);
+	if (unlikely(dscp >= HNAE3_MAX_DSCP))
+		goto out;
+
+	skb->priority = h->kinfo.dscp_prio[dscp];
+	if (skb->priority == HNAE3_PRIO_ID_INVALID)
+		skb->priority = 0;
+
+out:
+	return netdev_pick_tx(netdev, skb, sb_dev);
+}
+
 static const struct net_device_ops hns3_nic_netdev_ops = {
 	.ndo_open		= hns3_nic_net_open,
 	.ndo_stop		= hns3_nic_net_stop,
@@ -2910,6 +3030,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
 	.ndo_set_vf_link_state	= hns3_nic_set_vf_link_state,
 	.ndo_set_vf_rate	= hns3_nic_set_vf_rate,
 	.ndo_set_vf_mac		= hns3_nic_set_vf_mac,
+	.ndo_select_queue	= hns3_nic_select_queue,
 };
 
 bool hns3_is_phys_func(struct pci_dev *pdev)
@@ -2982,6 +3103,21 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return ret;
 }
 
+/**
+ * hns3_clean_vf_config
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs allocated
+ *
+ * Clean residual VF config after SR-IOV is disabled
+ **/
+static void hns3_clean_vf_config(struct pci_dev *pdev, int num_vfs)
+{
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+	if (ae_dev->ops->clean_vf_config)
+		ae_dev->ops->clean_vf_config(ae_dev, num_vfs);
+}
+
 /* hns3_remove - Device removal routine
  * @pdev: PCI device information struct
  */
@@ -3020,7 +3156,10 @@ static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
 		else
 			return num_vfs;
 	} else if (!pci_vfs_assigned(pdev)) {
+		int num_vfs_pre = pci_num_vf(pdev);
+
 		pci_disable_sriov(pdev);
+		hns3_clean_vf_config(pdev, num_vfs_pre);
 	} else {
 		dev_warn(&pdev->dev,
 			 "Unable to free VFs because some are assigned to VMs.\n");
@@ -3175,12 +3314,11 @@ static void hns3_set_default_feature(struct net_device *netdev)
 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
 		NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
 
-	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+	if (hnae3_ae_dev_gro_supported(ae_dev))
 		netdev->features |= NETIF_F_GRO_HW;
 
-		if (!(h->flags & HNAE3_SUPPORT_VF))
-			netdev->features |= NETIF_F_NTUPLE;
-	}
+	if (hnae3_ae_dev_fd_supported(ae_dev))
+		netdev->features |= NETIF_F_NTUPLE;
 
 	if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps))
 		netdev->features |= NETIF_F_GSO_UDP_L4;
@@ -4554,7 +4692,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
 			goto map_ring_fail;
 
 		netif_napi_add(priv->netdev, &tqp_vector->napi,
-			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
+			       hns3_nic_common_poll);
 	}
 
 	return 0;
@@ -5063,10 +5201,7 @@ static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
 			priv->tqp_vector[i].rx_group.dim.mode = mode;
 	}
 
-	/* only device version above V3(include V3), GL can switch CQ/EQ
-	 * period mode.
-	 */
-	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
+	if (hnae3_ae_dev_cq_supported(ae_dev)) {
 		u32 new_mode;
 		u64 reg;
 
@@ -5094,6 +5229,9 @@ static void hns3_state_init(struct hnae3_handle *handle)
 
 	set_bit(HNS3_NIC_STATE_INITED, &priv->state);
 
+	if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+		set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
 	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
 		set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
 
@@ -5104,6 +5242,13 @@ static void hns3_state_init(struct hnae3_handle *handle)
 		set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
 }
 
+static void hns3_state_uninit(struct hnae3_handle *handle)
+{
+	struct hns3_nic_priv *priv = handle->priv;
+
+	clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
+}
+
 static int hns3_client_init(struct hnae3_handle *handle)
 {
 	struct pci_dev *pdev = handle->pdev;
@@ -5221,7 +5366,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	return ret;
 
 out_reg_netdev_fail:
+	hns3_state_uninit(handle);
 	hns3_dbg_uninit(handle);
+	hns3_client_stop(handle);
 out_client_start:
 	hns3_free_rx_cpu_rmap(netdev);
 	hns3_nic_uninit_irq(priv);
@@ -5677,6 +5824,57 @@ int hns3_set_channels(struct net_device *netdev,
 	return 0;
 }
 
+void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
+{
+	struct hns3_nic_priv *priv = netdev_priv(ndev);
+	struct hnae3_handle *h = priv->ae_handle;
+	int i;
+
+	if (!if_running)
+		return;
+
+	netif_carrier_off(ndev);
+	netif_tx_disable(ndev);
+
+	for (i = 0; i < priv->vector_num; i++)
+		hns3_vector_disable(&priv->tqp_vector[i]);
+
+	for (i = 0; i < h->kinfo.num_tqps; i++)
+		hns3_tqp_disable(h->kinfo.tqp[i]);
+
+	/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
+	 * during reset process, because driver may not be able
+	 * to disable the ring through firmware when downing the netdev.
+	 */
+	if (!hns3_nic_resetting(ndev))
+		hns3_nic_reset_all_ring(priv->ae_handle);
+
+	hns3_reset_tx_queue(priv->ae_handle);
+}
+
+void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
+{
+	struct hns3_nic_priv *priv = netdev_priv(ndev);
+	struct hnae3_handle *h = priv->ae_handle;
+	int i;
+
+	if (!if_running)
+		return;
+
+	hns3_nic_reset_all_ring(priv->ae_handle);
+
+	for (i = 0; i < priv->vector_num; i++)
+		hns3_vector_enable(&priv->tqp_vector[i]);
+
+	for (i = 0; i < h->kinfo.num_tqps; i++)
+		hns3_tqp_enable(h->kinfo.tqp[i]);
+
+	netif_tx_wake_all_queues(ndev);
+
+	if (h->ae_algo->ops->get_status(h))
+		netif_carrier_on(ndev);
+}
+
 static const struct hns3_hw_error_info hns3_hw_err[] = {
 	{ .type = HNAE3_PPU_POISON_ERROR,
 	  .msg = "PPU poison" },
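
In the hns3_init_tx_spare_buffer() hunk above, get_order() rounds the requested spare-buffer size up to a power-of-two page order, and the new order >= MAX_ORDER guard rejects sizes the page allocator can never satisfy (valid orders run from 0 to MAX_ORDER - 1 in kernels of this vintage). A rough user-space sketch of that rounding follows; the 4 KiB page size and MAX_ORDER of 11 are illustrative assumptions, as both are configuration-dependent in the kernel:

#include <stdio.h>

#define PAGE_SHIFT 12  /* assumed 4 KiB pages */
#define MAX_ORDER  11  /* assumed buddy-allocator limit */

/* Rough equivalent of the kernel's get_order(): the smallest order
 * such that (PAGE_SIZE << order) covers the requested size.
 */
static int get_order_sketch(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 4096, 65536, 9UL << 20 }; /* illustrative */

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int order = get_order_sketch(sizes[i]);

		printf("size %lu -> order %d%s\n", sizes[i], order,
		       order >= MAX_ORDER ? " (rejected: order >= MAX_ORDER)" : "");
	}
	return 0;
}

With these assumptions a 9 MiB request rounds up to order 12 (16 MiB) and is refused, which is the failure mode the ratelimited warning in that hunk reports before the buf_size is zeroed on the error paths.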
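
The hns3_nic_select_queue() hunk maps a packet's DSCP to a priority before falling back to netdev_pick_tx(). DSCP occupies the upper six bits of the IPv4 TOS / IPv6 traffic-class octet, hence the shift right by HNS3_DSCP_SHIFT (2) to drop the two ECN bits. A minimal user-space sketch of that extraction; the sample DS-field values are illustrative, not from the patch:

#include <stdint.h>
#include <stdio.h>

#define DSCP_SHIFT 2  /* DSCP is the top 6 bits of the DS field */

/* Mirrors ipv4_get_dsfield(ip_hdr(skb)) >> HNS3_DSCP_SHIFT above:
 * discard the two ECN bits, keep the 6-bit DSCP codepoint.
 */
static uint8_t dscp_from_dsfield(uint8_t dsfield)
{
	return dsfield >> DSCP_SHIFT;
}

int main(void)
{
	/* Illustrative DS-field octets: EF (0xb8), CS1 (0x20), default. */
	const uint8_t samples[] = { 0xb8, 0x20, 0x00 };

	for (unsigned int i = 0; i < sizeof(samples); i++)
		printf("dsfield 0x%02x -> dscp %u\n",
		       samples[i], dscp_from_dsfield(samples[i]));
	return 0;
}

This prints dscp 46 (EF) for 0xb8 and dscp 8 (CS1) for 0x20. The driver uses the result to index kinfo.dscp_prio[]; a DSCP at or above HNAE3_MAX_DSCP, or a looked-up priority of HNAE3_PRIO_ID_INVALID, falls through to the default netdev_pick_tx() selection.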