Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_main.c')
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_main.c	954
1 file changed, 496 insertions, 458 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 718931d951bc..298cfbfcb7b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -28,6 +28,7 @@
 #include <linux/bpf_trace.h>
 #include <linux/atomic.h>
 #include <linux/numa.h>
+#include <generated/utsrelease.h>
 #include <scsi/fc/fc_fcoe.h>
 #include <net/udp_tunnel.h>
 #include <net/pkt_cls.h>
@@ -35,7 +36,7 @@
 #include <net/tc_act/tc_mirred.h>
 #include <net/vxlan.h>
 #include <net/mpls.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 #include <net/xfrm.h>
 
 #include "ixgbe.h"
@@ -56,8 +57,6 @@ char ixgbe_default_device_descr[] =
 static char ixgbe_default_device_descr[] =
 			      "Intel(R) 10 Gigabit Network Connection";
 #endif
-#define DRV_VERSION "5.1.0-k"
-const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
 				"Copyright (c) 1999-2016 Intel Corporation.";
@@ -152,8 +151,8 @@ MODULE_PARM_DESC(max_vfs,
 		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
 #endif /* CONFIG_PCI_IOV */
 
-static unsigned int allow_unsupported_sfp;
-module_param(allow_unsupported_sfp, uint, 0);
+static bool allow_unsupported_sfp;
+module_param(allow_unsupported_sfp, bool, 0);
 MODULE_PARM_DESC(allow_unsupported_sfp,
 		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
@@ -165,7 +164,9 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
+
+DEFINE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
+EXPORT_SYMBOL(ixgbe_xdp_locking_key);
 
 static struct workqueue_struct *ixgbe_wq;
@@ -227,7 +228,7 @@ static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
 }
 
 /**
- * ixgbe_check_from_parent - Determine whether PCIe info should come from parent
+ * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
@@ -1397,7 +1398,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
 					IXGBE_DCA_CTRL_DCA_MODE_CB2);
 			break;
 		}
-		/* fall through - DCA is disabled. */
+		fallthrough; /* DCA is disabled. */
 	case DCA_PROVIDER_REMOVE:
 		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
 			dca_remove_requester(dev);
@@ -1522,7 +1523,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
 	}
 }
 
-static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
+static unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
 {
 	return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
 }
@@ -1563,7 +1564,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 	bi->dma = dma;
 	bi->page = page;
-	bi->page_offset = ixgbe_rx_offset(rx_ring);
+	bi->page_offset = rx_ring->rx_offset;
 	page_ref_add(page, USHRT_MAX - 1);
 	bi->pagecnt_bias = USHRT_MAX;
 	rx_ring->rx_stats.alloc_rx_page++;
@@ -1827,7 +1828,8 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
 				struct sk_buff *skb)
 {
 	if (ring_uses_build_skb(rx_ring)) {
-		unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
+		unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1;
+		unsigned long offset = (unsigned long)(skb->data) & mask;
 
 		dma_sync_single_range_for_cpu(rx_ring->dev,
 					      IXGBE_CB(skb)->dma,
@@ -1942,23 +1944,19 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
 	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
 }
 
-static inline bool ixgbe_page_is_reserved(struct page *page)
-{
-	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
-static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
+				    int rx_buffer_pgcnt)
 {
 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
 	struct page *page = rx_buffer->page;
 
-	/* avoid re-using remote pages */
-	if (unlikely(ixgbe_page_is_reserved(page)))
+	/* avoid re-using remote and pfmemalloc pages */
+	if (!dev_page_is_reusable(page))
 		return false;
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
 		return false;
 #else
 	/* The last offset is a bit aggressive in that we assume the
@@ -2007,8 +2005,8 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
 #else
-	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
-				SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
+	unsigned int truesize = rx_ring->rx_offset ?
+				SKB_DATA_ALIGN(rx_ring->rx_offset + size) :
 				SKB_DATA_ALIGN(size);
 #endif
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
@@ -2023,11 +2021,18 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
 						   union ixgbe_adv_rx_desc *rx_desc,
 						   struct sk_buff **skb,
-						   const unsigned int size)
+						   const unsigned int size,
+						   int *rx_buffer_pgcnt)
 {
 	struct ixgbe_rx_buffer *rx_buffer;
 
 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	*rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+		page_count(rx_buffer->page);
+#else
+		0;
+#endif
 	prefetchw(rx_buffer->page);
 	*skb = rx_buffer->skb;
@@ -2057,9 +2062,10 @@ skip_sync:
 
 static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
 				struct ixgbe_rx_buffer *rx_buffer,
-				struct sk_buff *skb)
+				struct sk_buff *skb,
+				int rx_buffer_pgcnt)
 {
-	if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+	if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
 		/* hand second half of page back to the ring */
 		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
 	} else {
@@ -2097,10 +2103,8 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
 	struct sk_buff *skb;
 
 	/* prefetch first cache line of first page */
-	prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
-	prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+	net_prefetch(xdp->data);
+
 	/* Note, we get here by enabling legacy-rx via:
 	 *
 	 *    ethtool --set-priv-flags <dev> legacy-rx on
@@ -2163,13 +2167,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
 	 * likely have a consumer accessing first few bytes of meta
 	 * data, and then actual data.
 	 */
-	prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
-	prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+	net_prefetch(xdp->data_meta);
 
 	/* build an skb to around the page buffer */
-	skb = build_skb(xdp->data_hard_start, truesize);
+	skb = napi_build_skb(xdp->data_hard_start, truesize);
 	if (unlikely(!skb))
 		return NULL;
@@ -2199,10 +2200,10 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
 {
 	int err, result = IXGBE_XDP_PASS;
 	struct bpf_prog *xdp_prog;
+	struct ixgbe_ring *ring;
 	struct xdp_frame *xdpf;
 	u32 act;
 
-	rcu_read_lock();
 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
 
 	if (!xdp_prog)
@@ -2215,48 +2216,63 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
 	case XDP_PASS:
 		break;
 	case XDP_TX:
-		xdpf = convert_to_xdp_frame(xdp);
-		if (unlikely(!xdpf)) {
-			result = IXGBE_XDP_CONSUMED;
-			break;
-		}
-		result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+		xdpf = xdp_convert_buff_to_frame(xdp);
+		if (unlikely(!xdpf))
+			goto out_failure;
+		ring = ixgbe_determine_xdp_ring(adapter);
+		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+			spin_lock(&ring->tx_lock);
+		result = ixgbe_xmit_xdp_ring(ring, xdpf);
+		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+			spin_unlock(&ring->tx_lock);
+		if (result == IXGBE_XDP_CONSUMED)
+			goto out_failure;
 		break;
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
-		if (!err)
-			result = IXGBE_XDP_REDIR;
-		else
-			result = IXGBE_XDP_CONSUMED;
+		if (err)
+			goto out_failure;
+		result = IXGBE_XDP_REDIR;
 		break;
 	default:
-		bpf_warn_invalid_xdp_action(act);
-		/* fallthrough */
+		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
+		fallthrough;
 	case XDP_ABORTED:
+out_failure:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
-		/* fallthrough -- handle aborts by dropping packet */
+		fallthrough; /* handle aborts by dropping packet */
 	case XDP_DROP:
 		result = IXGBE_XDP_CONSUMED;
 		break;
 	}
 xdp_out:
-	rcu_read_unlock();
 	return ERR_PTR(-result);
 }
 
+static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
+					    unsigned int size)
+{
+	unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+	truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+	truesize = rx_ring->rx_offset ?
+		   SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+		   SKB_DATA_ALIGN(size);
+#endif
+	return truesize;
+}
+
 static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
 				 struct ixgbe_rx_buffer *rx_buffer,
 				 unsigned int size)
 {
+	unsigned int truesize = ixgbe_rx_frame_truesize(rx_ring, size);
 #if (PAGE_SIZE < 8192)
-	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
-
 	rx_buffer->page_offset ^= truesize;
 #else
-	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
-				SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
-				SKB_DATA_ALIGN(size);
-
 	rx_buffer->page_offset += truesize;
 #endif
 }
@@ -2278,22 +2294,28 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			       struct ixgbe_ring *rx_ring,
 			       const int budget)
 {
-	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 #ifdef IXGBE_FCOE
 	int ddp_bytes;
 	unsigned int mss = 0;
 #endif /* IXGBE_FCOE */
 	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
+	unsigned int offset = rx_ring->rx_offset;
 	unsigned int xdp_xmit = 0;
 	struct xdp_buff xdp;
 
-	xdp.rxq = &rx_ring->xdp_rxq;
+	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+	frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
+#endif
+	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
 
 	while (likely(total_rx_packets < budget)) {
 		union ixgbe_adv_rx_desc *rx_desc;
 		struct ixgbe_rx_buffer *rx_buffer;
 		struct sk_buff *skb;
+		int rx_buffer_pgcnt;
 		unsigned int size;
 
 		/* return some buffers to hardware, one at a time is too slow */
@@ -2313,17 +2335,20 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		 */
 		dma_rmb();
 
-		rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+		rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
 
 		/* retrieve a buffer from the ring */
 		if (!skb) {
-			xdp.data = page_address(rx_buffer->page) +
-				   rx_buffer->page_offset;
-			xdp.data_meta = xdp.data;
-			xdp.data_hard_start = xdp.data -
-					      ixgbe_rx_offset(rx_ring);
-			xdp.data_end = xdp.data + size;
-
+			unsigned char *hard_start;
+
+			hard_start = page_address(rx_buffer->page) +
+				     rx_buffer->page_offset - offset;
+			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+			xdp_buff_clear_frags_flag(&xdp);
+#if (PAGE_SIZE > 4096)
+			/* At larger PAGE_SIZE, frame_sz depend on len size */
+			xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
+#endif
 			skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
 		}
@@ -2355,7 +2380,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			break;
 		}
 
-		ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
+		ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
 		cleaned_count++;
 
 		/* place incomplete frames back on ring for completion */
@@ -2407,13 +2432,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		xdp_do_flush_map();
 
 	if (xdp_xmit & IXGBE_XDP_TX) {
-		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+		struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
 
-		/* Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch.
-		 */
-		wmb();
-		writel(ring->next_to_use, ring->tail);
+		ixgbe_xdp_ring_update_tail_locked(ring);
 	}
 
 	u64_stats_update_begin(&rx_ring->syncp);
@@ -2954,35 +2975,6 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
 	/* skip the flush */
 }
 
-static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
-					    u64 qmask)
-{
-	u32 mask;
-	struct ixgbe_hw *hw = &adapter->hw;
-
-	switch (hw->mac.type) {
-	case ixgbe_mac_82598EB:
-		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
-		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-	case ixgbe_mac_x550em_a:
-		mask = (qmask & 0xFFFFFFFF);
-		if (mask)
-			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
-		mask = (qmask >> 32);
-		if (mask)
-			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
-		break;
-	default:
-		break;
-	}
-	/* skip the flush */
-}
-
 /**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
@@ -3019,7 +3011,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
 	case ixgbe_mac_82599EB:
 		mask |= IXGBE_EIMS_GPI_SDP1(hw);
 		mask |= IXGBE_EIMS_GPI_SDP2(hw);
-		/* fall through */
+		fallthrough;
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
@@ -3168,7 +3160,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 #endif
 
 	ixgbe_for_each_ring(ring, q_vector->tx) {
-		bool wd = ring->xsk_umem ?
+		bool wd = ring->xsk_pool ?
 			  ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
 			  ixgbe_clean_tx_irq(q_vector, ring, budget);
@@ -3188,7 +3180,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 		per_ring_budget = budget;
 
 	ixgbe_for_each_ring(ring, q_vector->rx) {
-		int cleaned = ring->xsk_umem ?
+		int cleaned = ring->xsk_pool ?
 			      ixgbe_clean_rx_irq_zc(q_vector, ring,
 						    per_ring_budget) :
 			      ixgbe_clean_rx_irq(q_vector, ring,
@@ -3256,8 +3248,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 		/* If Flow Director is enabled, set interrupt affinity */
 		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
 			/* assign the mask for this irq */
-			irq_set_affinity_hint(entry->vector,
-					      &q_vector->affinity_mask);
+			irq_update_affinity_hint(entry->vector,
+						 &q_vector->affinity_mask);
 		}
 	}
@@ -3273,8 +3265,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 free_queue_irqs:
 	while (vector) {
 		vector--;
-		irq_set_affinity_hint(adapter->msix_entries[vector].vector,
-				      NULL);
+		irq_update_affinity_hint(adapter->msix_entries[vector].vector,
+					 NULL);
 		free_irq(adapter->msix_entries[vector].vector,
 			 adapter->q_vector[vector]);
 	}
@@ -3325,7 +3317,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 	switch (hw->mac.type) {
 	case ixgbe_mac_82599EB:
 		ixgbe_check_sfp_event(adapter, eicr);
-		/* Fall through */
+		fallthrough;
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
@@ -3407,7 +3399,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 			continue;
 
 		/* clear the affinity_mask in the IRQ descriptor */
-		irq_set_affinity_hint(entry->vector, NULL);
+		irq_update_affinity_hint(entry->vector, NULL);
 
 		free_irq(entry->vector, q_vector);
 	}
@@ -3483,9 +3475,9 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 	u32 txdctl = IXGBE_TXDCTL_ENABLE;
 	u8 reg_idx = ring->reg_idx;
 
-	ring->xsk_umem = NULL;
+	ring->xsk_pool = NULL;
 	if (ring_is_xdp(ring))
-		ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
+		ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
 
 	/* disable queue to avoid issues while updating state */
 	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
@@ -3725,9 +3717,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
 	srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
 
 	/* configure the packet buffer length */
-	if (rx_ring->xsk_umem) {
-		u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr -
-				  XDP_PACKET_HEADROOM;
+	if (rx_ring->xsk_pool) {
+		u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);
 
 		/* If the MAC support setting RXDCTL.RLPML, the
 		 * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
@@ -4072,13 +4063,12 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 	u8 reg_idx = ring->reg_idx;
 
 	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
-	ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
-	if (ring->xsk_umem) {
-		ring->zca.free = ixgbe_zca_free;
+	ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
+	if (ring->xsk_pool) {
 		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
-						   MEM_TYPE_ZERO_COPY,
-						   &ring->zca));
-
+						   MEM_TYPE_XSK_BUFF_POOL,
+						   NULL));
+		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
 	} else {
 		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 						   MEM_TYPE_PAGE_SHARED, NULL));
@@ -4133,9 +4123,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 #endif
 	}
 
-	if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
-		u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr -
-				  XDP_PACKET_HEADROOM;
+	ring->rx_offset = ixgbe_rx_offset(ring);
+
+	if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
+		u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
 
 		rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
 			    IXGBE_RXDCTL_RLPML_EN);
@@ -4157,7 +4148,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
 
 	ixgbe_rx_desc_queue_enable(adapter, ring);
-	if (ring->xsk_umem)
+	if (ring->xsk_pool)
 		ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
 	else
 		ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
@@ -4350,7 +4341,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
 	case ixgbe_mac_x550em_a:
 		if (adapter->num_vfs)
 			rdrxctl |= IXGBE_RDRXCTL_PSP;
-		/* fall through */
+		fallthrough;
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
 		/* Disable RSC for ACK packets */
@@ -5009,24 +5000,41 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 		napi_disable(&adapter->q_vector[q_idx]->napi);
 }
 
-static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
+static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
 {
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 vxlanctrl;
+	struct udp_tunnel_info ti;
 
-	if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
-				IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
-		return;
+	udp_tunnel_nic_get_port(dev, table, 0, &ti);
+	if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
+		adapter->vxlan_port = ti.port;
+	else
+		adapter->geneve_port = ti.port;
 
-	vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
-	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
+	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL,
+			ntohs(adapter->vxlan_port) |
+			ntohs(adapter->geneve_port) <<
+				IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT);
+	return 0;
+}
 
-	if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
-		adapter->vxlan_port = 0;
+static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550 = {
+	.sync_table	= ixgbe_udp_tunnel_sync,
+	.flags		= UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+	.tables		= {
+		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+	},
+};
 
-	if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
-		adapter->geneve_port = 0;
-}
+static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550em_a = {
+	.sync_table	= ixgbe_udp_tunnel_sync,
+	.flags		= UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+	.tables		= {
+		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+	},
+};
 
 #ifdef CONFIG_IXGBE_DCB
 /**
@@ -5044,12 +5052,12 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 
 	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
 		if (hw->mac.type == ixgbe_mac_82598EB)
-			netif_set_gso_max_size(adapter->netdev, 65536);
+			netif_set_tso_max_size(adapter->netdev, 65536);
 		return;
 	}
 
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		netif_set_gso_max_size(adapter->netdev, 32768);
+		netif_set_tso_max_size(adapter->netdev, 32768);
 
 #ifdef IXGBE_FCOE
 	if (adapter->netdev->features & NETIF_F_FCOE_MTU)
@@ -5153,7 +5161,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
 }
 
 /**
- * ixgbe_lpbthresh - calculate low water mark for for flow control
+ * ixgbe_lpbthresh - calculate low water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
@@ -5290,7 +5298,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 	u16 i = rx_ring->next_to_clean;
 	struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
 
-	if (rx_ring->xsk_umem) {
+	if (rx_ring->xsk_pool) {
 		ixgbe_xsk_clean_rx_ring(rx_ring);
 		goto skip_free;
 	}
@@ -5394,9 +5402,10 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
 	return err;
 }
 
-static int ixgbe_macvlan_up(struct net_device *vdev, void *data)
+static int ixgbe_macvlan_up(struct net_device *vdev,
+			    struct netdev_nested_priv *priv)
 {
-	struct ixgbe_adapter *adapter = data;
+	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
 	struct ixgbe_fwd_adapter *accel;
 
 	if (!netif_is_macvlan(vdev))
@@ -5413,8 +5422,12 @@ static int ixgbe_macvlan_up(struct net_device *vdev, void *data)
 
 static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
 {
+	struct netdev_nested_priv priv = {
+		.data = (void *)adapter,
+	};
+
 	netdev_walk_all_upper_dev_rcu(adapter->netdev,
-				      ixgbe_macvlan_up, adapter);
+				      ixgbe_macvlan_up, &priv);
 }
 
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
@@ -5516,9 +5529,17 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
 		return ret;
 
 	speed = hw->phy.autoneg_advertised;
-	if ((!speed) && (hw->mac.ops.get_link_capabilities))
+	if (!speed && hw->mac.ops.get_link_capabilities) {
 		ret = hw->mac.ops.get_link_capabilities(hw, &speed,
 							&autoneg);
+		/* remove NBASE-T speeds from default autonegotiation
+		 * to accommodate broken network switches in the field
+		 * which cannot cope with advertised NBASE-T speeds
+		 */
+		speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
+			   IXGBE_LINK_SPEED_2_5GB_FULL);
+	}
+
 	if (ret)
 		return ret;
@@ -5528,6 +5549,47 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
 	return ret;
 }
 
+/**
+ * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
+ * @adapter: board private structure
+ *
+ * On a reset we need to clear out the VF stats or accounting gets
+ * messed up because they're not clear on read.
+ **/
+static void ixgbe_clear_vf_stats_counters(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i;
+
+	for (i = 0; i < adapter->num_vfs; i++) {
+		adapter->vfinfo[i].last_vfstats.gprc =
+			IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i));
+		adapter->vfinfo[i].saved_rst_vfstats.gprc +=
+			adapter->vfinfo[i].vfstats.gprc;
+		adapter->vfinfo[i].vfstats.gprc = 0;
+		adapter->vfinfo[i].last_vfstats.gptc =
+			IXGBE_READ_REG(hw, IXGBE_PVFGPTC(i));
+		adapter->vfinfo[i].saved_rst_vfstats.gptc +=
+			adapter->vfinfo[i].vfstats.gptc;
+		adapter->vfinfo[i].vfstats.gptc = 0;
+		adapter->vfinfo[i].last_vfstats.gorc =
+			IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(i));
+		adapter->vfinfo[i].saved_rst_vfstats.gorc +=
+			adapter->vfinfo[i].vfstats.gorc;
+		adapter->vfinfo[i].vfstats.gorc = 0;
+		adapter->vfinfo[i].last_vfstats.gotc =
+			IXGBE_READ_REG(hw, IXGBE_PVFGOTC_LSB(i));
+		adapter->vfinfo[i].saved_rst_vfstats.gotc +=
+			adapter->vfinfo[i].vfstats.gotc;
+		adapter->vfinfo[i].vfstats.gotc = 0;
+		adapter->vfinfo[i].last_vfstats.mprc =
+			IXGBE_READ_REG(hw, IXGBE_PVFMPRC(i));
+		adapter->vfinfo[i].saved_rst_vfstats.mprc +=
+			adapter->vfinfo[i].vfstats.mprc;
+		adapter->vfinfo[i].vfstats.mprc = 0;
+	}
+}
+
 static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -5663,15 +5725,18 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
 		adapter->link_check_timeout = jiffies;
 	mod_timer(&adapter->service_timer, jiffies);
 
+	ixgbe_clear_vf_stats_counters(adapter);
 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
 	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+	/* update setting rx tx for all active vfs */
+	ixgbe_set_all_vfs(adapter);
 }
 
 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
 {
-	WARN_ON(in_interrupt());
 	/* put off any impending NetWatchDogTimeout */
 	netif_trans_update(adapter->netdev);
@@ -5900,7 +5965,7 @@ dma_engine_disable:
 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
 				(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
 				 ~IXGBE_DMATXCTL_TE));
-		/* fall through */
+		fallthrough;
 	default:
 		break;
 	}
@@ -5929,8 +5994,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 	case IXGBE_ERR_SFP_NOT_PRESENT:
 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
 		break;
-	case IXGBE_ERR_MASTER_REQUESTS_PENDING:
-		e_dev_err("master disable timed out\n");
+	case IXGBE_ERR_PRIMARY_REQUESTS_PENDING:
+		e_dev_err("primary disable timed out\n");
 		break;
 	case IXGBE_ERR_EEPROM_VERSION:
 		/* We are running on a pre-production device, log a warning */
@@ -5978,7 +6043,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
 	u16 i = tx_ring->next_to_clean;
 	struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
 
-	if (tx_ring->xsk_umem) {
+	if (tx_ring->xsk_pool) {
 		ixgbe_xsk_clean_tx_ring(tx_ring);
 		goto out;
 	}
@@ -6125,11 +6190,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 		for (i = 0 ; i < adapter->num_vfs; i++)
 			adapter->vfinfo[i].clear_to_send = false;
 
-		/* ping all the active vfs to let them know we are going down */
-		ixgbe_ping_all_vfs(adapter);
-
-		/* Disable all VFTE/VFRE TX/RX */
-		ixgbe_disable_tx_rx(adapter);
+		/* update setting rx tx for all active vfs */
+		ixgbe_set_all_vfs(adapter);
 	}
 
 	/* disable transmits in the hardware now that interrupts are off */
@@ -6147,7 +6209,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 }
 
 /**
- * ixgbe_eee_capable - helper function to determine EEE support on X550
+ * ixgbe_set_eee_capable - helper function to determine EEE support on X550
 * @adapter: board private structure
 */
 static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
@@ -6174,8 +6236,9 @@ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
 /**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
+ * @txqueue: queue number that timed out
 **/
-static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -6309,7 +6372,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 	if (ixgbe_init_rss_key(adapter))
 		return -ENOMEM;
 
-	adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
+	adapter->af_xdp_zc_qps = bitmap_zalloc(IXGBE_MAX_XDP_QS, GFP_KERNEL);
 	if (!adapter->af_xdp_zc_qps)
 		return -ENOMEM;
@@ -6343,7 +6406,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 		adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
 		break;
 	case ixgbe_mac_x550em_a:
-		adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
 		switch (hw->device_id) {
 		case IXGBE_DEV_ID_X550EM_A_1G_T:
 		case IXGBE_DEV_ID_X550EM_A_1G_T_L:
@@ -6352,7 +6414,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 		default:
 			break;
 		}
-		/* fall through */
+		fallthrough;
 	case ixgbe_mac_X550EM_x:
 #ifdef CONFIG_IXGBE_DCB
 		adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
@@ -6363,14 +6425,13 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 		adapter->fcoe.up = 0;
 #endif /* IXGBE_DCB */
 #endif /* IXGBE_FCOE */
-		/* Fall Through */
+		fallthrough;
 	case ixgbe_mac_X550:
 		if (hw->mac.type == ixgbe_mac_X550)
 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
#ifdef CONFIG_IXGBE_DCA
 		adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
#endif
-		adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
 		break;
 	default:
 		break;
@@ -6384,6 +6445,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 	/* n-tuple support exists, always init our spinlock */
 	spin_lock_init(&adapter->fdir_perfect_lock);
 
+	/* init spinlock to avoid concurrency of VF resources */
+	spin_lock_init(&adapter->vfs_lock);
+
 #ifdef CONFIG_IXGBE_DCB
 	ixgbe_init_dcb(adapter);
 #endif
@@ -6526,6 +6590,13 @@ err_setup_tx:
 	return err;
 }
 
+static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring)
+{
+	struct ixgbe_q_vector *q_vector = rx_ring->q_vector;
+
+	return q_vector ? q_vector->napi.napi_id : 0;
+}
+
 /**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: pointer to ixgbe_adapter
@@ -6573,7 +6644,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 
 	/* XDP RX-queue info */
 	if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
-			     rx_ring->queue_index) < 0)
+			     rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0)
 		goto err;
 
 	rx_ring->xdp_prog = adapter->xdp_prog;
@@ -6809,8 +6880,7 @@ int ixgbe_open(struct net_device *netdev)
 
 	ixgbe_up_complete(adapter);
 
-	ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
-	udp_tunnel_get_rx_info(netdev);
+	udp_tunnel_nic_reset_ntf(netdev);
 
 	return 0;
@@ -6874,21 +6944,14 @@ int ixgbe_close(struct net_device *netdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int ixgbe_resume(struct pci_dev *pdev)
+static int __maybe_unused ixgbe_resume(struct device *dev_d)
 {
+	struct pci_dev *pdev = to_pci_dev(dev_d);
 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev = adapter->netdev;
 	u32 err;
 
 	adapter->hw.hw_addr = adapter->io_addr;
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	/*
-	 * pci_restore_state clears dev->state_saved so call
-	 * pci_save_state to restore it.
-	 */
-	pci_save_state(pdev);
 
 	err = pci_enable_device_mem(pdev);
 	if (err) {
@@ -6899,7 +6962,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
 	clear_bit(__IXGBE_DISABLED, &adapter->state);
 	pci_set_master(pdev);
 
-	pci_wake_from_d3(pdev, false);
+	device_wakeup_disable(dev_d);
 
 	ixgbe_reset(adapter);
@@ -6917,7 +6980,6 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
 	return err;
 }
-#endif /* CONFIG_PM */
 
 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 {
@@ -6926,9 +6988,6 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 ctrl;
 	u32 wufc = adapter->wol;
-#ifdef CONFIG_PM
-	int retval = 0;
-#endif
 
 	rtnl_lock();
 	netif_device_detach(netdev);
@@ -6939,12 +6998,6 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	ixgbe_clear_interrupt_scheme(adapter);
 	rtnl_unlock();
 
-#ifdef CONFIG_PM
-	retval = pci_save_state(pdev);
-	if (retval)
-		return retval;
-
-#endif
 	if (hw->mac.ops.stop_link_on_d3)
 		hw->mac.ops.stop_link_on_d3(hw);
@@ -6999,26 +7052,18 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused ixgbe_suspend(struct device *dev_d)
 {
+	struct pci_dev *pdev = to_pci_dev(dev_d);
 	int retval;
 	bool wake;
 
 	retval = __ixgbe_shutdown(pdev, &wake);
-	if (retval)
-		return retval;
 
-	if (wake) {
-		pci_prepare_to_sleep(pdev);
-	} else {
-		pci_wake_from_d3(pdev, false);
-		pci_set_power_state(pdev, PCI_D3hot);
-	}
+	device_set_wakeup_enable(dev_d, wake);
 
-	return 0;
+	return retval;
 }
-#endif /* CONFIG_PM */
 
 static void ixgbe_shutdown(struct pci_dev *pdev)
 {
@@ -7064,7 +7109,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 	}
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+		struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
+
+		if (!rx_ring)
+			continue;
 		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
 		alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
 		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
@@ -7085,15 +7133,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 	packets = 0;
 	/* gather some stats to the adapter struct that are per queue */
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+		struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
+
+		if (!tx_ring)
+			continue;
 		restart_queue += tx_ring->tx_stats.restart_queue;
 		tx_busy += tx_ring->tx_stats.tx_busy;
 		bytes += tx_ring->stats.bytes;
 		packets += tx_ring->stats.packets;
 	}
 	for (i = 0; i < adapter->num_xdp_queues; i++) {
-		struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
+		struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
+		if (!xdp_ring)
+			continue;
 		restart_queue += xdp_ring->tx_stats.restart_queue;
 		tx_busy += xdp_ring->tx_stats.tx_busy;
 		bytes += xdp_ring->stats.bytes;
@@ -7175,7 +7228,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 		hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
 		hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
 		hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
-		/* fall through */
+		fallthrough;
 	case ixgbe_mac_82599EB:
 		for (i = 0; i < 16; i++)
 			adapter->hw_rx_no_dma_resources +=
@@ -7263,6 +7316,32 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 	netdev->stats.rx_length_errors = hwstats->rlec;
 	netdev->stats.rx_crc_errors = hwstats->crcerrs;
 	netdev->stats.rx_missed_errors = total_mpc;
+
+	/* VF Stats Collection - skip while resetting because these
+	 * are not clear on read and otherwise you'll sometimes get
+	 * crazy values.
+	 */
+	if (!test_bit(__IXGBE_RESETTING, &adapter->state)) {
+		for (i = 0; i < adapter->num_vfs; i++) {
+			UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(i),
+						adapter->vfinfo[i].last_vfstats.gprc,
+						adapter->vfinfo[i].vfstats.gprc);
+			UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPTC(i),
+						adapter->vfinfo[i].last_vfstats.gptc,
+						adapter->vfinfo[i].vfstats.gptc);
+			UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(i),
+						IXGBE_PVFGORC_MSB(i),
+						adapter->vfinfo[i].last_vfstats.gorc,
+						adapter->vfinfo[i].vfstats.gorc);
+			UPDATE_VF_COUNTER_36bit(IXGBE_PVFGOTC_LSB(i),
+						IXGBE_PVFGOTC_MSB(i),
+						adapter->vfinfo[i].last_vfstats.gotc,
+						adapter->vfinfo[i].vfstats.gotc);
+			UPDATE_VF_COUNTER_32bit(IXGBE_PVFMPRC(i),
+						adapter->vfinfo[i].last_vfstats.mprc,
+						adapter->vfinfo[i].vfstats.mprc);
+		}
+	}
 }
 
 /**
@@ -7606,6 +7685,27 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
 }
 
 #ifdef CONFIG_PCI_IOV
+static void ixgbe_bad_vf_abort(struct ixgbe_adapter *adapter, u32 vf)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
+	    adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) {
+		adapter->vfinfo[vf].primary_abort_count++;
+		if (adapter->vfinfo[vf].primary_abort_count ==
+		    IXGBE_PRIMARY_ABORT_LIMIT) {
+			ixgbe_set_vf_link_state(adapter, vf,
+						IFLA_VF_LINK_STATE_DISABLE);
+			adapter->vfinfo[vf].primary_abort_count = 0;
+
+			e_info(drv,
+			       "Malicious Driver Detection event detected on PF %d VF %d MAC: %pM mdd-disable-vf=on",
+			       hw->bus.func, vf,
+			       adapter->vfinfo[vf].vf_mac_addresses);
+		}
+	}
+}
+
 static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -7637,8 +7737,10 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
 			continue;
 		pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
 		if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
-		    status_reg & PCI_STATUS_REC_MASTER_ABORT)
+		    status_reg & PCI_STATUS_REC_MASTER_ABORT) {
+			ixgbe_bad_vf_abort(adapter, vf);
 			pcie_flr(vfdev);
+		}
 	}
 }
@@ -7924,12 +8026,6 @@ static void ixgbe_service_task(struct work_struct *work)
 		ixgbe_service_event_complete(adapter);
 		return;
 	}
-	if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
-		rtnl_lock();
-		adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
-		udp_tunnel_get_rx_info(adapter->netdev);
-		rtnl_unlock();
-	}
 	ixgbe_reset_subtask(adapter);
 	ixgbe_phy_interrupt_subtask(adapter);
 	ixgbe_sfp_detection_subtask(adapter);
@@ -8055,15 +8151,6 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	return 1;
 }
 
-static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
-	unsigned int offset = 0;
-
-	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
-	return offset == skb_checksum_start_offset(skb);
-}
-
 static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
 			  struct ixgbe_tx_buffer *first,
 			  struct ixgbe_ipsec_tx_data *itd)
@@ -8084,19 +8171,16 @@ csum_failed:
 	switch (skb->csum_offset) {
 	case offsetof(struct tcphdr, check):
 		type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
-		/* fall through */
+		fallthrough;
 	case offsetof(struct udphdr, check):
 		break;
 	case offsetof(struct sctphdr, checksum):
 		/* validate that this is actually an SCTP request */
-		if (((first->protocol == htons(ETH_P_IP)) &&
-		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
-		    ((first->protocol == htons(ETH_P_IPV6)) &&
-		     ixgbe_ipv6_csum_is_sctp(skb))) {
+		if (skb_csum_is_sctp(skb)) {
 			type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
 			break;
 		}
-		/* fall through */
+		fallthrough;
 	default:
 		skb_checksum_help(skb);
 		goto csum_failed;
@@ -8539,7 +8623,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 		if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
 			break;
-		/* fall through */
+		fallthrough;
 	default:
 		return netdev_pick_tx(dev, skb, sb_dev);
 	}
@@ -8556,61 +8640,86 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 }
 #endif
 
-int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring,
 			struct xdp_frame *xdpf)
 {
-	struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
-	struct ixgbe_tx_buffer *tx_buffer;
-	union ixgbe_adv_tx_desc *tx_desc;
-	u32 len, cmd_type;
-	dma_addr_t dma;
-	u16 i;
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+	u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+	u16 i = 0, index = ring->next_to_use;
+	struct ixgbe_tx_buffer *tx_head = &ring->tx_buffer_info[index];
+	struct ixgbe_tx_buffer *tx_buff = tx_head;
+	union ixgbe_adv_tx_desc *tx_desc = IXGBE_TX_DESC(ring, index);
+	u32 cmd_type, len = xdpf->len;
+	void *data = xdpf->data;
 
-	len = xdpf->len;
-
-	if (unlikely(!ixgbe_desc_unused(ring)))
+	if (unlikely(ixgbe_desc_unused(ring) < 1 + nr_frags))
 		return IXGBE_XDP_CONSUMED;
 
-	dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(ring->dev, dma))
-		return IXGBE_XDP_CONSUMED;
+	tx_head->bytecount = xdp_get_frame_len(xdpf);
+	tx_head->gso_segs = 1;
+	tx_head->xdpf = xdpf;
 
-	/* record the location of the first descriptor for this packet */
-	tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
-	tx_buffer->bytecount = len;
-	tx_buffer->gso_segs = 1;
-	tx_buffer->protocol = 0;
+	tx_desc->read.olinfo_status =
+		cpu_to_le32(tx_head->bytecount << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+	for (;;) {
+		dma_addr_t dma;
+
+		dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
+		if (dma_mapping_error(ring->dev, dma))
+			goto unmap;
 
-	i = ring->next_to_use;
-	tx_desc = IXGBE_TX_DESC(ring, i);
+		dma_unmap_len_set(tx_buff, len, len);
+		dma_unmap_addr_set(tx_buff, dma, dma);
 
-	dma_unmap_len_set(tx_buffer, len, len);
-	dma_unmap_addr_set(tx_buffer, dma, dma);
-	tx_buffer->xdpf = xdpf;
+		cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT |
+			   IXGBE_ADVTXD_DCMD_IFCS | len;
+		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
+		tx_buff->protocol = 0;
+
+		if (++index == ring->count)
+			index = 0;
+
+		if (i == nr_frags)
+			break;
 
-	tx_desc->read.buffer_addr = cpu_to_le64(dma);
+		tx_buff = &ring->tx_buffer_info[index];
+		tx_desc = IXGBE_TX_DESC(ring, index);
+		tx_desc->read.olinfo_status = 0;
+		data = skb_frag_address(&sinfo->frags[i]);
+		len = skb_frag_size(&sinfo->frags[i]);
+		i++;
+	}
 
 	/* put descriptor type bits */
-	cmd_type = IXGBE_ADVTXD_DTYP_DATA |
-		   IXGBE_ADVTXD_DCMD_DEXT |
-		   IXGBE_ADVTXD_DCMD_IFCS;
-	cmd_type |= len | IXGBE_TXD_CMD;
-	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
-	tx_desc->read.olinfo_status =
-		cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+	tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
 
 	/* Avoid any potential race with xdp_xmit and cleanup */
 	smp_wmb();
 
-	/* set next_to_watch value indicating a packet is present */
-	i++;
-	if (i == ring->count)
-		i = 0;
-
-	tx_buffer->next_to_watch = tx_desc;
-	ring->next_to_use = i;
+	tx_head->next_to_watch = tx_desc;
+	ring->next_to_use = index;
 
 	return IXGBE_XDP_TX;
+
+unmap:
+	for (;;) {
+		tx_buff = &ring->tx_buffer_info[index];
+		if (dma_unmap_len(tx_buff, len))
+			dma_unmap_page(ring->dev, dma_unmap_addr(tx_buff, dma),
+				       dma_unmap_len(tx_buff, len),
+				       DMA_TO_DEVICE);
+		dma_unmap_len_set(tx_buff, len, 0);
+		if (tx_buff == tx_head)
+			break;
+
+		if (!index)
+			index += ring->count;
+		index--;
+	}
+
+	return IXGBE_XDP_CONSUMED;
 }
 
 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
@@ -8808,7 +8917,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
-	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	eth_hw_addr_set(netdev, addr->sa_data);
 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
 	ixgbe_mac_set_default_filter(adapter);
@@ -8873,7 +8982,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
 	case SIOCGMIIPHY:
 		if (!adapter->hw.phy.ops.read_reg)
 			return -EOPNOTSUPP;
-		/* fall through */
+		fallthrough;
 	default:
 		return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
 	}
@@ -8984,6 +9093,23 @@ static void ixgbe_get_stats64(struct net_device *netdev,
 	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
 }
 
+static int ixgbe_ndo_get_vf_stats(struct net_device *netdev, int vf,
+				  struct ifla_vf_stats *vf_stats)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (vf < 0 || vf >= adapter->num_vfs)
+		return -EINVAL;
+
+	vf_stats->rx_packets = adapter->vfinfo[vf].vfstats.gprc;
+	vf_stats->rx_bytes = adapter->vfinfo[vf].vfstats.gorc;
+	vf_stats->tx_packets = adapter->vfinfo[vf].vfstats.gptc;
+	vf_stats->tx_bytes = adapter->vfinfo[vf].vfstats.gotc;
+	vf_stats->multicast = adapter->vfinfo[vf].vfstats.mprc;
+
+	return 0;
+}
+
 #ifdef CONFIG_IXGBE_DCB
 /**
 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
@@ -9048,9 +9174,10 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
 }
 
 #endif /* CONFIG_IXGBE_DCB */
-static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
+static int ixgbe_reassign_macvlan_pool(struct net_device *vdev,
+				       struct netdev_nested_priv *priv)
 {
-	struct ixgbe_adapter *adapter = data;
+	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
 	struct ixgbe_fwd_adapter *accel;
 	int pool;
@@ -9087,13 +9214,16 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
 static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct netdev_nested_priv priv = {
+		.data = (void *)adapter,
+	};
 
 	/* flush any stale bits out of the fwd bitmask */
 	bitmap_clear(adapter->fwd_bitmask, 1, 63);
 
 	/* walk through upper devices reassigning pools */
 	netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
-				      adapter);
+				      &priv);
 }
@@ -9267,14 +9397,18 @@ struct upper_walk_data {
 	u8 queue;
 };
 
-static int get_macvlan_queue(struct net_device *upper, void *_data)
+static int get_macvlan_queue(struct net_device *upper,
+			     struct netdev_nested_priv *priv)
 {
 	if (netif_is_macvlan(upper)) {
 		struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
-		struct upper_walk_data *data = _data;
-		struct ixgbe_adapter *adapter = data->adapter;
-		int ifindex = data->ifindex;
+		struct ixgbe_adapter *adapter;
+		struct upper_walk_data *data;
+		int ifindex;
 
+		data = (struct upper_walk_data *)priv->data;
+		ifindex = data->ifindex;
+		adapter = data->adapter;
 		if (vadapter && upper->ifindex == ifindex) {
 			data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
 			data->action = data->queue;
@@ -9290,6 +9424,7 @@ static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
 {
 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
 	unsigned int num_vfs = adapter->num_vfs, vf;
+	struct netdev_nested_priv priv;
 	struct upper_walk_data data;
 	struct net_device *upper;
@@ -9309,8 +9444,9 @@ static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
 	data.ifindex = ifindex;
 	data.action = 0;
 	data.queue = 0;
+	priv.data = (void *)&data;
 	if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
-					  get_macvlan_queue, &data)) {
+					  get_macvlan_queue, &priv)) {
 		*action = data.action;
 		*queue = data.queue;
@@ -9587,8 +9723,10 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
 	ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
 	err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
 						    input->sw_idx, queue);
-	if (!err)
-		ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+	if (err)
+		goto err_out_w_lock;
+
+	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
 	spin_unlock(&adapter->fdir_perfect_lock);
 
 	if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
@@ -9787,26 +9925,6 @@ static int ixgbe_set_features(struct net_device *netdev,
 
 	netdev->features = features;
 
-	if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
-		if (features & NETIF_F_RXCSUM) {
-			adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
-		} else {
-			u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
-
-			ixgbe_clear_udp_tunnel_port(adapter, port_mask);
-		}
-	}
-
-	if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
-		if (features & NETIF_F_RXCSUM) {
-			adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
-		} else {
-			u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
-
-			ixgbe_clear_udp_tunnel_port(adapter, port_mask);
-		}
-	}
-
 	if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
 		ixgbe_reset_l2fw_offload(adapter);
 	else if (need_reset)
@@ -9818,118 +9936,6 @@ static int ixgbe_set_features(struct net_device *netdev,
 	return 1;
 }
 
-/**
- * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports
- * @dev: The port's netdev
- * @ti: Tunnel endpoint information
- **/
-static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
-				      struct udp_tunnel_info *ti)
-{
-	struct ixgbe_adapter *adapter = netdev_priv(dev);
-	struct ixgbe_hw *hw = &adapter->hw;
-	__be16 port = ti->port;
-	u32 port_shift = 0;
-	u32 reg;
-
-	if (ti->sa_family != AF_INET)
-		return;
-
-	switch (ti->type) {
-	case UDP_TUNNEL_TYPE_VXLAN:
-		if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
-			return;
-
-		if (adapter->vxlan_port == port)
-			return;
-
-		if (adapter->vxlan_port) {
-			netdev_info(dev,
-				    "VXLAN port %d set, not adding port %d\n",
-				    ntohs(adapter->vxlan_port),
-				    ntohs(port));
-			return;
-		}
-
-		adapter->vxlan_port = port;
-		break;
-	case UDP_TUNNEL_TYPE_GENEVE:
-		if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
-			return;
-
-		if (adapter->geneve_port == port)
-			return;
-
-		if (adapter->geneve_port) {
-			netdev_info(dev,
-				    "GENEVE port %d set, not adding port %d\n",
-				    ntohs(adapter->geneve_port),
-				    ntohs(port));
-			return;
-		}
-
-		port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
-		adapter->geneve_port = port;
-		break;
-	default:
-		return;
-	}
-
-	reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
-	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
-}
-
-/**
- * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports
- * @dev: The port's netdev
- * @ti: Tunnel endpoint information
- **/
-static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
-				      struct udp_tunnel_info *ti)
-{
-	struct ixgbe_adapter *adapter = netdev_priv(dev);
-	u32 port_mask;
-
-	if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
-	    ti->type != UDP_TUNNEL_TYPE_GENEVE)
-		return;
-
-	if (ti->sa_family != AF_INET)
-		return;
-
-	switch (ti->type) {
-	case UDP_TUNNEL_TYPE_VXLAN:
-		if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
-			return;
-
-		if (adapter->vxlan_port != ti->port) {
-			netdev_info(dev, "VXLAN port %d not found\n",
-				    ntohs(ti->port));
-			return;
-		}
-
-		port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
-		break;
-	case UDP_TUNNEL_TYPE_GENEVE:
-		if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
-			return;
-
-		if (adapter->geneve_port != ti->port) {
-			netdev_info(dev, "GENEVE port %d not found\n",
-				    ntohs(ti->port));
-			return;
-		}
-
-		port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
-		break;
-	default:
-		return;
-	}
-
-	ixgbe_clear_udp_tunnel_port(adapter, port_mask);
-	adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
-}
-
 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 			     struct net_device *dev,
 			     const unsigned char *addr, u16 vid,
@@ -10252,6 +10258,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct bpf_prog *old_prog;
 	bool need_reset;
+	int num_queues;
 
 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 		return -EINVAL;
@@ -10270,8 +10277,13 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 			return -EINVAL;
 	}
 
-	if (nr_cpu_ids > MAX_XDP_QUEUES)
+	/* if the number of cpus is much larger than the maximum of queues,
+	 * we should stop it and then return with ENOMEM like before.
+	 */
+	if (nr_cpu_ids > IXGBE_MAX_XDP_QS * 2)
 		return -ENOMEM;
+	else if (nr_cpu_ids > IXGBE_MAX_XDP_QS)
+		static_branch_inc(&ixgbe_xdp_locking_key);
 
 	old_prog = xchg(&adapter->xdp_prog, prog);
 	need_reset = (!!prog != !!old_prog);
@@ -10301,11 +10313,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 	/* Kick start the NAPI context if there is an AF_XDP socket open
	 * on that queue id. This so that receiving will start.
	 */
-	if (need_reset && prog)
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			if (adapter->xdp_ring[i]->xsk_umem)
+	if (need_reset && prog) {
+		num_queues = min_t(int, adapter->num_rx_queues,
+				   adapter->num_xdp_queues);
+		for (i = 0; i < num_queues; i++)
+			if (adapter->xdp_ring[i]->xsk_pool)
 				(void)ixgbe_xsk_wakeup(adapter->netdev, i,
						       XDP_WAKEUP_RX);
+	}
 
 	return 0;
 }
@@ -10317,12 +10332,8 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	switch (xdp->command) {
 	case XDP_SETUP_PROG:
 		return ixgbe_xdp_setup(dev, xdp->prog);
-	case XDP_QUERY_PROG:
-		xdp->prog_id = adapter->xdp_prog ?
-			adapter->xdp_prog->aux->id : 0;
-		return 0;
-	case XDP_SETUP_XSK_UMEM:
-		return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
+	case XDP_SETUP_XSK_POOL:
+		return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool,
					    xdp->xsk.queue_id);
 	default:
@@ -10339,12 +10350,21 @@ void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
 	writel(ring->next_to_use, ring->tail);
 }
 
+void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring)
+{
+	if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+		spin_lock(&ring->tx_lock);
+	ixgbe_xdp_ring_update_tail(ring);
+	if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+		spin_unlock(&ring->tx_lock);
+}
+
 static int ixgbe_xdp_xmit(struct net_device *dev, int n,
			  struct xdp_frame **frames, u32 flags)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct ixgbe_ring *ring;
-	int drops = 0;
+	int nxmit = 0;
 	int i;
 
 	if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
@@ -10356,28 +10376,33 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
 	/* During program transitions its possible adapter->xdp_prog is assigned
	 * but ring has not been configured yet. In this case simply abort xmit.
	 */
-	ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
+	ring = adapter->xdp_prog ? ixgbe_determine_xdp_ring(adapter) : NULL;
 	if (unlikely(!ring))
 		return -ENXIO;
 
 	if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
 		return -ENXIO;
 
+	if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+		spin_lock(&ring->tx_lock);
+
 	for (i = 0; i < n; i++) {
 		struct xdp_frame *xdpf = frames[i];
 		int err;
 
-		err = ixgbe_xmit_xdp_ring(adapter, xdpf);
-		if (err != IXGBE_XDP_TX) {
-			xdp_return_frame_rx_napi(xdpf);
-			drops++;
-		}
+		err = ixgbe_xmit_xdp_ring(ring, xdpf);
+		if (err != IXGBE_XDP_TX)
+			break;
+		nxmit++;
 	}
 
 	if (unlikely(flags & XDP_XMIT_FLUSH))
 		ixgbe_xdp_ring_update_tail(ring);
 
-	return n - drops;
+	if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+		spin_unlock(&ring->tx_lock);
+
+	return nxmit;
 }
 
 static const struct net_device_ops ixgbe_netdev_ops = {
@@ -10392,14 +10417,16 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_set_tx_maxrate	= ixgbe_tx_maxrate,
 	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
-	.ndo_do_ioctl		= ixgbe_ioctl,
+	.ndo_eth_ioctl		= ixgbe_ioctl,
 	.ndo_set_vf_mac		= ixgbe_ndo_set_vf_mac,
 	.ndo_set_vf_vlan	= ixgbe_ndo_set_vf_vlan,
 	.ndo_set_vf_rate	= ixgbe_ndo_set_vf_bw,
 	.ndo_set_vf_spoofchk	= ixgbe_ndo_set_vf_spoofchk,
+	.ndo_set_vf_link_state	= ixgbe_ndo_set_vf_link_state,
 	.ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
 	.ndo_set_vf_trust	= ixgbe_ndo_set_vf_trust,
 	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
+	.ndo_get_vf_stats	= ixgbe_ndo_get_vf_stats,
 	.ndo_get_stats64	= ixgbe_get_stats64,
 	.ndo_setup_tc		= __ixgbe_setup_tc,
 #ifdef IXGBE_FCOE
@@ -10419,8 +10446,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_bridge_getlink	= ixgbe_ndo_bridge_getlink,
 	.ndo_dfwd_add_station	= ixgbe_fwd_add,
 	.ndo_dfwd_del_station	= ixgbe_fwd_del,
-	.ndo_udp_tunnel_add	= ixgbe_add_udp_tunnel_port,
-	.ndo_udp_tunnel_del	= ixgbe_del_udp_tunnel_port,
 	.ndo_features_check	= ixgbe_features_check,
 	.ndo_bpf		= ixgbe_xdp,
 	.ndo_xdp_xmit		= ixgbe_xdp_xmit,
@@ -10663,7 +10688,7 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
 			/* only support first port */
			if (hw->bus.func != 0)
				break;
-			/* fall through */
+			fallthrough;
 		case IXGBE_SUBDEV_ID_82599_SP_560FLR:
 		case IXGBE_SUBDEV_ID_82599_SFP:
 		case IXGBE_SUBDEV_ID_82599_RNDC:
@@ -10747,9 +10772,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct ixgbe_adapter *adapter = NULL;
 	struct ixgbe_hw *hw;
 	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
-	int i, err, pci_using_dac, expected_gts;
 	unsigned int indices = MAX_TX_QUEUES;
 	u8 part_str[IXGBE_PBANUM_LENGTH];
+	int i, err, expected_gts;
 	bool disable_dev = false;
 #ifdef IXGBE_FCOE
 	u16 device_caps;
@@ -10769,16 +10794,11 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		return err;
 
-	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
-		pci_using_dac = 1;
-	} else {
-		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-		if (err) {
-			dev_err(&pdev->dev,
-				"No usable DMA configuration, aborting\n");
-			goto err_dma;
-		}
-		pci_using_dac = 0;
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_err(&pdev->dev,
+			"No usable DMA configuration, aborting\n");
+		goto err_dma;
 	}
 
 	err = pci_request_mem_regions(pdev, ixgbe_driver_name);
@@ -10829,7 +10849,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	netdev->netdev_ops = &ixgbe_netdev_ops;
 	ixgbe_set_ethtool_ops(netdev);
 	netdev->watchdog_timeo = 5 * HZ;
-	strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+	strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
 
 	/* Setup hw api */
 	hw->mac.ops = *ii->mac_ops;
@@ -10865,6 +10885,21 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto err_sw_init;
 
+	if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+		adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
+
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
+		break;
+	case ixgbe_mac_x550em_a:
+		netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a;
+		break;
+	default:
+		break;
+	}
+
 	/* Make sure the SWFW semaphore is in a valid state */
 	if (hw->mac.ops.init_swfw_sync)
 		hw->mac.ops.init_swfw_sync(hw);
@@ -10964,8 +10999,7 @@ skip_sriov:
 	netdev->hw_features |= NETIF_F_NTUPLE |
			       NETIF_F_HW_TC;
 
-	if (pci_using_dac)
-		netdev->features |= NETIF_F_HIGHDMA;
+	netdev->features |= NETIF_F_HIGHDMA;
 
 	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
 	netdev->hw_enc_features |= netdev->vlan_features;
@@ -11034,7 +11068,7 @@ skip_sriov:
 	eth_platform_get_mac_address(&adapter->pdev->dev,
				     adapter->hw.mac.perm_addr);
 
-	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
+	eth_hw_addr_set(netdev, hw->mac.perm_addr);
 
 	if (!is_valid_ether_addr(netdev->dev_addr)) {
 		e_dev_err("invalid MAC address\n");
@@ -11106,7 +11140,7 @@ skip_sriov:
 	err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
 	if (err)
-		strlcpy(part_str, "Unknown", sizeof(part_str));
+		strscpy(part_str, "Unknown", sizeof(part_str));
 	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
 		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
@@ -11159,8 +11193,8 @@ skip_sriov:
	 */
 	if (hw->mac.ops.set_fw_drv_ver)
 		hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
-					   sizeof(ixgbe_driver_version) - 1,
-					   ixgbe_driver_version);
+					   sizeof(UTS_RELEASE) - 1,
+					   UTS_RELEASE);
 
 	/* add san mac addr to netdev */
 	ixgbe_add_sanmac_netdev(netdev);
@@ -11180,10 +11214,14 @@ skip_sriov:
					   IXGBE_LINK_SPEED_10GB_FULL |
					   IXGBE_LINK_SPEED_1GB_FULL,
					   true);
 
-	ixgbe_mii_bus_init(hw);
+	err = ixgbe_mii_bus_init(hw);
+	if (err)
+		goto err_netdev;
 
 	return 0;
 
+err_netdev:
+	unregister_netdev(netdev);
 err_register:
 	ixgbe_release_hw_control(adapter);
 	ixgbe_clear_interrupt_scheme(adapter);
@@ -11199,6 +11237,7 @@ err_ioremap:
 	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
 	free_netdev(netdev);
 err_alloc_etherdev:
+	pci_disable_pcie_error_reporting(pdev);
 	pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
@@ -11494,16 +11533,15 @@ static const struct pci_error_handlers ixgbe_err_handler = {
 	.resume = ixgbe_io_resume,
 };
 
+static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);
+
 static struct pci_driver ixgbe_driver = {
-	.name     = ixgbe_driver_name,
-	.id_table = ixgbe_pci_tbl,
-	.probe    = ixgbe_probe,
-	.remove   = ixgbe_remove,
-#ifdef CONFIG_PM
-	.suspend  = ixgbe_suspend,
-	.resume   = ixgbe_resume,
-#endif
-	.shutdown = ixgbe_shutdown,
+	.name      = ixgbe_driver_name,
+	.id_table  = ixgbe_pci_tbl,
+	.probe     = ixgbe_probe,
+	.remove    = ixgbe_remove,
+	.driver.pm = &ixgbe_pm_ops,
+	.shutdown  = ixgbe_shutdown,
 	.sriov_configure = ixgbe_pci_sriov_configure,
 	.err_handler = &ixgbe_err_handler
 };
@@ -11517,7 +11555,7 @@ static struct pci_driver ixgbe_driver = {
 static int __init ixgbe_init_module(void)
 {
 	int ret;
-	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
+	pr_info("%s\n", ixgbe_driver_string);
 	pr_info("%s\n", ixgbe_copyright);
 
 	ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
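
Editor's note: a recurring theme in the hunks above is the reworked XDP TX path. ixgbe_xmit_xdp_ring() now takes an explicit ring, callers pick it via ixgbe_determine_xdp_ring(), and the static key ixgbe_xdp_locking_key enables spin_lock(&ring->tx_lock) only when several CPUs must share one XDP queue; ixgbe_xdp_setup() rejects nr_cpu_ids beyond IXGBE_MAX_XDP_QS * 2 and flips the key above IXGBE_MAX_XDP_QS. The following is a minimal user-space sketch of that pattern in plain C with pthreads. All names are illustrative, and the cpu % num_rings mapping is an assumption: ixgbe_determine_xdp_ring()'s body is not part of this diff.

/* Sketch of the "per-CPU ring, or shared ring plus lock" XDP TX scheme.
 * Single-threaded demo of the locking decision, not of real concurrency.
 * Build: cc demo.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_RINGS 4 /* stand-in for IXGBE_MAX_XDP_QS */

struct tx_ring {
	pthread_spinlock_t tx_lock; /* only taken in shared mode */
	unsigned int next_to_use;
};

static struct tx_ring rings[MAX_RINGS];
static int num_rings;
static bool rings_shared; /* stand-in for the static branch */

/* Pick the TX ring for this CPU; with more CPUs than rings, several
 * CPUs map onto one ring and must serialize on tx_lock (assumed
 * cpu % num_rings mapping). */
static struct tx_ring *determine_tx_ring(int cpu)
{
	return &rings[cpu % num_rings];
}

static void xmit_on_cpu(int cpu, unsigned int frames)
{
	struct tx_ring *ring = determine_tx_ring(cpu);

	if (rings_shared)
		pthread_spin_lock(&ring->tx_lock);
	ring->next_to_use += frames; /* post descriptors, bump the tail */
	if (rings_shared)
		pthread_spin_unlock(&ring->tx_lock);
}

int main(void)
{
	int nr_cpus = 6; /* pretend nr_cpu_ids == 6 */

	/* Mirrors the policy in ixgbe_xdp_setup(): refuse extreme
	 * oversubscription, enable locking for moderate oversubscription. */
	if (nr_cpus > MAX_RINGS * 2)
		return 1; /* the driver returns -ENOMEM here */
	rings_shared = nr_cpus > MAX_RINGS;

	num_rings = nr_cpus < MAX_RINGS ? nr_cpus : MAX_RINGS;
	for (int i = 0; i < num_rings; i++)
		pthread_spin_init(&rings[i].tx_lock, PTHREAD_PROCESS_PRIVATE);

	for (int cpu = 0; cpu < nr_cpus; cpu++)
		xmit_on_cpu(cpu, 1);

	printf("shared=%d ring0.next_to_use=%u\n",
	       rings_shared, rings[0].next_to_use);
	return 0;
}

With six CPUs and four rings, the static-branch equivalent is enabled and CPUs 0 and 4 (likewise 1 and 5) contend on the same ring's lock; with four or fewer CPUs the lock is never taken, which is the fast path the static key preserves.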
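The VF statistics hunks rely on UPDATE_VF_COUNTER_32bit/36bit macros whose definitions are not part of this diff. Since ixgbe_clear_vf_stats_counters() notes the PVF registers are not clear-on-read, the 36-bit variant presumably accumulates the wrapped delta between the current raw reading (a 32-bit LSB register plus a 4-bit MSB register) and the last snapshot. The arithmetic below is a sketch of that reading, modeled on equivalent helpers in sibling Intel drivers; it is not the driver's actual macro.

#include <stdint.h>
#include <stdio.h>

/* Assumed model of UPDATE_VF_COUNTER_36bit: combine LSB/MSB registers
 * into a 36-bit raw value, accumulate the difference modulo 2^36 so a
 * counter wrap between polls is still counted correctly. */
static void update_vf_counter_36bit(uint32_t lsb, uint32_t msb,
				    uint64_t *last, uint64_t *counter)
{
	uint64_t now = ((uint64_t)(msb & 0xf) << 32) | lsb; /* 36-bit raw */
	uint64_t diff = (now - *last) & 0xFFFFFFFFFULL;     /* mod 2^36 */

	*counter += diff;
	*last = now;
}

int main(void)
{
	uint64_t last = 0xFFFFFFFF0ULL; /* near the 36-bit wrap point */
	uint64_t counter = 0;

	/* Raw reading wrapped past 2^36: MSB=0x0, LSB=0x10 -> delta 32 */
	update_vf_counter_36bit(0x10, 0x0, &last, &counter);
	printf("accumulated %llu\n", (unsigned long long)counter);
	return 0;
}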