Diffstat
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h           |   3
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c    |  12
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c   |  42
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c      |   4
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c     |   2
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c      | 421
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c     |  31
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h      |   1
 -rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  |   1
9 files changed, 380 insertions, 137 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 144d5fe6b944..4fc906c6166b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -855,7 +855,8 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *);
 void ixgbe_free_tx_resources(struct ixgbe_ring *);
 void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
 void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
-void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
+void ixgbe_disable_rx(struct ixgbe_adapter *adapter);
+void ixgbe_disable_tx(struct ixgbe_adapter *adapter);
 void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 3f5c350716bb..0bd1294ba517 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1871,7 +1871,12 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
 	if (enable_addr != 0)
 		rar_high |= IXGBE_RAH_AV;
 
+	/* Record lower 32 bits of MAC address and then make
+	 * sure that write is flushed to hardware before writing
+	 * the upper 16 bits and setting the valid bit.
+	 */
 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+	IXGBE_WRITE_FLUSH(hw);
 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
 
 	return 0;
@@ -1903,8 +1908,13 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
 
-	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+	/* Clear the address valid bit and upper 16 bits of the address
+	 * before clearing the lower bits. This way we aren't updating
+	 * a live filter.
+	 */
 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+	IXGBE_WRITE_FLUSH(hw);
+	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
 
 	/* clear VMDq pool/queue selection for this RAR */
 	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
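
The ordering the two ixgbe_common.c hunks enforce generalizes to any filter split across two registers where one half carries a valid bit: arm last, disarm first, and flush the posted write in between so the halves can never be observed inconsistently. A compilable toy model of that pattern (plain C, with the RAL/RAH pair reduced to two variables; this is not driver code):

    #include <stdint.h>

    #define AV_BIT (1u << 31)   /* stand-in for the "address valid" bit */

    static volatile uint32_t ral, rah;

    /* stand-in for IXGBE_WRITE_FLUSH: reading any register back forces
     * the preceding posted write out to the device */
    static void flush(void) { (void)ral; }

    static void set_rar(uint32_t lo, uint32_t hi)
    {
        ral = lo;            /* low half first, filter still disarmed */
        flush();             /* make sure the write reached "hardware" */
        rah = hi | AV_BIT;   /* the arming write goes last */
    }

    static void clear_rar(void)
    {
        rah = 0;             /* disarm: drop the valid bit first */
        flush();
        ral = 0;             /* low half can now change safely */
    }

    int main(void)
    {
        set_rar(0xa0b1c2d3u, 0x0000e4f5u);
        clear_rar();
        return 0;
    }
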
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index bd1ba88ec1d5..e5a8461fe6a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -511,7 +511,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
 
 static int ixgbe_get_regs_len(struct net_device *netdev)
 {
-#define IXGBE_REGS_LEN	1139
+#define IXGBE_REGS_LEN	1145
 	return IXGBE_REGS_LEN * sizeof(u32);
 }
 
@@ -874,6 +874,14 @@ static void ixgbe_get_regs(struct net_device *netdev,
 	/* X540 specific DCB registers  */
 	regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
 	regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
+
+	/* Security config registers */
+	regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+	regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
+	regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
 }
 
 static int ixgbe_get_eeprom_len(struct net_device *netdev)
@@ -1690,35 +1698,17 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
 
 static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 {
-	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
-	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
-	struct ixgbe_hw *hw = &adapter->hw;
-	u32 reg_ctl;
-
-	/* shut down the DMA engines now so they can be reinitialized later */
+	/* Shut down the DMA engines now so they can be reinitialized later,
+	 * since the test rings and normally used rings should overlap on
+	 * queue 0 we can just use the standard disable Rx/Tx calls and they
+	 * will take care of disabling the test rings for us.
+	 */
 
 	/* first Rx */
-	hw->mac.ops.disable_rx(hw);
-	ixgbe_disable_rx_queue(adapter, rx_ring);
+	ixgbe_disable_rx(adapter);
 
 	/* now Tx */
-	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
-	reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
-	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
-
-	switch (hw->mac.type) {
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-	case ixgbe_mac_x550em_a:
-		reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
-		reg_ctl &= ~IXGBE_DMATXCTL_TE;
-		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
-		break;
-	default:
-		break;
-	}
+	ixgbe_disable_tx(adapter);
 
 	ixgbe_reset(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 94b3165ff543..ccd852ad62a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	}
 
 	/* alloc the udl from per cpu ddp pool */
-	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
+	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
 	if (!ddp->udl) {
 		e_err(drv, "failed allocated ddp context\n");
 		goto out_noddp_unmap;
@@ -760,7 +760,7 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
 		return 0;
 
 	/* Extra buffer to be shared by all DDPs for HW work around */
-	buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+	buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL);
 	if (!buffer)
 		return -ENOMEM;
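
Both ixgbe_fcoe.c hunks relax the allocation flags, presumably because these DDP setup paths run in process context: GFP_ATOMIC is a last resort for callers that cannot sleep, while GFP_KERNEL may block and reclaim, making spurious failure far less likely. A minimal sketch of that rule as a kernel-code fragment (alloc_for_context() is a hypothetical helper, not an ixgbe function):

    #include <linux/slab.h>

    /* Pick GFP flags by calling context, not by what is allocated:
     * GFP_ATOMIC only where sleeping is forbidden (IRQ handlers,
     * sections under a spinlock), GFP_KERNEL everywhere else. */
    static void *alloc_for_context(size_t len, bool can_sleep)
    {
        return kmalloc(len, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
    }
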
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index c116f459945d..da4322e4daed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -839,7 +839,7 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
 	}
 
 	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
-	if (unlikely(itd->sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) {
+	if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
 		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
 			   __func__, itd->sa_idx, xs->xso.offload_handle);
 		return 0;
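
The ipsec hunk is an off-by-one fix: with IXGBE_IPSEC_MAX_SA_COUNT table slots the valid indices run from 0 to MAX - 1, so the old ">" test let the one-past-the-end index through. A userspace sketch of the corrected bound (the MAX value here is illustrative, not taken from the driver):

    #include <assert.h>

    #define MAX_SA_COUNT 1024   /* hypothetical table size */

    static int sa_idx_valid(unsigned int idx)
    {
        return idx < MAX_SA_COUNT;   /* same as rejecting idx >= MAX */
    }

    int main(void)
    {
        assert(sa_idx_valid(0));
        assert(sa_idx_valid(MAX_SA_COUNT - 1));
        assert(!sa_idx_valid(MAX_SA_COUNT));   /* the case ">" missed */
        return 0;
    }
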
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3e87dbbc9024..9a23d33a47ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
 	return skb;
 }
 
-#define IXGBE_XDP_PASS 0
-#define IXGBE_XDP_CONSUMED 1
-#define IXGBE_XDP_TX 2
+#define IXGBE_XDP_PASS		0
+#define IXGBE_XDP_CONSUMED	BIT(0)
+#define IXGBE_XDP_TX		BIT(1)
+#define IXGBE_XDP_REDIR		BIT(2)
 
 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
 			       struct xdp_frame *xdpf);
@@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
 		if (!err)
-			result = IXGBE_XDP_TX;
+			result = IXGBE_XDP_REDIR;
 		else
 			result = IXGBE_XDP_CONSUMED;
 		break;
@@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	unsigned int mss = 0;
 #endif /* IXGBE_FCOE */
 	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
-	bool xdp_xmit = false;
+	unsigned int xdp_xmit = 0;
 	struct xdp_buff xdp;
 
 	xdp.rxq = &rx_ring->xdp_rxq;
@@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		}
 
 		if (IS_ERR(skb)) {
-			if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
-				xdp_xmit = true;
+			unsigned int xdp_res = -PTR_ERR(skb);
+
+			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+				xdp_xmit |= xdp_res;
 				ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
 			} else {
 				rx_buffer->pagecnt_bias++;
@@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		total_rx_packets++;
 	}
 
-	if (xdp_xmit) {
+	if (xdp_xmit & IXGBE_XDP_REDIR)
+		xdp_do_flush_map();
+
+	if (xdp_xmit & IXGBE_XDP_TX) {
 		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
 
 		/* Force memory writes to complete before letting h/w
@@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		 */
 		wmb();
 		writel(ring->next_to_use, ring->tail);
-
-		xdp_do_flush_map();
 	}
 
 	u64_stats_update_begin(&rx_ring->syncp);
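
Turning the XDP verdicts into bit flags lets ixgbe_clean_rx_irq() OR each packet's result into one mask and then run the expensive per-poll actions at most once each, the redirect-map flush gated on its own bit and the tail bump on another. A self-contained toy of that accumulation (the flag names are stand-ins, not the driver's):

    #include <stdio.h>

    #define XDP_F_CONSUMED (1u << 0)
    #define XDP_F_TX       (1u << 1)
    #define XDP_F_REDIR    (1u << 2)

    int main(void)
    {
        /* per-packet verdicts collected during one NAPI poll */
        unsigned int verdicts[] = { XDP_F_TX, XDP_F_CONSUMED, XDP_F_TX };
        unsigned int xdp_xmit = 0;

        for (unsigned int i = 0; i < sizeof(verdicts) / sizeof(verdicts[0]); i++)
            xdp_xmit |= verdicts[i];

        if (xdp_xmit & XDP_F_REDIR)
            printf("flush redirect maps once\n");   /* not taken here */
        if (xdp_xmit & XDP_F_TX)
            printf("ring doorbell once\n");   /* once, not per packet */
        return 0;
    }
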
@@ -4018,38 +4022,6 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
 	}
 }
 
-void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
-			    struct ixgbe_ring *ring)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
-	u32 rxdctl;
-	u8 reg_idx = ring->reg_idx;
-
-	if (ixgbe_removed(hw->hw_addr))
-		return;
-	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
-	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
-
-	/* write value back with RXDCTL.ENABLE bit cleared */
-	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
-
-	if (hw->mac.type == ixgbe_mac_82598EB &&
-	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
-		return;
-
-	/* the hardware may take up to 100us to really disable the rx queue */
-	do {
-		udelay(10);
-		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
-	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
-
-	if (!wait_loop) {
-		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
-		      "the polling period\n", reg_idx);
-	}
-}
-
 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 			     struct ixgbe_ring *ring)
 {
@@ -4059,9 +4031,13 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 	u32 rxdctl;
 	u8 reg_idx = ring->reg_idx;
 
-	/* disable queue to avoid issues while updating state */
+	/* disable queue to avoid use of these values while updating state */
 	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
-	ixgbe_disable_rx_queue(adapter, ring);
+	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+	/* write value back with RXDCTL.ENABLE bit cleared */
+	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+	IXGBE_WRITE_FLUSH(hw);
 
 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -5271,6 +5247,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
 			     struct ixgbe_fwd_adapter *accel)
 {
+	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+	int num_tc = netdev_get_num_tc(adapter->netdev);
 	struct net_device *vdev = accel->netdev;
 	int i, baseq, err;
 
@@ -5282,6 +5260,11 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
 	accel->rx_base_queue = baseq;
 	accel->tx_base_queue = baseq;
 
+	/* record configuration for macvlan interface in vdev */
+	for (i = 0; i < num_tc; i++)
+		netdev_bind_sb_channel_queue(adapter->netdev, vdev,
+					     i, rss_i, baseq + (rss_i * i));
+
 	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
 		adapter->rx_ring[baseq + i]->netdev = vdev;
 
@@ -5306,6 +5289,10 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
 
 	netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
 
+	/* unbind the queues and drop the subordinate channel config */
+	netdev_unbind_sb_channel(adapter->netdev, vdev);
+	netdev_set_sb_channel(vdev, 0);
+
 	clear_bit(accel->pool, adapter->fwd_bitmask);
 	kfree(accel);
 
@@ -5618,6 +5605,212 @@ void ixgbe_up(struct ixgbe_adapter *adapter)
 	ixgbe_up_complete(adapter);
 }
 
+static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter)
+{
+	u16 devctl2;
+
+	pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2);
+
+	switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) {
+	case IXGBE_PCIDEVCTRL2_17_34s:
+	case IXGBE_PCIDEVCTRL2_4_8s:
+		/* For now we cap the upper limit on delay to 2 seconds
+		 * as we end up going up to 34 seconds of delay in worst
+		 * case timeout value.
+		 */
+	case IXGBE_PCIDEVCTRL2_1_2s:
+		return 2000000ul;	/* 2.0 s */
+	case IXGBE_PCIDEVCTRL2_260_520ms:
+		return 520000ul;	/* 520 ms */
+	case IXGBE_PCIDEVCTRL2_65_130ms:
+		return 130000ul;	/* 130 ms */
+	case IXGBE_PCIDEVCTRL2_16_32ms:
+		return 32000ul;		/* 32 ms */
+	case IXGBE_PCIDEVCTRL2_1_2ms:
+		return 2000ul;		/* 2 ms */
+	case IXGBE_PCIDEVCTRL2_50_100us:
+		return 100ul;		/* 100 us */
+	case IXGBE_PCIDEVCTRL2_16_32ms_def:
+		return 32000ul;		/* 32 ms */
+	default:
+		break;
+	}
+
+	/* We shouldn't need to hit this path, but just in case default as
+	 * though completion timeout is not supported and support 32ms.
+	 */
+	return 32000ul;
+}
+
+void ixgbe_disable_rx(struct ixgbe_adapter *adapter)
+{
+	unsigned long wait_delay, delay_interval;
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i, wait_loop;
+	u32 rxdctl;
+
+	/* disable receives */
+	hw->mac.ops.disable_rx(hw);
+
+	if (ixgbe_removed(hw->hw_addr))
+		return;
+
+	/* disable all enabled Rx queues */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct ixgbe_ring *ring = adapter->rx_ring[i];
+		u8 reg_idx = ring->reg_idx;
+
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+		rxdctl |= IXGBE_RXDCTL_SWFLSH;
+
+		/* write value back with RXDCTL.ENABLE bit cleared */
+		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+	}
+
+	/* RXDCTL.EN may not change on 82598 if link is down, so skip it */
+	if (hw->mac.type == ixgbe_mac_82598EB &&
+	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+		return;
+
+	/* Determine our minimum delay interval. We will increase this value
+	 * with each subsequent test. This way if the device returns quickly
+	 * we should spend as little time as possible waiting, however as
+	 * the time increases we will wait for larger periods of time.
+	 *
+	 * The trick here is that we increase the interval using the
+	 * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result
+	 * of that wait is that it totals up to 100x whatever interval we
+	 * choose. Since our minimum wait is 100us we can just divide the
+	 * total timeout by 100 to get our minimum delay interval.
+	 */
+	delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+	wait_loop = IXGBE_MAX_RX_DESC_POLL;
+	wait_delay = delay_interval;
+
+	while (wait_loop--) {
+		usleep_range(wait_delay, wait_delay + 10);
+		wait_delay += delay_interval * 2;
+		rxdctl = 0;
+
+		/* OR together the reading of all the active RXDCTL registers,
+		 * and then test the result. We need the disable to complete
+		 * before we start freeing the memory and invalidating the
+		 * DMA mappings.
+		 */
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			struct ixgbe_ring *ring = adapter->rx_ring[i];
+			u8 reg_idx = ring->reg_idx;
+
+			rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+		}
+
+		if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
+			return;
+	}
+
+	e_err(drv,
+	      "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
+}
+
+void ixgbe_disable_tx(struct ixgbe_adapter *adapter)
+{
+	unsigned long wait_delay, delay_interval;
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i, wait_loop;
+	u32 txdctl;
+
+	if (ixgbe_removed(hw->hw_addr))
+		return;
+
+	/* disable all enabled Tx queues */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct ixgbe_ring *ring = adapter->tx_ring[i];
+		u8 reg_idx = ring->reg_idx;
+
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+	}
+
+	/* disable all enabled XDP Tx queues */
+	for (i = 0; i < adapter->num_xdp_queues; i++) {
+		struct ixgbe_ring *ring = adapter->xdp_ring[i];
+		u8 reg_idx = ring->reg_idx;
+
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+	}
+
+	/* If the link is not up there shouldn't be much in the way of
+	 * pending transactions. Those that are left will be flushed out
+	 * when the reset logic goes through the flush sequence to clean out
+	 * the pending Tx transactions.
+	 */
+	if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+		goto dma_engine_disable;
+
+	/* Determine our minimum delay interval. We will increase this value
+	 * with each subsequent test. This way if the device returns quickly
+	 * we should spend as little time as possible waiting, however as
+	 * the time increases we will wait for larger periods of time.
+	 *
+	 * The trick here is that we increase the interval using the
+	 * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result
+	 * of that wait is that it totals up to 100x whatever interval we
+	 * choose. Since our minimum wait is 100us we can just divide the
+	 * total timeout by 100 to get our minimum delay interval.
+	 */
+	delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+	wait_loop = IXGBE_MAX_RX_DESC_POLL;
+	wait_delay = delay_interval;
+
+	while (wait_loop--) {
+		usleep_range(wait_delay, wait_delay + 10);
+		wait_delay += delay_interval * 2;
+		txdctl = 0;
+
+		/* OR together the reading of all the active TXDCTL registers,
+		 * and then test the result. We need the disable to complete
+		 * before we start freeing the memory and invalidating the
+		 * DMA mappings.
+		 */
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			struct ixgbe_ring *ring = adapter->tx_ring[i];
+			u8 reg_idx = ring->reg_idx;
+
+			txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+		}
+		for (i = 0; i < adapter->num_xdp_queues; i++) {
+			struct ixgbe_ring *ring = adapter->xdp_ring[i];
+			u8 reg_idx = ring->reg_idx;
+
+			txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+		}
+
+		if (!(txdctl & IXGBE_TXDCTL_ENABLE))
+			goto dma_engine_disable;
+	}
+
+	e_err(drv,
+	      "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
+
+dma_engine_disable:
+	/* Disable the Tx DMA engine on 82599 and later MAC */
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
+		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
+				(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
+				 ~IXGBE_DMATXCTL_TE));
+		/* fall through */
+	default:
+		break;
+	}
+}
+
 void ixgbe_reset(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
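
The polling comment shared by ixgbe_disable_rx() and ixgbe_disable_tx() leans on the fact that the first ten odd numbers sum to 10^2 = 100, so ten sleeps of 1x, 3x, ..., 19x the interval add up to exactly the completion timeout. A quick check of that arithmetic (assuming the poll count, IXGBE_MAX_RX_DESC_POLL, is 10, as the 100x math implies):

    #include <assert.h>

    int main(void)
    {
        unsigned long delay_interval = 32000ul / 100;   /* e.g. 32 ms timeout */
        unsigned long wait_delay = delay_interval;
        unsigned long total = 0;

        for (int i = 0; i < 10; i++) {
            total += wait_delay;                /* usleep_range(wait_delay, ...) */
            wait_delay += delay_interval * 2;   /* 1x, 3x, 5x, ... 19x */
        }
        assert(total == 100 * delay_interval);  /* == the whole timeout */
        return 0;
    }
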
@@ -5799,24 +5992,19 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
 		return; /* do nothing if already down */
 
-	/* disable receives */
-	hw->mac.ops.disable_rx(hw);
+	/* Shut off incoming Tx traffic */
+	netif_tx_stop_all_queues(netdev);
 
-	/* disable all enabled rx queues */
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		/* this call also flushes the previous write */
-		ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+	/* call carrier off first to avoid false dev_watchdog timeouts */
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
 
-	usleep_range(10000, 20000);
+	/* Disable Rx */
+	ixgbe_disable_rx(adapter);
 
 	/* synchronize_sched() needed for pending XDP buffers to drain */
 	if (adapter->xdp_ring[0])
 		synchronize_sched();
 
-	netif_tx_stop_all_queues(netdev);
-
-	/* call carrier off first to avoid false dev_watchdog timeouts */
-	netif_carrier_off(netdev);
-	netif_tx_disable(netdev);
-
 	ixgbe_irq_disable(adapter);
@@ -5844,30 +6032,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	}
 
 	/* disable transmits in the hardware now that interrupts are off */
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		u8 reg_idx = adapter->tx_ring[i]->reg_idx;
-		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
-	}
-	for (i = 0; i < adapter->num_xdp_queues; i++) {
-		u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
-
-		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
-	}
-
-	/* Disable the Tx DMA engine on 82599 and later MAC */
-	switch (hw->mac.type) {
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-	case ixgbe_mac_x550em_a:
-		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
-				(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
-				 ~IXGBE_DMATXCTL_TE));
-		break;
-	default:
-		break;
-	}
+	ixgbe_disable_tx(adapter);
 
 	if (!pci_channel_offline(adapter->pdev))
 		ixgbe_reset(adapter);
@@ -6036,7 +6201,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 
 	adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
 				     sizeof(struct ixgbe_mac_addr),
-				     GFP_ATOMIC);
+				     GFP_KERNEL);
 	if (!adapter->mac_table)
 		return -ENOMEM;
 
@@ -6454,6 +6619,21 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
+	if (adapter->xdp_prog) {
+		int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
+				     VLAN_HLEN;
+		int i;
+
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+			if (new_frame_size > ixgbe_rx_bufsz(ring)) {
+				e_warn(probe, "Requested MTU size is not supported with XDP\n");
+				return -EINVAL;
+			}
+		}
+	}
+
 	/*
 	 * For 82599EB we cannot allow legacy VFs to enable their receive
 	 * paths when MTU greater than 1500 is configured. So display a
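
The new ixgbe_change_mtu() guard compares the worst-case wire frame, the MTU plus the 14-byte Ethernet header, 4-byte FCS and one 4-byte VLAN tag, against each ring's Rx buffer. A worked example with an illustrative buffer size (the real bound comes from ixgbe_rx_bufsz(); 3072 here is just a plausible XDP buffer):

    #include <stdio.h>

    #define ETH_HLEN    14   /* Ethernet header */
    #define ETH_FCS_LEN  4   /* frame checksum */
    #define VLAN_HLEN    4   /* one VLAN tag */

    int main(void)
    {
        int rx_bufsz = 3072;   /* hypothetical ixgbe_rx_bufsz() result */
        int new_mtu = 3050;    /* 3072 - 22: the largest MTU that fits */
        int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

        printf("frame %d vs buffer %d -> %s\n", new_frame_size, rx_bufsz,
               new_frame_size > rx_bufsz ? "-EINVAL" : "ok");
        return 0;
    }
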
@@ -8193,25 +8373,25 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 	       input, common, ring->queue_index);
 }
 
+#ifdef IXGBE_FCOE
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
-			      void *accel_priv, select_queue_fallback_t fallback)
+			      struct net_device *sb_dev,
+			      select_queue_fallback_t fallback)
 {
-	struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
 	struct ixgbe_adapter *adapter;
-	int txq;
-#ifdef IXGBE_FCOE
 	struct ixgbe_ring_feature *f;
-#endif
+	int txq;
 
-	if (fwd_adapter) {
-		adapter = netdev_priv(dev);
-		txq = reciprocal_scale(skb_get_hash(skb),
-				       adapter->num_rx_queues_per_pool);
+	if (sb_dev) {
+		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+		struct net_device *vdev = sb_dev;
 
-		return txq + fwd_adapter->tx_base_queue;
-	}
+		txq = vdev->tc_to_txq[tc].offset;
+		txq += reciprocal_scale(skb_get_hash(skb),
+					vdev->tc_to_txq[tc].count);
 
-#ifdef IXGBE_FCOE
+		return txq;
+	}
 
 	/*
 	 * only execute the code below if protocol is FCoE
@@ -8222,11 +8402,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 	case htons(ETH_P_FIP):
 		adapter = netdev_priv(dev);
 
-		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+		if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
 			break;
 		/* fall through */
 	default:
-		return fallback(dev, skb);
+		return fallback(dev, skb, sb_dev);
 	}
 
 	f = &adapter->ring_feature[RING_F_FCOE];
@@ -8238,11 +8418,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 		txq -= f->indices;
 
 	return txq + f->offset;
-#else
-	return fallback(dev, skb);
-#endif
 }
+#endif
 
 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
 			       struct xdp_frame *xdpf)
 {
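
With the macvlan pinned to a single queue, its tc_to_txq table is free to describe the owning device's queue ranges, so the sb_dev branch of ixgbe_select_queue() reduces to offset plus a hash scaled into the range. A toy model of that mapping (reciprocal_scale() simplified to a modulo for clarity; the numbers are made up):

    #include <stdint.h>
    #include <stdio.h>

    struct txq_range { uint16_t count, offset; };

    /* queue = start of the TC's range + hash spread across the range */
    static uint16_t pick_txq(const struct txq_range *tc_to_txq, uint8_t tc,
                             uint32_t hash)
    {
        return tc_to_txq[tc].offset + (hash % tc_to_txq[tc].count);
    }

    int main(void)
    {
        /* hypothetical pool: 4 queues per TC, TC1 starting at queue 12 */
        struct txq_range map[2] = { { 4, 8 }, { 4, 12 } };

        printf("txq = %u\n", pick_txq(map, 1, 0xdeadbeef));   /* 12..15 */
        return 0;
    }
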
@@ -8762,6 +8940,11 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
 	/* if we cannot find a free pool then disable the offload */
 	netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
 	macvlan_release_l2fw_offload(vdev);
+
+	/* unbind the queues and drop the subordinate channel config */
+	netdev_unbind_sb_channel(adapter->netdev, vdev);
+	netdev_set_sb_channel(vdev, 0);
+
 	kfree(accel);
 
 	return 0;
@@ -8810,6 +8993,15 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 
 #ifdef CONFIG_IXGBE_DCB
 	if (tc) {
+		if (adapter->xdp_prog) {
+			e_warn(probe, "DCB is not supported with XDP\n");
+
+			ixgbe_init_interrupt_scheme(adapter);
+			if (netif_running(dev))
+				ixgbe_open(dev);
+			return -EINVAL;
+		}
+
 		netdev_set_num_tc(dev, tc);
 		ixgbe_set_prio_tc_map(adapter);
@@ -8998,14 +9190,12 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
 			    struct tcf_exts *exts, u64 *action, u8 *queue)
 {
 	const struct tc_action *a;
-	LIST_HEAD(actions);
+	int i;
 
 	if (!tcf_exts_has_actions(exts))
 		return -EINVAL;
 
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
-
+	tcf_exts_for_each_action(i, a, exts) {
 		/* Drop action */
 		if (is_tcf_gact_shot(a)) {
 			*action = IXGBE_FDIR_DROP_QUEUE;
@@ -9325,7 +9515,7 @@ static int ixgbe_setup_tc_block(struct net_device *dev,
 	switch (f->command) {
 	case TC_BLOCK_BIND:
 		return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb,
-					     adapter, adapter);
+					     adapter, adapter, f->extack);
 	case TC_BLOCK_UNBIND:
 		tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb,
 					adapter);
@@ -9389,6 +9579,11 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
 	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
 		features &= ~NETIF_F_LRO;
 
+	if (adapter->xdp_prog && (features & NETIF_F_LRO)) {
+		e_dev_err("LRO is not supported with XDP\n");
+		features &= ~NETIF_F_LRO;
+	}
+
 	return features;
 }
 
@@ -9758,6 +9953,11 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 	int tcs = adapter->hw_tcs ? : 1;
 	int pool, err;
 
+	if (adapter->xdp_prog) {
+		e_warn(probe, "L2FW offload is not supported with XDP\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	/* The hardware supported by ixgbe only filters on the destination MAC
 	 * address. In order to avoid issues we only support offloading modes
 	 * where the hardware can actually provide the functionality.
@@ -9765,6 +9965,13 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 	if (!macvlan_supports_dest_filter(vdev))
 		return ERR_PTR(-EMEDIUMTYPE);
 
+	/* We need to lock down the macvlan to be a single queue device so that
+	 * we can reuse the tc_to_txq field in the macvlan netdev to represent
+	 * the queue mapping to our netdev.
+	 */
+	if (netif_is_multiqueue(vdev))
+		return ERR_PTR(-ERANGE);
+
 	pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
 	if (pool == adapter->num_rx_pools) {
 		u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
@@ -9821,6 +10028,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 		return ERR_PTR(-ENOMEM);
 
 	set_bit(pool, adapter->fwd_bitmask);
+	netdev_set_sb_channel(vdev, pool);
 	accel->pool = pool;
 	accel->netdev = vdev;
@@ -9862,6 +10070,10 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
 		ring->netdev = NULL;
 	}
 
+	/* unbind the queues and drop the subordinate channel config */
+	netdev_unbind_sb_channel(pdev, accel->netdev);
+	netdev_set_sb_channel(accel->netdev, 0);
+
 	clear_bit(accel->pool, adapter->fwd_bitmask);
 	kfree(accel);
 }
@@ -9962,7 +10174,6 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	case XDP_SETUP_PROG:
 		return ixgbe_xdp_setup(dev, xdp->prog);
 	case XDP_QUERY_PROG:
-		xdp->prog_attached = !!(adapter->xdp_prog);
 		xdp->prog_id = adapter->xdp_prog ?
 			adapter->xdp_prog->aux->id : 0;
 		return 0;
@@ -10022,7 +10233,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_open		= ixgbe_open,
 	.ndo_stop		= ixgbe_close,
 	.ndo_start_xmit		= ixgbe_xmit_frame,
-	.ndo_select_queue	= ixgbe_select_queue,
 	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= ixgbe_set_mac,
@@ -10045,6 +10255,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_poll_controller	= ixgbe_netpoll,
 #endif
 #ifdef IXGBE_FCOE
+	.ndo_select_queue	= ixgbe_select_queue,
 	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
 	.ndo_fcoe_ddp_target	= ixgbe_fcoe_ddp_target,
 	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 6f59933cdff7..3c6f01c41b78 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -53,6 +53,11 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
 	struct ixgbe_hw *hw = &adapter->hw;
 	int i;
 
+	if (adapter->xdp_prog) {
+		e_warn(probe, "SRIOV is not supported with XDP\n");
+		return -EINVAL;
+	}
+
 	/* Enable VMDq flag so device will be set in VM mode */
 	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
 			  IXGBE_FLAG_VMDQ_ENABLED;
@@ -688,8 +693,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
 static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
 	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
 	u8 num_tcs = adapter->hw_tcs;
+	u32 reg_val;
+	u32 queue;
+	u32 word;
 
 	/* remove VLAN filters beloning to this VF */
 	ixgbe_clear_vf_vlans(adapter, vf);
@@ -726,6 +736,27 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 
 	/* reset VF api back to unknown */
 	adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
+
+	/* Restart each queue for given VF */
+	for (queue = 0; queue < q_per_pool; queue++) {
+		unsigned int reg_idx = (vf * q_per_pool) + queue;
+
+		reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx));
+
+		/* Re-enabling only configured queues */
+		if (reg_val) {
+			reg_val |= IXGBE_TXDCTL_ENABLE;
+			IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
+			reg_val &= ~IXGBE_TXDCTL_ENABLE;
+			IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
+		}
+	}
+
+	/* Clear VF's mailbox memory */
+	for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
+		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
+
+	IXGBE_WRITE_FLUSH(hw);
 }
 
 static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 44cfb2021145..41bcbb337e83 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2518,6 +2518,7 @@ enum {
 /* Translated register #defines */
 #define IXGBE_PVFTDH(P)		(0x06010 + (0x40 * (P)))
 #define IXGBE_PVFTDT(P)		(0x06018 + (0x40 * (P)))
+#define IXGBE_PVFTXDCTL(P)	(0x06028 + (0x40 * (P)))
 #define IXGBE_PVFTDWBAL(P)	(0x06038 + (0x40 * (P)))
 #define IXGBE_PVFTDWBAH(P)	(0x0603C + (0x40 * (P)))
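
The q_per_pool computation in ixgbe_vf_reset_event() hides a bit trick: the VMDq ring-feature mask keeps the pool-index bits and clears the per-pool queue bits, so __ALIGN_MASK(1, ~mask), which is ((x + mask) & ~mask), rounds 1 up to the pool stride. A quick check with masks laid out like the 82599's 4- and 8-queue VMDq modes (the mask values are illustrative):

    #include <assert.h>

    /* same definition the kernel uses in include/linux/kernel.h */
    #define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

    int main(void)
    {
        unsigned int q4 = __ALIGN_MASK(1u, ~0x7Cu);   /* 4-queue pools */
        unsigned int q8 = __ALIGN_MASK(1u, ~0x78u);   /* 8-queue pools */

        assert(q4 == 4 && q8 == 8);
        return 0;
    }
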
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 59416eddd840..d86446d202d5 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4462,7 +4462,6 @@ static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	case XDP_SETUP_PROG:
 		return ixgbevf_xdp_setup(dev, xdp->prog);
 	case XDP_QUERY_PROG:
-		xdp->prog_attached = !!(adapter->xdp_prog);
 		xdp->prog_id = adapter->xdp_prog ?
 			adapter->xdp_prog->aux->id : 0;
 		return 0;