Diffstat (limited to 'drivers/net/ethernet/intel')
 drivers/net/ethernet/intel/e100.c                  |   2
 drivers/net/ethernet/intel/e1000/e1000_main.c      |   3
 drivers/net/ethernet/intel/i40e/i40e.h             |   2
 drivers/net/ethernet/intel/i40e/i40e_debugfs.c     | 135
 drivers/net/ethernet/intel/i40e/i40e_main.c        |  54
 drivers/net/ethernet/intel/i40e/i40e_txrx.c        |  83
 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c |   4
 drivers/net/ethernet/intel/igb/e1000_phy.c         |   8
 drivers/net/ethernet/intel/igb/igb_ethtool.c       |   4
 drivers/net/ethernet/intel/igb/igb_main.c          |  37
 drivers/net/ethernet/intel/igbvf/netdev.c          |   8
 drivers/net/ethernet/intel/ixgbe/ixgbe.h           |  52
 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c   |   6
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c      |  80
 drivers/net/ethernet/intel/ixgbevf/ethtool.c       | 179
 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h       | 144
 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  | 144
 17 files changed, 661 insertions(+), 284 deletions(-)
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ada6e210279f..cbaba4442d4b 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2985,7 +2985,6 @@ err_out_free_res:
 err_out_disable_pdev:
     pci_disable_device(pdev);
 err_out_free_dev:
-    pci_set_drvdata(pdev, NULL);
     free_netdev(netdev);
     return err;
 }
@@ -3003,7 +3002,6 @@ static void e100_remove(struct pci_dev *pdev)
         free_netdev(netdev);
         pci_release_regions(pdev);
         pci_disable_device(pdev);
-        pci_set_drvdata(pdev, NULL);
     }
 }
 
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 59ad007dd5aa..ad6800ad1bfc 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3917,8 +3917,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
                   "  next_to_watch        <%x>\n"
                   "  jiffies              <%lx>\n"
                   "  next_to_watch.status <%x>\n",
-                  (unsigned long)((tx_ring - adapter->tx_ring) /
-                          sizeof(struct e1000_tx_ring)),
+                  (unsigned long)(tx_ring - adapter->tx_ring),
                   readl(hw->hw_addr + tx_ring->tdh),
                   readl(hw->hw_addr + tx_ring->tdt),
                   tx_ring->next_to_use,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index c06a76ca9aaa..1ca9834cdfda 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -46,7 +46,6 @@
 #include <linux/sctp.h>
 #include <linux/pkt_sched.h>
 #include <linux/ipv6.h>
-#include <linux/version.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
 #include <linux/ethtool.h>
@@ -545,6 +544,7 @@ static inline void i40e_dbg_init(void) {}
 static inline void i40e_dbg_exit(void) {}
 #endif /* CONFIG_DEBUG_FS*/
 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 19e248ff6c77..ef4cb1cf31f2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -151,9 +151,7 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
                    size_t count, loff_t *ppos)
 {
     struct i40e_pf *pf = filp->private_data;
-    char dump_request_buf[16];
     bool seid_found = false;
-    int bytes_not_copied;
     long seid = -1;
     int buflen = 0;
     int i, ret;
@@ -163,21 +161,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
     /* don't allow partial writes */
     if (*ppos != 0)
         return 0;
-    if (count >= sizeof(dump_request_buf))
-        return -ENOSPC;
-
-    bytes_not_copied = copy_from_user(dump_request_buf, buffer, count);
-    if (bytes_not_copied < 0)
-        return bytes_not_copied;
-    if (bytes_not_copied > 0)
-        count -= bytes_not_copied;
-    dump_request_buf[count] = '\0';
 
     /* decode the SEID given to be dumped */
-    ret = kstrtol(dump_request_buf, 0, &seid);
-    if (ret < 0) {
-        dev_info(&pf->pdev->dev, "bad seid value '%s'\n",
-             dump_request_buf);
+    ret = kstrtol_from_user(buffer, count, 0, &seid);
+
+    if (ret) {
+        dev_info(&pf->pdev->dev, "bad seid value\n");
     } else if (seid == 0) {
         seid_found = true;
 
@@ -245,26 +234,33 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
             memcpy(p, vsi, len);
             p += len;
 
-            len = (sizeof(struct i40e_q_vector)
-                * vsi->num_q_vectors);
-            memcpy(p, vsi->q_vectors, len);
-            p += len;
-
-            len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs);
-            memcpy(p, vsi->tx_rings, len);
-            p += len;
-            memcpy(p, vsi->rx_rings, len);
-            p += len;
+            if (vsi->num_q_vectors) {
+                len = (sizeof(struct i40e_q_vector)
+                    * vsi->num_q_vectors);
+                memcpy(p, vsi->q_vectors, len);
+                p += len;
+            }
 
-            for (i = 0; i < vsi->num_queue_pairs; i++) {
-                len = sizeof(struct i40e_tx_buffer);
-                memcpy(p, vsi->tx_rings[i]->tx_bi, len);
+            if (vsi->num_queue_pairs) {
+                len = (sizeof(struct i40e_ring) *
+                      vsi->num_queue_pairs);
+                memcpy(p, vsi->tx_rings, len);
+                p += len;
+                memcpy(p, vsi->rx_rings, len);
                 p += len;
             }
-            for (i = 0; i < vsi->num_queue_pairs; i++) {
+
+            if (vsi->tx_rings[0]) {
+                len = sizeof(struct i40e_tx_buffer);
+                for (i = 0; i < vsi->num_queue_pairs; i++) {
+                    memcpy(p, vsi->tx_rings[i]->tx_bi, len);
+                    p += len;
+                }
                 len = sizeof(struct i40e_rx_buffer);
-                memcpy(p, vsi->rx_rings[i]->rx_bi, len);
-                p += len;
+                for (i = 0; i < vsi->num_queue_pairs; i++) {
+                    memcpy(p, vsi->rx_rings[i]->rx_bi, len);
+                    p += len;
+                }
             }
 
             /* macvlan filter list */
@@ -1023,11 +1019,11 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                       size_t count, loff_t *ppos)
 {
     struct i40e_pf *pf = filp->private_data;
+    char *cmd_buf, *cmd_buf_tmp;
     int bytes_not_copied;
     struct i40e_vsi *vsi;
     u8 *print_buf_start;
     u8 *print_buf;
-    char *cmd_buf;
     int vsi_seid;
     int veb_seid;
     int cnt;
@@ -1046,6 +1042,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
     count -= bytes_not_copied;
     cmd_buf[count] = '\0';
 
+    cmd_buf_tmp = strchr(cmd_buf, '\n');
+    if (cmd_buf_tmp) {
+        *cmd_buf_tmp = '\0';
+        count = cmd_buf_tmp - cmd_buf + 1;
+    }
+
     print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
     if (!print_buf_start)
         goto command_write_done;
@@ -1152,9 +1154,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
             i40e_veb_release(pf->veb[i]);
 
     } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
-        u8 ma[6];
-        int vlan = 0;
         struct i40e_mac_filter *f;
+        int vlan = 0;
+        u8 ma[6];
         int ret;
 
         cnt = sscanf(&cmd_buf[11],
@@ -1190,8 +1192,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                  ma, vlan, vsi_seid, f, ret);
 
     } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
-        u8 ma[6];
         int vlan = 0;
+        u8 ma[6];
         int ret;
 
         cnt = sscanf(&cmd_buf[11],
@@ -1227,9 +1229,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                  ma, vlan, vsi_seid, ret);
 
     } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
-        int v;
-        u16 vid;
         i40e_status ret;
+        u16 vid;
+        int v;
 
         cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
         if (cnt != 2) {
@@ -1540,10 +1542,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
     } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
            (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
         struct i40e_fdir_data fd_data;
-        int ret;
         u16 packet_len, i, j = 0;
         char *asc_packet;
         bool add = false;
+        int ret;
 
         asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
                      GFP_KERNEL);
@@ -1631,9 +1633,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
         }
 
     } else if (strncmp(&cmd_buf[5], "get local", 9) == 0) {
+        u16 llen, rlen;
         int ret, i;
         u8 *buff;
-        u16 llen, rlen;
         buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
         if (!buff)
             goto command_write_done;
@@ -1664,9 +1666,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
         kfree(buff);
         buff = NULL;
     } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
+        u16 llen, rlen;
         int ret, i;
         u8 *buff;
-        u16 llen, rlen;
         buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
         if (!buff)
             goto command_write_done;
@@ -1742,11 +1744,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
             goto command_write_done;
         }
 
-        /* Read at least 512 words */
-        if (buffer_len == 0)
-            buffer_len = 512;
+        /* set the max length */
+        buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
 
         bytes = 2 * buffer_len;
+
+        /* read at least 1k bytes, no more than 4kB */
+        bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
         buff = kzalloc(bytes, GFP_KERNEL);
         if (!buff)
             goto command_write_done;
@@ -1898,6 +1902,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
     struct i40e_pf *pf = filp->private_data;
     int bytes_not_copied;
     struct i40e_vsi *vsi;
+    char *buf_tmp;
     int vsi_seid;
     int i, cnt;
 
@@ -1916,6 +1921,12 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
     count -= bytes_not_copied;
     i40e_dbg_netdev_ops_buf[count] = '\0';
 
+    buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
+    if (buf_tmp) {
+        *buf_tmp = '\0';
+        count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
+    }
+
     if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
         cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
         if (cnt != 1) {
@@ -2019,21 +2030,35 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {
  **/
 void i40e_dbg_pf_init(struct i40e_pf *pf)
 {
-    struct dentry *pfile __attribute__((unused));
+    struct dentry *pfile;
     const char *name = pci_name(pf->pdev);
+    const struct device *dev = &pf->pdev->dev;
 
     pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
-    if (pf->i40e_dbg_pf) {
-        pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf,
-                        pf, &i40e_dbg_command_fops);
-        pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
-                        &i40e_dbg_dump_fops);
-        pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf,
-                        pf, &i40e_dbg_netdev_ops_fops);
-    } else {
-        dev_info(&pf->pdev->dev,
-             "debugfs entry for %s failed\n", name);
-    }
+    if (!pf->i40e_dbg_pf)
+        return;
+
+    pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
+                    &i40e_dbg_command_fops);
+    if (!pfile)
+        goto create_failed;
+
+    pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
+                    &i40e_dbg_dump_fops);
+    if (!pfile)
+        goto create_failed;
+
+    pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
+                    &i40e_dbg_netdev_ops_fops);
+    if (!pfile)
+        goto create_failed;
+
+    return;
+
+create_failed:
+    dev_info(dev, "debugfs dir/file for %s failed\n", name);
+    debugfs_remove_recursive(pf->i40e_dbg_pf);
+    return;
 }
 
 /**
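The dump-handler rewrite above replaces an open-coded copy_from_user()/kstrtol() pair with kstrtol_from_user(), which bounds-copies, NUL-terminates, and parses in one call. A minimal sketch of the same pattern, assuming a hypothetical debugfs write handler (the handler name is illustrative, not from the driver):

    #include <linux/kernel.h>

    /* hypothetical write handler: parse one integer per write() */
    static ssize_t example_dbg_write(struct file *filp,
                     const char __user *buf,
                     size_t count, loff_t *ppos)
    {
        long val;
        int ret;

        /* copies at most count bytes, NUL-terminates internally,
         * then parses with kstrtol(); no partial-copy bookkeeping
         */
        ret = kstrtol_from_user(buf, count, 0, &val);
        if (ret)
            return ret;    /* -EINVAL or -ERANGE */

        pr_info("parsed %ld\n", val);
        return count;
    }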
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index fbe7fe2914a9..be15938ba213 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -36,7 +36,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 0
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 10
+#define DRV_VERSION_BUILD 11
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
          __stringify(DRV_VERSION_MINOR) "." \
          __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -2174,8 +2174,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 
     /* Now associate this queue with this PCI function */
     qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
-    qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
-                        & I40E_QTX_CTL_PF_INDX_MASK);
+    qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+            I40E_QTX_CTL_PF_INDX_MASK);
     wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
     i40e_flush(hw);
 
@@ -2532,7 +2532,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
  * @pf: board private structure
  **/
-static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
 {
     struct i40e_hw *hw = &pf->hw;
     u32 val;
@@ -2560,7 +2560,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
           I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
           (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
     wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
-    i40e_flush(hw);
+    /* skip the flush */
 }
 
 /**
@@ -2709,6 +2709,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
         i40e_irq_dynamic_enable_icr0(pf);
     }
 
+    i40e_flush(&pf->hw);
     return 0;
 }
 
@@ -2741,14 +2742,14 @@ static irqreturn_t i40e_intr(int irq, void *data)
 
     icr0 = rd32(hw, I40E_PFINT_ICR0);
 
-    /* if sharing a legacy IRQ, we might get called w/o an intr pending */
-    if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
-        return IRQ_NONE;
-
     val = rd32(hw, I40E_PFINT_DYN_CTL0);
     val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
     wr32(hw, I40E_PFINT_DYN_CTL0, val);
 
+    /* if sharing a legacy IRQ, we might get called w/o an intr pending */
+    if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
+        return IRQ_NONE;
+
     ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
 
     /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
@@ -2762,7 +2763,6 @@ static irqreturn_t i40e_intr(int irq, void *data)
         qval = rd32(hw, I40E_QINT_TQCTL(0));
         qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
         wr32(hw, I40E_QINT_TQCTL(0), qval);
-        i40e_flush(hw);
 
         if (!test_bit(__I40E_DOWN, &pf->state))
             napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
@@ -2824,7 +2824,6 @@ static irqreturn_t i40e_intr(int irq, void *data)
 
     /* re-enable interrupt causes */
     wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
-    i40e_flush(hw);
     if (!test_bit(__I40E_DOWN, &pf->state)) {
         i40e_service_event_schedule(pf);
         i40e_irq_dynamic_enable_icr0(pf);
@@ -4614,7 +4613,8 @@ static void i40e_fdir_setup(struct i40e_pf *pf)
     bool new_vsi = false;
     int err, i;
 
-    if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED)))
+    if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
+               I40E_FLAG_FDIR_ATR_ENABLED)))
         return;
 
     pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
@@ -5159,11 +5159,12 @@ static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
 {
     int i;
 
-    for (i = 0; i < vsi->alloc_queue_pairs; i++) {
-        kfree_rcu(vsi->tx_rings[i], rcu);
-        vsi->tx_rings[i] = NULL;
-        vsi->rx_rings[i] = NULL;
-    }
+    if (vsi->tx_rings[0])
+        for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+            kfree_rcu(vsi->tx_rings[i], rcu);
+            vsi->tx_rings[i] = NULL;
+            vsi->rx_rings[i] = NULL;
+        }
 
     return 0;
 }
@@ -5433,7 +5434,8 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
     if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
         err = i40e_init_msix(pf);
         if (err) {
-            pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
+            pf->flags &= ~(I40E_FLAG_MSIX_ENABLED    |
+                       I40E_FLAG_RSS_ENABLED    |
                        I40E_FLAG_MQ_ENABLED    |
                        I40E_FLAG_DCB_ENABLED    |
                        I40E_FLAG_SRIOV_ENABLED    |
@@ -5448,14 +5450,17 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
 
     if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
         (pf->flags & I40E_FLAG_MSI_ENABLED)) {
+        dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
         err = pci_enable_msi(pf->pdev);
         if (err) {
-            dev_info(&pf->pdev->dev,
-                 "MSI init failed (%d), trying legacy.\n", err);
+            dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
             pf->flags &= ~I40E_FLAG_MSI_ENABLED;
         }
     }
 
+    if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
+        dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
+
     /* track first vector for misc interrupts */
     err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
 }
@@ -6108,8 +6113,9 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
         goto vector_setup_out;
     }
 
-    vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
-                     vsi->num_q_vectors, vsi->idx);
+    if (vsi->num_q_vectors)
+        vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
+                         vsi->num_q_vectors, vsi->idx);
     if (vsi->base_vector < 0) {
         dev_info(&pf->pdev->dev,
              "failed to get q tracking for VSI %d, err=%d\n",
@@ -7198,8 +7204,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
      */
     len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
     pf->vsi = kzalloc(len, GFP_KERNEL);
-    if (!pf->vsi)
+    if (!pf->vsi) {
+        err = -ENOMEM;
         goto err_switch_setup;
+    }
 
     err = i40e_setup_pf_switch(pf);
     if (err) {
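Several i40e hunks above delete per-write i40e_flush() calls and issue a single flush after a whole batch of register writes (see i40e_vsi_enable_irq()). The mechanism being exploited is MMIO write posting: writes may sit in a posting buffer until a read from the device forces them out, so one read-back after the batch suffices. A generic sketch of that pattern, with placeholder register offsets (ENABLE_BITS, REG_DYN_CTL, and REG_STATUS are illustrative, not the i40e register map):

    /* hedged sketch: batch posted MMIO writes, flush once at the end */
    static void enable_all_vectors(void __iomem *hw_addr, int nvectors)
    {
        int v;

        for (v = 0; v < nvectors; v++)
            writel(ENABLE_BITS, hw_addr + REG_DYN_CTL(v)); /* posted */

        /* a single read-back pushes the whole batch to the device */
        (void)readl(hw_addr + REG_STATUS);
    }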
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index dc89e72fd0f4..f1f03bc5c729 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -37,6 +37,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
                ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
 }
 
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
 /**
  * i40e_program_fdir_filter - Program a Flow Director filter
  * @fdir_input: Packet data that will be filter parameters
@@ -50,6 +51,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
     struct i40e_tx_buffer *tx_buf;
     struct i40e_tx_desc *tx_desc;
     struct i40e_ring *tx_ring;
+    unsigned int fpt, dcc;
     struct i40e_vsi *vsi;
     struct device *dev;
     dma_addr_t dma;
@@ -68,7 +70,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
     dev = tx_ring->dev;
 
     dma = dma_map_single(dev, fdir_data->raw_packet,
-            I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
+                 I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
     if (dma_mapping_error(dev, dma))
         goto dma_fail;
 
@@ -77,74 +79,61 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
     fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
     tx_buf = &tx_ring->tx_bi[i];
 
-    i++;
-    tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+    tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
 
-    fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index
-                         << I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
-                         & I40E_TXD_FLTR_QW0_QINDEX_MASK);
+    fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+          I40E_TXD_FLTR_QW0_QINDEX_MASK;
 
-    fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off
-                        << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
-                        & I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+    fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+           I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
 
-    fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype
-                         << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
-                         & I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+    fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+           I40E_TXD_FLTR_QW0_PCTYPE_MASK;
 
     /* Use LAN VSI Id if not programmed by user */
     if (fdir_data->dest_vsi == 0)
-        fdir_desc->qindex_flex_ptype_vsi |=
-                      cpu_to_le32((pf->vsi[pf->lan_vsi]->id)
-                       << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+        fpt |= (pf->vsi[pf->lan_vsi]->id) <<
+               I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
     else
-        fdir_desc->qindex_flex_ptype_vsi |=
-                         cpu_to_le32((fdir_data->dest_vsi
-                        << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
-                        & I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+        fpt |= ((u32)fdir_data->dest_vsi <<
+            I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+               I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
 
-    fdir_desc->dtype_cmd_cntindex =
-                    cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+    fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
+
+    dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
 
     if (add)
-        fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
-                       I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE
-                    << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+        dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+               I40E_TXD_FLTR_QW1_PCMD_SHIFT;
     else
-        fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
-                       I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE
-                       << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+        dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+               I40E_TXD_FLTR_QW1_PCMD_SHIFT;
 
-    fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl
-                      << I40E_TXD_FLTR_QW1_DEST_SHIFT)
-                      & I40E_TXD_FLTR_QW1_DEST_MASK);
+    dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+           I40E_TXD_FLTR_QW1_DEST_MASK;
 
-    fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
-             (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
-              & I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+    dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+           I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
 
     if (fdir_data->cnt_index != 0) {
-        fdir_desc->dtype_cmd_cntindex |=
-                      cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
-        fdir_desc->dtype_cmd_cntindex |=
-                        cpu_to_le32((fdir_data->cnt_index
-                        << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
-                        & I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+        dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+        dcc |= ((u32)fdir_data->cnt_index <<
+            I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+               I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
     }
 
+    fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
     fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
 
     /* Now program a dummy descriptor */
     i = tx_ring->next_to_use;
     tx_desc = I40E_TX_DESC(tx_ring, i);
 
-    i++;
-    tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+    tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
 
     tx_desc->buffer_addr = cpu_to_le64(dma);
-    td_cmd = I40E_TX_DESC_CMD_EOP |
-         I40E_TX_DESC_CMD_RS  |
-         I40E_TX_DESC_CMD_DUMMY;
+    td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
 
     tx_desc->cmd_type_offset_bsz =
         build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
@@ -559,8 +548,6 @@ static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
     i40e_set_new_dynamic_itr(&q_vector->tx);
     if (old_itr != q_vector->tx.itr)
         wr32(hw, reg_addr, q_vector->tx.itr);
-
-    i40e_flush(hw);
 }
 
 /**
@@ -1155,7 +1142,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
         qval = rd32(hw, I40E_QINT_TQCTL(0));
         qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
         wr32(hw, I40E_QINT_TQCTL(0), qval);
-        i40e_flush(hw);
+
+        i40e_irq_dynamic_enable_icr0(vsi->back);
     }
 }
 
@@ -1256,7 +1244,6 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
     fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
 }
 
-#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
 /**
  * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
  * @skb:      send buffer
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 8967e58e2408..07596982a477 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -251,7 +251,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
         reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
     else
         reg_idx = I40E_VPINT_LNKLSTN(
-             ((pf->hw.func_caps.num_msix_vectors_vf - 1)
+             (pf->hw.func_caps.num_msix_vectors_vf
                           * vf->vf_id) + (vector_id - 1));
 
     if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
@@ -383,7 +383,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
 
     /* associate this queue with the PCI VF function */
     qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
-    qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+    qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
             & I40E_QTX_CTL_PF_INDX_MASK);
     qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
              << I40E_QTX_CTL_VFVM_INDX_SHIFT)
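The i40e_program_fdir_filter() rewrite above collects descriptor bits in the CPU-order locals fpt and dcc and performs one cpu_to_le32() store per field, instead of read-modify-writing the little-endian descriptor repeatedly. Reduced to its essentials (field, shift, and mask names shortened for illustration):

    /* before: every update converts and touches descriptor memory */
    desc->field  = cpu_to_le32((a << A_SHIFT) & A_MASK);
    desc->field |= cpu_to_le32((b << B_SHIFT) & B_MASK);

    /* after: accumulate in CPU byte order, convert and store once */
    u32 tmp;

    tmp  = (a << A_SHIFT) & A_MASK;
    tmp |= (b << B_SHIFT) & B_MASK;
    desc->field = cpu_to_le32(tmp);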
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index e7266759a10b..c4c4fe332c7e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -708,11 +708,6 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
         hw_dbg("Error committing the PHY changes\n");
         goto out;
     }
-    if (phy->type == e1000_phy_i210) {
-        ret_val = igb_set_master_slave_mode(hw);
-        if (ret_val)
-            return ret_val;
-    }
 
 out:
     return ret_val;
@@ -806,6 +801,9 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
         hw_dbg("Error committing the PHY changes\n");
         return ret_val;
     }
+    ret_val = igb_set_master_slave_mode(hw);
+    if (ret_val)
+        return ret_val;
 
     return 0;
 }
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 0ae3177416c7..b918ba3640f9 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -771,8 +771,10 @@ static int igb_set_eeprom(struct net_device *netdev,
     if (eeprom->len == 0)
         return -EOPNOTSUPP;
 
-    if (hw->mac.type == e1000_i211)
+    if ((hw->mac.type >= e1000_i210) &&
+        !igb_get_flash_presence_i210(hw)) {
         return -EOPNOTSUPP;
+    }
 
     if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
         return -EFAULT;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a505d3bad09a..ebe6370c4b18 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -182,6 +182,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);
 
 #ifdef CONFIG_PCI_IOV
 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
+static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
 #endif
 
 #ifdef CONFIG_PM
@@ -2429,7 +2430,7 @@ err_dma:
 }
 
 #ifdef CONFIG_PCI_IOV
-static int  igb_disable_sriov(struct pci_dev *pdev)
+static int igb_disable_sriov(struct pci_dev *pdev)
 {
     struct net_device *netdev = pci_get_drvdata(pdev);
     struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2470,27 +2471,19 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
     int err = 0;
     int i;
 
-    if (!adapter->msix_entries) {
+    if (!adapter->msix_entries || num_vfs > 7) {
         err = -EPERM;
         goto out;
     }
-
     if (!num_vfs)
         goto out;
-    else if (old_vfs && old_vfs == num_vfs)
-        goto out;
-    else if (old_vfs && old_vfs != num_vfs)
-        err = igb_disable_sriov(pdev);
-
-    if (err)
-        goto out;
-
-    if (num_vfs > 7) {
-        err = -EPERM;
-        goto out;
-    }
-
-    adapter->vfs_allocated_count = num_vfs;
+
+    if (old_vfs) {
+        dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
+             old_vfs, max_vfs);
+        adapter->vfs_allocated_count = old_vfs;
+    } else
+        adapter->vfs_allocated_count = num_vfs;
 
     adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
                 sizeof(struct vf_data_storage), GFP_KERNEL);
@@ -2504,10 +2497,12 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
         goto out;
     }
 
-    err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
-    if (err)
-        goto err_out;
-
+    /* only call pci_enable_sriov() if no VFs are allocated already */
+    if (!old_vfs) {
+        err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+        if (err)
+            goto err_out;
+    }
     dev_info(&pdev->dev, "%d VFs allocated\n",
          adapter->vfs_allocated_count);
     for (i = 0; i < adapter->vfs_allocated_count; i++)
@@ -2623,7 +2618,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
         return;
 
     pci_sriov_set_totalvfs(pdev, 7);
-    igb_enable_sriov(pdev, max_vfs);
+    igb_pci_enable_sriov(pdev, max_vfs);
 
 #endif /* CONFIG_PCI_IOV */
 }
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 93eb7ee06d3e..9fadbb28cf08 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2343,10 +2343,9 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
     struct igbvf_adapter *adapter = netdev_priv(netdev);
     int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
-    if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-        dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
+    if (new_mtu < 68 || new_mtu > INT_MAX - ETH_HLEN - ETH_FCS_LEN ||
+        max_frame > MAX_JUMBO_FRAME_SIZE)
         return -EINVAL;
-    }
 
 #define MAX_STD_JUMBO_FRAME_SIZE 9234
     if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
@@ -2699,7 +2698,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     if (ei->get_variants) {
         err = ei->get_variants(adapter);
         if (err)
-            goto err_ioremap;
+            goto err_get_variants;
     }
 
     /* setup adapter struct */
@@ -2796,6 +2795,7 @@ err_hw_init:
     kfree(adapter->rx_ring);
 err_sw_init:
     igbvf_reset_interrupt_capability(adapter);
+err_get_variants:
     iounmap(adapter->hw.hw_addr);
 err_ioremap:
     free_netdev(netdev);
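The igbvf MTU hunk above validates new_mtu against INT_MAX - ETH_HLEN - ETH_FCS_LEN before trusting max_frame, so an absurdly large MTU cannot wrap the int addition into a small value that slips past the jumbo-frame test. The idiom in isolation, as a hypothetical standalone helper:

    #include <linux/if_ether.h>    /* ETH_HLEN, ETH_FCS_LEN */
    #include <linux/kernel.h>      /* INT_MAX */

    static int example_validate_mtu(int new_mtu)
    {
        const int overhead = ETH_HLEN + ETH_FCS_LEN;

        /* compare against (INT_MAX - overhead) first, so the
         * addition below can never overflow
         */
        if (new_mtu < 68 || new_mtu > INT_MAX - overhead)
            return -EINVAL;

        return new_mtu + overhead;    /* max_frame, no wraparound */
    }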
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index dc1588ee264a..09149143ee0f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -67,7 +67,11 @@
 #define IXGBE_MAX_TXD           4096
 #define IXGBE_MIN_TXD             64
 
+#if (PAGE_SIZE < 8192)
 #define IXGBE_DEFAULT_RXD        512
+#else
+#define IXGBE_DEFAULT_RXD        128
+#endif
 #define IXGBE_MAX_RXD           4096
 #define IXGBE_MIN_RXD             64
 
@@ -369,11 +373,13 @@ struct ixgbe_q_vector {
 #ifdef CONFIG_NET_RX_BUSY_POLL
     unsigned int state;
 #define IXGBE_QV_STATE_IDLE        0
-#define IXGBE_QV_STATE_NAPI       1    /* NAPI owns this QV */
-#define IXGBE_QV_STATE_POLL       2    /* poll owns this QV */
-#define IXGBE_QV_LOCKED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
-#define IXGBE_QV_STATE_NAPI_YIELD  4    /* NAPI yielded this QV */
-#define IXGBE_QV_STATE_POLL_YIELD  8    /* poll yielded this QV */
+#define IXGBE_QV_STATE_NAPI       1     /* NAPI owns this QV */
+#define IXGBE_QV_STATE_POLL       2     /* poll owns this QV */
+#define IXGBE_QV_STATE_DISABLED   4     /* QV is disabled */
+#define IXGBE_QV_OWNED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
+#define IXGBE_QV_LOCKED (IXGBE_QV_OWNED | IXGBE_QV_STATE_DISABLED)
+#define IXGBE_QV_STATE_NAPI_YIELD  8     /* NAPI yielded this QV */
+#define IXGBE_QV_STATE_POLL_YIELD  16    /* poll yielded this QV */
 #define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
 #define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
     spinlock_t lock;
@@ -394,7 +400,7 @@ static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
 static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
 {
     int rc = true;
-    spin_lock(&q_vector->lock);
+    spin_lock_bh(&q_vector->lock);
     if (q_vector->state & IXGBE_QV_LOCKED) {
         WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
         q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
@@ -405,7 +411,7 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
     } else
         /* we don't care if someone yielded */
         q_vector->state = IXGBE_QV_STATE_NAPI;
-    spin_unlock(&q_vector->lock);
+    spin_unlock_bh(&q_vector->lock);
     return rc;
 }
 
@@ -413,14 +419,15 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
 static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
 {
     int rc = false;
-    spin_lock(&q_vector->lock);
+    spin_lock_bh(&q_vector->lock);
     WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL |
                    IXGBE_QV_STATE_NAPI_YIELD));
 
     if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
         rc = true;
-    q_vector->state = IXGBE_QV_STATE_IDLE;
-    spin_unlock(&q_vector->lock);
+    /* will reset state to idle, unless QV is disabled */
+    q_vector->state &= IXGBE_QV_STATE_DISABLED;
+    spin_unlock_bh(&q_vector->lock);
     return rc;
 }
 
@@ -451,7 +458,8 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
 
     if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
         rc = true;
-    q_vector->state = IXGBE_QV_STATE_IDLE;
+    /* will reset state to idle, unless QV is disabled */
+    q_vector->state &= IXGBE_QV_STATE_DISABLED;
     spin_unlock_bh(&q_vector->lock);
     return rc;
 }
@@ -459,9 +467,23 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
 /* true if a socket is polling, even if it did not get the lock */
 static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
-    WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
+    WARN_ON(!(q_vector->state & IXGBE_QV_OWNED));
     return q_vector->state & IXGBE_QV_USER_PEND;
 }
+
+/* false if QV is currently owned */
+static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
+{
+    int rc = true;
+    spin_lock_bh(&q_vector->lock);
+    if (q_vector->state & IXGBE_QV_OWNED)
+        rc = false;
+    q_vector->state |= IXGBE_QV_STATE_DISABLED;
+    spin_unlock_bh(&q_vector->lock);
+
+    return rc;
+}
+
 #else /* CONFIG_NET_RX_BUSY_POLL */
 static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
 {
@@ -491,6 +513,12 @@ static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
     return false;
 }
+
+static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
+{
+    return true;
+}
+
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 #ifdef CONFIG_IXGBE_HWMON
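The IXGBE_QV_STATE_DISABLED bit introduced above turns the busy-poll lock into a small state machine: NAPI and sockets own a vector through the IXGBE_QV_OWNED bits, the unlock paths mask the state with STATE_DISABLED rather than storing IDLE (so a disable request that raced with the current owner is not lost), and ixgbe_qv_disable() refuses while the vector is owned. Teardown then drives it exactly as the napi-disable hunk later in this diff does:

    /* teardown: mark the vector disabled, waiting out any owner */
    napi_disable(&q_vector->napi);
    while (!ixgbe_qv_disable(q_vector)) {
        /* NAPI or a busy-polling socket still holds it */
        usleep_range(1000, 20000);
    }
    /* from here on, qv_lock_napi()/qv_lock_poll() see LOCKED and fail */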
"<2%" : "Unknown")); if (max_gts < expected_gts) { @@ -3825,14 +3823,6 @@ void ixgbe_set_rx_mode(struct net_device *netdev) if (netdev->flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; vmolr |= IXGBE_VMOLR_MPE; - } else { - /* - * Write addresses to the MTA, if the attempt fails - * then we should just turn on promiscuous mode so - * that we can at least receive multicast traffic - */ - hw->mac.ops.update_mc_addr_list(hw, netdev); - vmolr |= IXGBE_VMOLR_ROMPE; } ixgbe_vlan_filter_enable(adapter); hw->addr_ctrl.user_set_promisc = false; @@ -3849,6 +3839,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev) vmolr |= IXGBE_VMOLR_ROPE; } + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + hw->mac.ops.update_mc_addr_list(hw, netdev); + vmolr |= IXGBE_VMOLR_ROMPE; + if (adapter->num_vfs) ixgbe_restore_vf_multicasts(adapter); @@ -3893,15 +3890,13 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) { int q_idx; - local_bh_disable(); /* for ixgbe_qv_lock_napi() */ for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { napi_disable(&adapter->q_vector[q_idx]->napi); - while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) { + while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) { pr_info("QV %d locked\n", q_idx); - mdelay(1); + usleep_range(1000, 20000); } } - local_bh_enable(); } #ifdef CONFIG_IXGBE_DCB @@ -7362,19 +7357,16 @@ static const struct net_device_ops ixgbe_netdev_ops = { **/ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) { - struct ixgbe_hw *hw = &adapter->hw; struct list_head *entry; int physfns = 0; - /* Some cards can not use the generic count PCIe functions method, and - * so must be hardcoded to the correct value. + /* Some cards can not use the generic count PCIe functions method, + * because they are behind a parent switch, so we hardcode these with + * the correct number of functions. */ - switch (hw->device_id) { - case IXGBE_DEV_ID_82599_SFP_SF_QP: - case IXGBE_DEV_ID_82599_QSFP_SF_QP: + if (ixgbe_pcie_from_parent(&adapter->hw)) { physfns = 4; - break; - default: + } else { list_for_each(entry, &adapter->pdev->bus_list) { struct pci_dev *pdev = list_entry(entry, struct pci_dev, bus_list); @@ -7759,29 +7751,6 @@ skip_sriov: if (ixgbe_pcie_from_parent(hw)) ixgbe_get_parent_bus_info(adapter); - /* print bus type/speed/width info */ - e_dev_info("(PCI Express:%s:%s) %pM\n", - (hw->bus.speed == ixgbe_bus_speed_8000 ? "8.0GT/s" : - hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" : - hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" : - "Unknown"), - (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" : - hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" : - hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" : - "Unknown"), - netdev->dev_addr); - - err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH); - if (err) - strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH); - if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) - e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", - hw->mac.type, hw->phy.type, hw->phy.sfp_type, - part_str); - else - e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", - hw->mac.type, hw->phy.type, part_str); - /* calculate the expected PCIe bandwidth required for optimal * performance. Note that some older parts will never have enough * bandwidth due to being older generation PCIe parts. 
We clamp these @@ -7797,6 +7766,19 @@ skip_sriov: } ixgbe_check_minimum_link(adapter, expected_gts); + err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH); + if (err) + strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH); + if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) + e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, hw->phy.sfp_type, + part_str); + else + e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, part_str); + + e_dev_info("%pM\n", netdev->dev_addr); + /* reset the hardware with the new settings */ err = hw->mac.ops.start_hw(hw); if (err == IXGBE_ERR_EEPROM_VERSION) { diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 84329b0d567a..54d9acef9c4e 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -45,16 +45,27 @@ struct ixgbe_stats { char stat_string[ETH_GSTRING_LEN]; - int sizeof_stat; - int stat_offset; - int base_stat_offset; - int saved_reset_offset; + struct { + int sizeof_stat; + int stat_offset; + int base_stat_offset; + int saved_reset_offset; + }; }; -#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \ - offsetof(struct ixgbevf_adapter, m), \ - offsetof(struct ixgbevf_adapter, b), \ - offsetof(struct ixgbevf_adapter, r) +#define IXGBEVF_STAT(m, b, r) { \ + .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \ + .stat_offset = offsetof(struct ixgbevf_adapter, m), \ + .base_stat_offset = offsetof(struct ixgbevf_adapter, b), \ + .saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \ +} + +#define IXGBEVF_ZSTAT(m) { \ + .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \ + .stat_offset = offsetof(struct ixgbevf_adapter, m), \ + .base_stat_offset = -1, \ + .saved_reset_offset = -1 \ +} static const struct ixgbe_stats ixgbe_gstrings_stats[] = { {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc, @@ -65,15 +76,20 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = { stats.saved_reset_vfgorc)}, {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc, stats.saved_reset_vfgotc)}, - {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)}, + {"tx_busy", IXGBEVF_ZSTAT(tx_busy)}, {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc, stats.saved_reset_vfmprc)}, - {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base, - zero_base)}, - {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base, - zero_base)}, - {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base, - zero_base)}, + {"rx_csum_offload_good", IXGBEVF_ZSTAT(hw_csum_rx_good)}, + {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)}, + {"tx_csum_offload_ctxt", IXGBEVF_ZSTAT(hw_csum_tx_good)}, +#ifdef BP_EXTENDED_STATS + {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)}, + {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)}, + {"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)}, + {"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)}, + {"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)}, + {"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)}, +#endif }; #define IXGBE_QUEUE_STATS_LEN 0 @@ -390,22 +406,50 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); + char *base = (char *) adapter; int i; +#ifdef BP_EXTENDED_STATS + u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0, + tx_yields = 0, tx_cleaned = 0, tx_missed = 0; + + 
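The ixgbe_read_pci_cfg_word_parent() change above swaps a hand-rolled capability walk for core helpers: pci_is_pcie() already knows whether a device carries the PCIe capability, and pcie_capability_read_word() resolves the capability offset internally. A minimal sketch reading a parent bridge's link status (the helper name is illustrative, not from the driver):

    #include <linux/pci.h>

    /* hypothetical: read PCIe link status from a device's parent */
    static int example_parent_lnksta(struct pci_dev *pdev, u16 *lnksta)
    {
        struct pci_dev *parent = pdev->bus->parent ?
                     pdev->bus->parent->self : NULL;

        if (!parent || !pci_is_pcie(parent))
            return -ENODEV;

        /* offset is relative to the PCIe capability; no manual walk */
        return pcie_capability_read_word(parent, PCI_EXP_LNKSTA, lnksta);
    }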
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 84329b0d567a..54d9acef9c4e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -45,16 +45,27 @@
 
 struct ixgbe_stats {
     char stat_string[ETH_GSTRING_LEN];
-    int sizeof_stat;
-    int stat_offset;
-    int base_stat_offset;
-    int saved_reset_offset;
+    struct {
+        int sizeof_stat;
+        int stat_offset;
+        int base_stat_offset;
+        int saved_reset_offset;
+    };
 };
 
-#define IXGBEVF_STAT(m, b, r)  sizeof(((struct ixgbevf_adapter *)0)->m), \
-                offsetof(struct ixgbevf_adapter, m), \
-                offsetof(struct ixgbevf_adapter, b), \
-                offsetof(struct ixgbevf_adapter, r)
+#define IXGBEVF_STAT(m, b, r) { \
+    .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+    .stat_offset = offsetof(struct ixgbevf_adapter, m), \
+    .base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
+    .saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+}
+
+#define IXGBEVF_ZSTAT(m) { \
+    .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+    .stat_offset = offsetof(struct ixgbevf_adapter, m), \
+    .base_stat_offset = -1, \
+    .saved_reset_offset = -1 \
+}
 
 static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
     {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
@@ -65,15 +76,20 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
                   stats.saved_reset_vfgorc)},
     {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
                   stats.saved_reset_vfgotc)},
-    {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)},
+    {"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
     {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
                    stats.saved_reset_vfmprc)},
-    {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base,
-                          zero_base)},
-    {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base,
-                        zero_base)},
-    {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
-                          zero_base)},
+    {"rx_csum_offload_good", IXGBEVF_ZSTAT(hw_csum_rx_good)},
+    {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
+    {"tx_csum_offload_ctxt", IXGBEVF_ZSTAT(hw_csum_tx_good)},
+#ifdef BP_EXTENDED_STATS
+    {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
+    {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
+    {"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
+    {"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
+    {"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
+    {"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
+#endif
 };
 
 #define IXGBE_QUEUE_STATS_LEN 0
@@ -390,22 +406,50 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
                       struct ethtool_stats *stats, u64 *data)
 {
     struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+    char *base = (char *) adapter;
     int i;
+#ifdef BP_EXTENDED_STATS
+    u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
+        tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
+
+    for (i = 0; i < adapter->num_rx_queues; i++) {
+        rx_yields += adapter->rx_ring[i].bp_yields;
+        rx_cleaned += adapter->rx_ring[i].bp_cleaned;
+        rx_missed += adapter->rx_ring[i].bp_misses;
+    }
+
+    for (i = 0; i < adapter->num_tx_queues; i++) {
+        tx_yields += adapter->tx_ring[i].bp_yields;
+        tx_cleaned += adapter->tx_ring[i].bp_cleaned;
+        tx_missed += adapter->tx_ring[i].bp_misses;
+    }
+
+    adapter->bp_rx_yields = rx_yields;
+    adapter->bp_rx_cleaned = rx_cleaned;
+    adapter->bp_rx_missed = rx_missed;
+
+    adapter->bp_tx_yields = tx_yields;
+    adapter->bp_tx_cleaned = tx_cleaned;
+    adapter->bp_tx_missed = tx_missed;
+#endif
 
     ixgbevf_update_stats(adapter);
     for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
-        char *p = (char *)adapter +
-            ixgbe_gstrings_stats[i].stat_offset;
-        char *b = (char *)adapter +
-            ixgbe_gstrings_stats[i].base_stat_offset;
-        char *r = (char *)adapter +
-            ixgbe_gstrings_stats[i].saved_reset_offset;
-        data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
-                sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
-              ((ixgbe_gstrings_stats[i].sizeof_stat ==
-                sizeof(u64)) ? *(u64 *)b : *(u32 *)b) +
-              ((ixgbe_gstrings_stats[i].sizeof_stat ==
-                sizeof(u64)) ? *(u64 *)r : *(u32 *)r);
+        char *p = base + ixgbe_gstrings_stats[i].stat_offset;
+        char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
+        char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
+
+        if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
+            if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+                data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
+            else
+                data[i] = *(u64 *)p;
+        } else {
+            if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+                data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
+            else
+                data[i] = *(u32 *)p;
+        }
     }
 }
 
@@ -634,6 +678,85 @@ static int ixgbevf_nway_reset(struct net_device *netdev)
     return 0;
 }
 
+static int ixgbevf_get_coalesce(struct net_device *netdev,
+                struct ethtool_coalesce *ec)
+{
+    struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+    /* only valid if in constant ITR mode */
+    if (adapter->rx_itr_setting <= 1)
+        ec->rx_coalesce_usecs = adapter->rx_itr_setting;
+    else
+        ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+    /* if in mixed tx/rx queues per vector mode, report only rx settings */
+    if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+        return 0;
+
+    /* only valid if in constant ITR mode */
+    if (adapter->tx_itr_setting <= 1)
+        ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+    else
+        ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+
+    return 0;
+}
+
+static int ixgbevf_set_coalesce(struct net_device *netdev,
+                struct ethtool_coalesce *ec)
+{
+    struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+    struct ixgbevf_q_vector *q_vector;
+    int num_vectors, i;
+    u16 tx_itr_param, rx_itr_param;
+
+    /* don't accept tx specific changes if we've got mixed RxTx vectors */
+    if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
+        && ec->tx_coalesce_usecs)
+        return -EINVAL;
+
+
+    if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
+        (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
+        return -EINVAL;
+
+    if (ec->rx_coalesce_usecs > 1)
+        adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
+    else
+        adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+
+    if (adapter->rx_itr_setting == 1)
+        rx_itr_param = IXGBE_20K_ITR;
+    else
+        rx_itr_param = adapter->rx_itr_setting;
+
+
+    if (ec->tx_coalesce_usecs > 1)
+        adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+    else
+        adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+
+    if (adapter->tx_itr_setting == 1)
+        tx_itr_param = IXGBE_10K_ITR;
+    else
+        tx_itr_param = adapter->tx_itr_setting;
+
+    num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+    for (i = 0; i < num_vectors; i++) {
+        q_vector = adapter->q_vector[i];
+        if (q_vector->tx.count && !q_vector->rx.count)
+            /* tx only */
+            q_vector->itr = tx_itr_param;
+        else
+            /* rx only or mixed */
+            q_vector->itr = rx_itr_param;
+        ixgbevf_write_eitr(q_vector);
+    }
+
+    return 0;
+}
+
 static const struct ethtool_ops ixgbevf_ethtool_ops = {
     .get_settings           = ixgbevf_get_settings,
     .get_drvinfo            = ixgbevf_get_drvinfo,
@@ -649,6 +772,8 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
     .get_sset_count         = ixgbevf_get_sset_count,
     .get_strings            = ixgbevf_get_strings,
     .get_ethtool_stats      = ixgbevf_get_ethtool_stats,
+    .get_coalesce           = ixgbevf_get_coalesce,
+    .set_coalesce           = ixgbevf_set_coalesce,
 };
 
 void ixgbevf_set_ethtool_ops(struct net_device *netdev)
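Both coalesce callbacks above depend on the convention stated in ixgbevf.h that an itr_setting stores the microsecond interval shifted left by 2 to match the EITR register layout, with 0 and 1 reserved for "off" and "dynamic". A worked example of the round trip:

    /* ethtool requests 50 usecs between interrupts */
    adapter->rx_itr_setting = 50 << 2;      /* stored as 200 */

    /* 1 s / 50 us = 20000 interrupts/s, i.e. the 20K ITR rate,
     * which is why its register constant is 200 in this scheme
     */

    /* reporting back to ethtool simply undoes the shift */
    ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;   /* 50 */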
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 64a2b912e73c..8971e2d0a984 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -38,6 +38,11 @@
 
 #include "vf.h"
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#define BP_EXTENDED_STATS
+#endif
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbevf_tx_buffer {
@@ -76,6 +81,11 @@ struct ixgbevf_ring {
     struct u64_stats_sync syncp;
     u64 hw_csum_rx_error;
     u64 hw_csum_rx_good;
+#ifdef BP_EXTENDED_STATS
+    u64 bp_yields;
+    u64 bp_misses;
+    u64 bp_cleaned;
+#endif
 
     u16 head;
     u16 tail;
@@ -145,7 +155,119 @@ struct ixgbevf_q_vector {
     struct napi_struct napi;
     struct ixgbevf_ring_container rx, tx;
     char name[IFNAMSIZ + 9];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+    unsigned int state;
+#define IXGBEVF_QV_STATE_IDLE        0
+#define IXGBEVF_QV_STATE_NAPI        1    /* NAPI owns this QV */
+#define IXGBEVF_QV_STATE_POLL        2    /* poll owns this QV */
+#define IXGBEVF_QV_STATE_DISABLED    4    /* QV is disabled */
+#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
+#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
+#define IXGBEVF_QV_STATE_NAPI_YIELD  8    /* NAPI yielded this QV */
+#define IXGBEVF_QV_STATE_POLL_YIELD  16   /* poll yielded this QV */
+#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD)
+#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD)
+    spinlock_t lock;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 };
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
+{
+
+    spin_lock_init(&q_vector->lock);
+    q_vector->state = IXGBEVF_QV_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a q_vector */
+static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
+{
+    int rc = true;
+    spin_lock_bh(&q_vector->lock);
+    if (q_vector->state & IXGBEVF_QV_LOCKED) {
+        WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
+        q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
+        rc = false;
+#ifdef BP_EXTENDED_STATS
+        q_vector->tx.ring->bp_yields++;
+#endif
+    } else {
+        /* we don't care if someone yielded */
+        q_vector->state = IXGBEVF_QV_STATE_NAPI;
+    }
+    spin_unlock_bh(&q_vector->lock);
+    return rc;
+}
+
+/* returns true if someone tried to get the qv while napi had it */
+static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
+{
+    int rc = false;
+    spin_lock_bh(&q_vector->lock);
+    WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
+                   IXGBEVF_QV_STATE_NAPI_YIELD));
+
+    if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+        rc = true;
+    /* reset state to idle, unless QV is disabled */
+    q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+    spin_unlock_bh(&q_vector->lock);
+    return rc;
+}
+
+/* called from ixgbevf_low_latency_poll() */
+static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
+{
+    int rc = true;
+    spin_lock_bh(&q_vector->lock);
+    if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
+        q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
+        rc = false;
+#ifdef BP_EXTENDED_STATS
+        q_vector->rx.ring->bp_yields++;
+#endif
+    } else {
+        /* preserve yield marks */
+        q_vector->state |= IXGBEVF_QV_STATE_POLL;
+    }
+    spin_unlock_bh(&q_vector->lock);
+    return rc;
+}
+
+/* returns true if someone tried to get the qv while it was locked */
+static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
+{
+    int rc = false;
+    spin_lock_bh(&q_vector->lock);
+    WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI));
+
+    if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+        rc = true;
+    /* reset state to idle, unless QV is disabled */
+    q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+    spin_unlock_bh(&q_vector->lock);
+    return rc;
+}
+
+/* true if a socket is polling, even if it did not get the lock */
+static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
+{
+    WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED));
+    return q_vector->state & IXGBEVF_QV_USER_PEND;
+}
+
+/* false if QV is currently owned */
+static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
+{
+    int rc = true;
+    spin_lock_bh(&q_vector->lock);
+    if (q_vector->state & IXGBEVF_QV_OWNED)
+        rc = false;
+    q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
+    spin_unlock_bh(&q_vector->lock);
+    return rc;
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 
 /*
  * microsecond values for various ITR rates shifted by 2 to fit itr register
@@ -165,9 +287,13 @@ struct ixgbevf_q_vector {
     ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
 #define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
 
-#define IXGBE_DESC_UNUSED(R) \
-    ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
-    (R)->next_to_clean - (R)->next_to_use - 1)
+static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
+{
+    u16 ntc = ring->next_to_clean;
+    u16 ntu = ring->next_to_use;
+
+    return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
 
 #define IXGBEVF_RX_DESC(R, i)        \
     (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
@@ -240,7 +366,6 @@ struct ixgbevf_adapter {
     struct ixgbe_hw hw;
     u16 msg_enable;
     struct ixgbevf_hw_stats stats;
-    u64 zero_base;
     /* Interrupt Throttle Rate */
     u32 eitr_param;
 
@@ -249,6 +374,16 @@ struct ixgbevf_adapter {
     unsigned int tx_ring_count;
     unsigned int rx_ring_count;
 
+#ifdef BP_EXTENDED_STATS
+    u64 bp_rx_yields;
+    u64 bp_rx_cleaned;
+    u64 bp_rx_missed;
+
+    u64 bp_tx_yields;
+    u64 bp_tx_cleaned;
+    u64 bp_tx_missed;
+#endif
+
     u32 link_speed;
     bool link_up;
 
@@ -293,6 +428,8 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
                    struct ixgbevf_ring *);
 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
 int ethtool_ioctl(struct ifreq *ifr);
+
+extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);
 
 void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
 void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
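ixgbevf_desc_unused() above is the standard free-slot count for a descriptor ring that keeps one slot permanently empty so a full ring is distinguishable from an empty one. Two worked cases for a 512-entry ring:

    /* same formula, extracted for illustration */
    static inline u16 desc_unused(u16 count, u16 ntc, u16 ntu)
    {
        return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
    }

    /* count = 512:
     *   ntc = 10, ntu = 7   ->   0 + 10 -   7 - 1 =  2 slots free
     *   ntc = 3,  ntu = 500 -> 512 +  3 - 500 - 1 = 14 slots free
     * the trailing "- 1" is the deliberately unused slot
     */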
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index ce27d62f9c8e..038bfc8b7616 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
 static const char ixgbevf_driver_string[] =
     "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
 
-#define DRV_VERSION "2.7.12-k"
+#define DRV_VERSION "2.11.3-k"
 const char ixgbevf_driver_version[] = DRV_VERSION;
 static char ixgbevf_copyright[] =
     "Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -251,7 +251,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
     if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
-             (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+             (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
         /* Make sure that anybody stopping the queue after this
          * sees the new next_to_clean.
          */
@@ -300,6 +300,30 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
 }
 
 /**
+ * ixgbevf_rx_skb - Helper function to determine proper Rx method
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
+               struct sk_buff *skb, u8 status,
+               union ixgbe_adv_rx_desc *rx_desc)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+    skb_mark_napi_id(skb, &q_vector->napi);
+
+    if (ixgbevf_qv_busy_polling(q_vector)) {
+        netif_receive_skb(skb);
+        /* exit early if we busy polled */
+        return;
+    }
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+    ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
+}
+
+/**
  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
  * @ring: pointer to Rx descriptor ring structure
  * @status_err: hardware indication of status of receive
@@ -396,9 +420,9 @@ static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
     IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
 }
 
-static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
-                 struct ixgbevf_ring *rx_ring,
-                 int budget)
+static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
+                struct ixgbevf_ring *rx_ring,
+                int budget)
 {
     struct ixgbevf_adapter *adapter = q_vector->adapter;
     struct pci_dev *pdev = adapter->pdev;
@@ -473,15 +497,6 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
         total_rx_bytes += skb->len;
         total_rx_packets++;
 
-        /*
-         * Work around issue of some types of VM to VM loop back
-         * packets not getting split correctly
-         */
-        if (staterr & IXGBE_RXD_STAT_LB) {
-            u32 header_fixup_len = skb_headlen(skb);
-            if (header_fixup_len < 14)
-                skb_push(skb, header_fixup_len);
-        }
         skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
         /* Workaround hardware that can't do proper VEPA multicast
@@ -494,7 +509,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
             goto next_desc;
         }
 
-        ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
+        ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
 
 next_desc:
         rx_desc->wb.upper.status_error = 0;
@@ -514,7 +529,7 @@ next_desc:
     }
 
     rx_ring->next_to_clean = i;
-    cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+    cleaned_count = ixgbevf_desc_unused(rx_ring);
 
     if (cleaned_count)
         ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
@@ -526,7 +541,7 @@ next_desc:
     q_vector->rx.total_packets += total_rx_packets;
     q_vector->rx.total_bytes += total_rx_bytes;
 
-    return !!budget;
+    return total_rx_packets;
 }
 
 /**
@@ -549,6 +564,11 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
     ixgbevf_for_each_ring(ring, q_vector->tx)
         clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+    if (!ixgbevf_qv_lock_napi(q_vector))
+        return budget;
+#endif
+
     /* attempt to distribute budget to each queue fairly, but don't allow
      * the budget to go below 1 because we'll exit polling */
     if (q_vector->rx.count > 1)
@@ -558,10 +578,15 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 
     adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
     ixgbevf_for_each_ring(ring, q_vector->rx)
-        clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
-                               per_ring_budget);
+        clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
+                            per_ring_budget)
+                   < per_ring_budget);
     adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+    ixgbevf_qv_unlock_napi(q_vector);
+#endif
+
     /* If all work not completed, return budget and keep polling */
     if (!clean_complete)
         return budget;
@@ -580,7 +605,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
  * ixgbevf_write_eitr - write VTEITR register in hardware specific way
  * @q_vector: structure containing interrupt and ring information
  */
-static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
+void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
 {
     struct ixgbevf_adapter *adapter = q_vector->adapter;
     struct ixgbe_hw *hw = &adapter->hw;
@@ -596,6 +621,40 @@ static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
     IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
+{
+    struct ixgbevf_q_vector *q_vector =
+            container_of(napi, struct ixgbevf_q_vector, napi);
+    struct ixgbevf_adapter *adapter = q_vector->adapter;
+    struct ixgbevf_ring  *ring;
+    int found = 0;
+
+    if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+        return LL_FLUSH_FAILED;
+
+    if (!ixgbevf_qv_lock_poll(q_vector))
+        return LL_FLUSH_BUSY;
+
+    ixgbevf_for_each_ring(ring, q_vector->rx) {
+        found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
+#ifdef BP_EXTENDED_STATS
+        if (found)
+            ring->bp_cleaned += found;
+        else
+            ring->bp_misses++;
+#endif
+        if (found)
+            break;
+    }
+
+    ixgbevf_qv_unlock_poll(q_vector);
+
+    return found;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 /**
  * ixgbevf_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
@@ -1082,6 +1141,21 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
     IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
 }
 
+static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
+{
+    struct ixgbe_hw *hw = &adapter->hw;
+
+    /* PSRTYPE must be initialized in 82599 */
+    u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
+              IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
+              IXGBE_PSRTYPE_L2HDR;
+
+    if (adapter->num_rx_queues > 1)
+        psrtype |= 1 << 29;
+
+    IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+}
+
 static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
 {
     struct ixgbe_hw *hw = &adapter->hw;
@@ -1129,8 +1203,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
     int i, j;
     u32 rdlen;
 
-    /* PSRTYPE must be initialized in 82599 */
-    IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+    ixgbevf_setup_psrtype(adapter);
 
     /* set_rx_buffer_len must be called before ring initialization */
     ixgbevf_set_rx_buffer_len(adapter);
@@ -1268,6 +1341,9 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
 
     for (q_idx = 0; q_idx < q_vectors; q_idx++) {
         q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+        ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
+#endif
         napi_enable(&q_vector->napi);
     }
 }
@@ -1281,6 +1357,12 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
     for (q_idx = 0; q_idx < q_vectors; q_idx++) {
         q_vector = adapter->q_vector[q_idx];
         napi_disable(&q_vector->napi);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+        while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
+            pr_info("QV %d locked\n", q_idx);
+            usleep_range(1000, 20000);
+        }
+#endif /* CONFIG_NET_RX_BUSY_POLL */
     }
 }
 
@@ -1298,7 +1380,7 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
     for (i = 0; i < adapter->num_rx_queues; i++) {
         struct ixgbevf_ring *ring = &adapter->rx_ring[i];
         ixgbevf_alloc_rx_buffers(adapter, ring,
-                     IXGBE_DESC_UNUSED(ring));
+                     ixgbevf_desc_unused(ring));
     }
 }
 
@@ -1931,6 +2013,9 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
         q_vector->v_idx = q_idx;
         netif_napi_add(adapter->netdev, &q_vector->napi,
                    ixgbevf_poll, 64);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+        napi_hash_add(&q_vector->napi);
+#endif
         adapter->q_vector[q_idx] = q_vector;
     }
 
@@ -1940,6 +2025,9 @@ err_out:
     while (q_idx) {
         q_idx--;
         q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+        napi_hash_del(&q_vector->napi);
+#endif
         netif_napi_del(&q_vector->napi);
         kfree(q_vector);
         adapter->q_vector[q_idx] = NULL;
@@ -1963,6 +2051,9 @@ static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
         struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
 
         adapter->q_vector[q_idx] = NULL;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+        napi_hash_del(&q_vector->napi);
+#endif
         netif_napi_del(&q_vector->napi);
         kfree(q_vector);
     }
@@ -3011,7 +3102,7 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
     /* We need to check again in a case another CPU has just
      * made room available. */
-    if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+    if (likely(ixgbevf_desc_unused(tx_ring) < size))
         return -EBUSY;
 
     /* A reprieve! - use start_queue because it doesn't call schedule */
@@ -3022,7 +3113,7 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 
 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 {
-    if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+    if (likely(ixgbevf_desc_unused(tx_ring) >= size))
         return 0;
     return __ixgbevf_maybe_stop_tx(tx_ring, size);
 }
@@ -3294,6 +3385,9 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
     .ndo_tx_timeout        = ixgbevf_tx_timeout,
     .ndo_vlan_rx_add_vid    = ixgbevf_vlan_rx_add_vid,
     .ndo_vlan_rx_kill_vid    = ixgbevf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+    .ndo_busy_poll        = ixgbevf_busy_poll_recv,
+#endif
 };
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
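The busy-poll wiring added to ixgbevf in this diff has three cooperating pieces, all visible in the hunks above: napi_hash_add()/napi_hash_del() bracket the NAPI context's lifetime so sockets can find it by id, skb_mark_napi_id() tags every received skb with that id, and the ndo_busy_poll handler cleans a few descriptors outside the normal interrupt path. Stripped to a registration skeleton (fragments gathered from the driver, not one complete function):

    /* allocation path: make the context findable via sk->sk_napi_id */
    netif_napi_add(netdev, &q_vector->napi, ixgbevf_poll, 64);
    napi_hash_add(&q_vector->napi);

    /* receive path: tag each skb with the NAPI id it arrived on */
    skb_mark_napi_id(skb, &q_vector->napi);

    /* net_device_ops: sockets using SO_BUSY_POLL land here */
    .ndo_busy_poll = ixgbevf_busy_poll_recv,

    /* teardown: unhash before the q_vector memory is freed */
    napi_hash_del(&q_vector->napi);
    netif_napi_del(&q_vector->napi);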