From c7bb417dbb8888cfd20824d54f9af9c92b9ff43d Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Tue, 1 Oct 2013 04:33:49 -0700 Subject: ixgbevf: cleanup redundant mailbox read failure check Since we are already checking for read failure in check_link, we don't need to do it here. Instead, just make sure the watchdog task gets scheduled if we are up; the check can be done there. This better follows the igbvf method of handling a mailbox event and message timeout. Signed-off-by: Alexander Duyck Signed-off-by: Don Skidmore Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 29 ++--------------------- 1 file changed, 2 insertions(+), 27 deletions(-) (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 59a62bbfb371..b9a78a2d486d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -756,37 +756,12 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) static irqreturn_t ixgbevf_msix_other(int irq, void *data) { struct ixgbevf_adapter *adapter = data; - struct pci_dev *pdev = adapter->pdev; struct ixgbe_hw *hw = &adapter->hw; - u32 msg; - bool got_ack = false; hw->mac.get_link_status = 1; - if (!hw->mbx.ops.check_for_ack(hw)) - got_ack = true; - - if (!hw->mbx.ops.check_for_msg(hw)) { - hw->mbx.ops.read(hw, &msg, 1); - if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) { - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + 1)); - adapter->link_up = false; - } - - if (msg & IXGBE_VT_MSGTYPE_NACK) - dev_info(&pdev->dev, - "Last Request of type %2.2x to PF Nacked\n", - msg & 0xFF); - hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS; - } - - /* checking for the ack clears the PFACK bit. Place - * it back in the v2p_mailbox cache so that anyone - * polling for an ack will not miss it - */ - if (got_ack) - hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; + if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); -- cgit v1.2.3-59-g8ed1b From 858c3dda5ea3519a3799a147904ae1d6e6c4e7c1 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Tue, 1 Oct 2013 04:33:50 -0700 Subject: ixgbevf: add wait for Rx queue disable A new function was added to wait for the Rx queues to be disabled before disabling NAPI. This function also allows us to modify ixgbevf_rx_desc_queue_enable() to better match ixgbe. I also converted some msleep calls to usleep_range while in this code. Signed-off-by: Alexander Duyck Signed-off-by: Don Skidmore Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher Signed-off-by: David S.
Miller --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 61 ++++++++++++++++------- 1 file changed, 44 insertions(+), 17 deletions(-) (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index b9a78a2d486d..e3493b0e5348 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1302,27 +1302,51 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter) } } -#define IXGBE_MAX_RX_DESC_POLL 10 -static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, - int rxr) +#define IXGBEVF_MAX_RX_DESC_POLL 10 +static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, + int rxr) { struct ixgbe_hw *hw = &adapter->hw; + int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; + u32 rxdctl; int j = adapter->rx_ring[rxr].reg_idx; - int k; - for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) { - if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE) - break; - else - msleep(1); - } - if (k >= IXGBE_MAX_RX_DESC_POLL) { - hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d " "not set within the polling period\n", rxr); - } + do { + usleep_range(1000, 2000); + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); + } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) + hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n", + rxr); + + ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr], + (adapter->rx_ring[rxr].count - 1)); +} - ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr], - adapter->rx_ring[rxr].count - 1); +static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + + /* write value back with RXDCTL.ENABLE bit cleared */ + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); + + /* the hardware may take up to 100us to really disable the rx queue */ + do { + udelay(10); + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); + } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) + hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n", + reg_idx); } static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) @@ -1654,7 +1678,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter) /* signal that we are down to the interrupt handler */ set_bit(__IXGBEVF_DOWN, &adapter->state); - /* disable receives */ + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]); netif_tx_disable(netdev); -- cgit v1.2.3-59-g8ed1b From 798e381a0c387dcbcb014e07c0e6683cf2d2dc22 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Tue, 1 Oct 2013 04:33:51 -0700 Subject: ixgbevf: move API neg to reset path After this patch, the API negotiation will occur in the reset path, so the PF will be informed of the API version earlier. This also requires the mailbox lock to be initialized sooner. Signed-off-by: Alexander Duyck Signed-off-by: Don Skidmore Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher Signed-off-by: David S.
Miller --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index e3493b0e5348..ce27d62f9c8e 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1544,8 +1544,6 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - ixgbevf_negotiate_api(adapter); - ixgbevf_reset_queues(adapter); ixgbevf_configure(adapter); @@ -1735,10 +1733,12 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; - if (hw->mac.ops.reset_hw(hw)) + if (hw->mac.ops.reset_hw(hw)) { hw_dbg(hw, "PF still resetting\n"); - else + } else { hw->mac.ops.init_hw(hw); + ixgbevf_negotiate_api(adapter); + } if (is_valid_ether_addr(adapter->hw.mac.addr)) { memcpy(netdev->dev_addr, adapter->hw.mac.addr, @@ -2074,6 +2074,9 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) hw->mac.max_tx_queues = 2; hw->mac.max_rx_queues = 2; + /* lock to protect mailbox accesses */ + spin_lock_init(&adapter->mbx_lock); + err = hw->mac.ops.reset_hw(hw); if (err) { dev_info(&pdev->dev, @@ -2084,6 +2087,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) pr_err("init_shared_code failed: %d\n", err); goto out; } + ixgbevf_negotiate_api(adapter); err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); if (err) dev_info(&pdev->dev, "Error reading MAC address\n"); @@ -2099,9 +2103,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len); } - /* lock to protect mailbox accesses */ - spin_lock_init(&adapter->mbx_lock); - /* Enable dynamic interrupt throttling rates */ adapter->rx_itr_setting = 1; adapter->tx_itr_setting = 1; @@ -2622,8 +2623,6 @@ static int ixgbevf_open(struct net_device *netdev) } } - ixgbevf_negotiate_api(adapter); - /* setup queue reg_idx and Rx queue count */ err = ixgbevf_setup_queues(adapter); if (err) @@ -3218,6 +3217,8 @@ static int ixgbevf_resume(struct pci_dev *pdev) } pci_set_master(pdev); + ixgbevf_reset(adapter); + rtnl_lock(); err = ixgbevf_init_interrupt_scheme(adapter); rtnl_unlock(); @@ -3226,8 +3227,6 @@ static int ixgbevf_resume(struct pci_dev *pdev) return err; } - ixgbevf_reset(adapter); - if (netif_running(netdev)) { err = ixgbevf_open(netdev); if (err) -- cgit v1.2.3-59-g8ed1b From 1bb9c6390e0f6df355576b48514b393a2579c3b9 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Sat, 21 Sep 2013 01:57:33 +0000 Subject: ixgbevf: Add function to set PSRTYPE register This patch creates a new function to set PSRTYPE. This function helps lay the groundwork for eventual multi-queue support.
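For reference, a minimal sketch of how the new function composes the VFPSRTYPE value (it mirrors the diff below; the interpretation of bit 29 is drawn from the analogous ixgbe code, where bits 30:29 encode the number of RSS queues per pool, so treat that reading as an assumption):

	/* header-split type bits are OR'd together; the multi-queue bit
	 * is only set once more than one Rx queue is in use
	 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)
		psrtype |= 1 << 29;	/* two Rx queues per pool */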
Signed-off-by: Alexander Duyck Signed-off-by: Don Skidmore Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index ce27d62f9c8e..c0f9aad2cda3 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1082,6 +1082,21 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); } +static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + /* PSRTYPE must be initialized in 82599 */ + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR | + IXGBE_PSRTYPE_L2HDR; + + if (adapter->num_rx_queues > 1) + psrtype |= 1 << 29; + + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); +} + static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -1129,8 +1144,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) int i, j; u32 rdlen; - /* PSRTYPE must be initialized in 82599 */ - IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); + ixgbevf_setup_psrtype(adapter); /* set_rx_buffer_len must be called before ring initialization */ ixgbevf_set_rx_buffer_len(adapter); -- cgit v1.2.3-59-g8ed1b From 3849623e03662babfdb82a6192e2f23b3f157119 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 22 Oct 2013 06:19:18 +0000 Subject: ixgbevf: implement ethtool get/set coalesce This patch adds support for ethtool's get_coalesce and set_coalesce commands for the ixgbevf driver. This enables dynamically updating the minimum time between interrupts.
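The mapping between ethtool's microsecond values and the driver's itr_setting fields is worth spelling out. A small, self-contained sketch of the conversion the patch performs (helper names are illustrative, not driver API; the shifts come from the set_coalesce/get_coalesce hunks below):

	#include <stdio.h>

	/* 0 disables moderation, 1 selects the dynamic default, and
	 * larger settings store usecs << 2 so the value can be written
	 * to the VTEITR register as-is
	 */
	static unsigned int usecs_to_itr_setting(unsigned int usecs)
	{
		return usecs > 1 ? usecs << 2 : usecs;
	}

	static unsigned int itr_setting_to_usecs(unsigned int setting)
	{
		return setting <= 1 ? setting : setting >> 2;
	}

	int main(void)
	{
		unsigned int setting = usecs_to_itr_setting(50);

		/* round-trips: 50 usecs -> 200 -> 50 usecs */
		printf("%u -> %u\n", setting, itr_setting_to_usecs(setting));
		return 0;
	}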
Signed-off-by: Jacob Keller Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 81 +++++++++++++++++++++++ drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 2 + drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2 +- 3 files changed, 84 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c') diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 84329b0d567a..21adb1bc1706 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -634,6 +634,85 @@ static int ixgbevf_nway_reset(struct net_device *netdev) return 0; } +static int ixgbevf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + /* only valid if in constant ITR mode */ + if (adapter->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (adapter->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + + return 0; +} + +static int ixgbevf_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_q_vector *q_vector; + int num_vectors, i; + u16 tx_itr_param, rx_itr_param; + + /* don't accept tx specific changes if we've got mixed RxTx vectors */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count + && ec->tx_coalesce_usecs) + return -EINVAL; + + + if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || + (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + + if (adapter->rx_itr_setting == 1) + rx_itr_param = IXGBE_20K_ITR; + else + rx_itr_param = adapter->rx_itr_setting; + + + if (ec->tx_coalesce_usecs > 1) + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) + tx_itr_param = IXGBE_10K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + + num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (i = 0; i < num_vectors; i++) { + q_vector = adapter->q_vector[i]; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + ixgbevf_write_eitr(q_vector); + } + + return 0; +} + static const struct ethtool_ops ixgbevf_ethtool_ops = { .get_settings = ixgbevf_get_settings, .get_drvinfo = ixgbevf_get_drvinfo, @@ -649,6 +728,8 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = { .get_sset_count = ixgbevf_get_sset_count, .get_strings = ixgbevf_get_strings, .get_ethtool_stats = ixgbevf_get_ethtool_stats, + .get_coalesce = ixgbevf_get_coalesce, + .set_coalesce = ixgbevf_set_coalesce, }; void ixgbevf_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 64a2b912e73c..d7837dcc9897 100644 --- 
a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -293,6 +293,8 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *); void ixgbevf_update_stats(struct ixgbevf_adapter *adapter); int ethtool_ioctl(struct ifreq *ifr); +extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector); + void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index c0f9aad2cda3..8407fd274810 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -580,7 +580,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) * ixgbevf_write_eitr - write VTEITR register in hardware specific way * @q_vector: structure containing interrupt and ring information */ -static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) +void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) { struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbe_hw *hw = &adapter->hw; -- cgit v1.2.3-59-g8ed1b From 9e6fcae767da775df1679646749717c223440382 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Sat, 21 Sep 2013 05:21:18 +0000 Subject: ixgbevf: bump driver version Bump the driver version to reflect the out-of-tree driver release with which it has equivalent functionality (2.11.3). Signed-off-by: Don Skidmore Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 8407fd274810..87279c8ab2b9 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf"; static const char ixgbevf_driver_string[] = "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; -#define DRV_VERSION "2.7.12-k" +#define DRV_VERSION "2.11.3-k" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = "Copyright (c) 2009 - 2012 Intel Corporation."; -- cgit v1.2.3-59-g8ed1b From 08681618662f18631467a9746dea821db6f22a66 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Sat, 21 Sep 2013 06:24:09 +0000 Subject: ixgbevf: add ixgbevf_rx_skb This patch adds ixgbevf_rx_skb in line with how ixgbe handles the variations in how packets can be received. It will be extended in a following patch for CONFIG_NET_RX_BUSY_POLL support.
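At this point the helper is a pure indirection; the CONFIG_NET_RX_BUSY_POLL patch later in this series extends it roughly as follows (lifted from that later diff, so this is a preview of code that appears below rather than anything new):

	static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
				   struct sk_buff *skb, u8 status,
				   union ixgbe_adv_rx_desc *rx_desc)
	{
	#ifdef CONFIG_NET_RX_BUSY_POLL
		skb_mark_napi_id(skb, &q_vector->napi);

		if (ixgbevf_qv_busy_polling(q_vector)) {
			netif_receive_skb(skb);
			/* exit early if we busy polled */
			return;
		}
	#endif /* CONFIG_NET_RX_BUSY_POLL */
		ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
	}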
Signed-off-by: Jacob Keller Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 87279c8ab2b9..a2cc6bb7318c 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -299,6 +299,20 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, netif_rx(skb); } +/** + * ixgbevf_rx_skb - Helper function to determine proper Rx method + * @q_vector: structure containing interrupt and ring information + * @skb: packet to send up + * @status: hardware indication of status of receive + * @rx_desc: rx descriptor + **/ +static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, + struct sk_buff *skb, u8 status, + union ixgbe_adv_rx_desc *rx_desc) +{ + ixgbevf_receive_skb(q_vector, skb, status, rx_desc); +} + /** * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: pointer to Rx descriptor ring structure @@ -494,7 +508,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, goto next_desc; } - ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc); + ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc); next_desc: rx_desc->wb.upper.status_error = 0; -- cgit v1.2.3-59-g8ed1b From 08e50a20ed05fba11c7dbc9e325369bef6a1c194 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Sat, 21 Sep 2013 06:24:14 +0000 Subject: ixgbevf: have clean_rx_irq return total_rx_packets cleaned Rather than return true/false indicating whether there was budget left, return the total packets cleaned. This currently has no use, but will be used in a following patch which enables CONFIG_NET_RX_BUSY_POLL support in order to track how many packets were cleaned during the busy poll as part of the extended statistics. 
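To make the new return convention concrete: the NAPI caller now derives the old boolean itself by comparing the count against the per-ring budget. A self-contained worked example (values are illustrative):

	#include <stdio.h>

	/* ixgbevf_poll() splits its budget across the vector's Rx rings
	 * and treats a ring that used its whole share as "still busy"
	 */
	int main(void)
	{
		int budget = 64, rx_rings = 2;
		int per_ring_budget =
			budget / rx_rings > 1 ? budget / rx_rings : 1;
		int cleaned = 32;	/* ring consumed its full share */
		int clean_complete = cleaned < per_ring_budget;

		/* prints 32 and 0: NAPI keeps polling */
		printf("per_ring_budget=%d complete=%d\n",
		       per_ring_budget, clean_complete);
		return 0;
	}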
Signed-off-by: Jacob Keller Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index a2cc6bb7318c..06e29bd73777 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -410,9 +410,9 @@ static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); } -static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, - struct ixgbevf_ring *rx_ring, - int budget) +static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, + struct ixgbevf_ring *rx_ring, + int budget) { struct ixgbevf_adapter *adapter = q_vector->adapter; struct pci_dev *pdev = adapter->pdev; @@ -540,7 +540,7 @@ next_desc: q_vector->rx.total_packets += total_rx_packets; q_vector->rx.total_bytes += total_rx_bytes; - return !!budget; + return total_rx_packets; } /** @@ -572,8 +572,9 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) adapter->flags |= IXGBE_FLAG_IN_NETPOLL; ixgbevf_for_each_ring(ring, q_vector->rx) - clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring, - per_ring_budget); + clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring, + per_ring_budget) + < per_ring_budget); adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; /* If all work not completed, return budget and keep polling */ -- cgit v1.2.3-59-g8ed1b From c777cdfa4e69548f45078165d17828dd6711120f Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Sat, 21 Sep 2013 06:24:20 +0000 Subject: ixgbevf: implement CONFIG_NET_RX_BUSY_POLL This patch enables CONFIG_NET_RX_BUSY_POLL support in the VF code. This enables sockets that have set the SO_BUSY_POLL socket option to use the driver's busy-poll receive hook (ndo_busy_poll, implemented here by ixgbevf_busy_poll_recv), which can result in lower latency at the cost of higher CPU utilization and increased power usage. This support is similar to how the ixgbe driver works.
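From userspace, a socket opts in via the standard busy-poll API; a hedged sketch (not part of this patch; it assumes a libc new enough to define SO_BUSY_POLL, and setting a non-zero value historically required CAP_NET_ADMIN):

	#include <stdio.h>
	#include <sys/socket.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		int usecs = 50;	/* max time to busy-poll, in microseconds */

		if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
					 &usecs, sizeof(usecs)) < 0)
			perror("SO_BUSY_POLL");

		return 0;
	}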
Signed-off-by: Jacob Keller Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 109 ++++++++++++++++++++++ drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 68 ++++++++++++++ 2 files changed, 177 insertions(+) (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index d7837dcc9897..85c7560e237b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -38,6 +38,10 @@ #include "vf.h" +#ifdef CONFIG_NET_RX_BUSY_POLL +#include <net/busy_poll.h> +#endif + /* wrapper around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ struct ixgbevf_tx_buffer { @@ -145,7 +149,112 @@ struct ixgbevf_q_vector { struct napi_struct napi; struct ixgbevf_ring_container rx, tx; char name[IFNAMSIZ + 9]; +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned int state; +#define IXGBEVF_QV_STATE_IDLE 0 +#define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */ +#define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */ +#define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */ +#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL) +#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED) +#define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */ +#define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */ +#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD) +#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD) + spinlock_t lock; +#endif /* CONFIG_NET_RX_BUSY_POLL */ }; +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector) +{ + + spin_lock_init(&q_vector->lock); + q_vector->state = IXGBEVF_QV_STATE_IDLE; +} + +/* called from the device poll routine to get ownership of a q_vector */ +static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) +{ + int rc = true; + spin_lock_bh(&q_vector->lock); + if (q_vector->state & IXGBEVF_QV_LOCKED) { + WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI); + q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD; + rc = false; + } else { + /* we don't care if someone yielded */ + q_vector->state = IXGBEVF_QV_STATE_NAPI; + } + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* returns true if someone tried to get the qv while napi had it */ +static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector) +{ + int rc = false; + spin_lock_bh(&q_vector->lock); + WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL | + IXGBEVF_QV_STATE_NAPI_YIELD)); + + if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD) + rc = true; + /* reset state to idle, unless QV is disabled */ + q_vector->state &= IXGBEVF_QV_STATE_DISABLED; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* called from ixgbevf_busy_poll_recv() */ +static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) +{ + int rc = true; + spin_lock_bh(&q_vector->lock); + if ((q_vector->state & IXGBEVF_QV_LOCKED)) { + q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD; + rc = false; + } else { + /* preserve yield marks */ + q_vector->state |= IXGBEVF_QV_STATE_POLL; + } + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* returns true if someone tried to get the qv while it was locked */ +static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector) +{ + int rc = false; + spin_lock_bh(&q_vector->lock); +
WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI)); + + if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD) + rc = true; + /* reset state to idle, unless QV is disabled */ + q_vector->state &= IXGBEVF_QV_STATE_DISABLED; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector) +{ + WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED)); + return q_vector->state & IXGBEVF_QV_USER_PEND; +} + +/* false if QV is currently owned */ +static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) +{ + int rc = true; + spin_lock_bh(&q_vector->lock); + if (q_vector->state & IXGBEVF_QV_OWNED) + rc = false; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +#endif /* CONFIG_NET_RX_BUSY_POLL */ /* * microsecond values for various ITR rates shifted by 2 to fit itr register diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 06e29bd73777..bc03a2ec5c0a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -310,6 +310,16 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, struct sk_buff *skb, u8 status, union ixgbe_adv_rx_desc *rx_desc) { +#ifdef CONFIG_NET_RX_BUSY_POLL + skb_mark_napi_id(skb, &q_vector->napi); + + if (ixgbevf_qv_busy_polling(q_vector)) { + netif_receive_skb(skb); + /* exit early if we busy polled */ + return; + } +#endif /* CONFIG_NET_RX_BUSY_POLL */ + ixgbevf_receive_skb(q_vector, skb, status, rx_desc); } @@ -563,6 +573,11 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) ixgbevf_for_each_ring(ring, q_vector->tx) clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); +#ifdef CONFIG_NET_RX_BUSY_POLL + if (!ixgbevf_qv_lock_napi(q_vector)) + return budget; +#endif + /* attempt to distribute budget to each queue fairly, but don't allow * the budget to go below 1 because we'll exit polling */ if (q_vector->rx.count > 1) @@ -577,6 +592,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) < per_ring_budget); adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; +#ifdef CONFIG_NET_RX_BUSY_POLL + ixgbevf_qv_unlock_napi(q_vector); +#endif + /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; @@ -611,6 +630,34 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); } +#ifdef CONFIG_NET_RX_BUSY_POLL +/* must be called with local_bh_disable()d */ +static int ixgbevf_busy_poll_recv(struct napi_struct *napi) +{ + struct ixgbevf_q_vector *q_vector = + container_of(napi, struct ixgbevf_q_vector, napi); + struct ixgbevf_adapter *adapter = q_vector->adapter; + struct ixgbevf_ring *ring; + int found = 0; + + if (test_bit(__IXGBEVF_DOWN, &adapter->state)) + return LL_FLUSH_FAILED; + + if (!ixgbevf_qv_lock_poll(q_vector)) + return LL_FLUSH_BUSY; + + ixgbevf_for_each_ring(ring, q_vector->rx) { + found = ixgbevf_clean_rx_irq(q_vector, ring, 4); + if (found) + break; + } + + ixgbevf_qv_unlock_poll(q_vector); + + return found; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + /** * ixgbevf_configure_msix - Configure MSI-X hardware * @adapter: board private structure @@ -1297,6 +1344,9 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = adapter->q_vector[q_idx]; +#ifdef CONFIG_NET_RX_BUSY_POLL + 
ixgbevf_qv_init_lock(adapter->q_vector[q_idx]); +#endif napi_enable(&q_vector->napi); } } @@ -1310,6 +1360,12 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = adapter->q_vector[q_idx]; napi_disable(&q_vector->napi); +#ifdef CONFIG_NET_RX_BUSY_POLL + while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) { + pr_info("QV %d locked\n", q_idx); + usleep_range(1000, 20000); + } +#endif /* CONFIG_NET_RX_BUSY_POLL */ } } @@ -1960,6 +2016,9 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) q_vector->v_idx = q_idx; netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64); +#ifdef CONFIG_NET_RX_BUSY_POLL + napi_hash_add(&q_vector->napi); +#endif adapter->q_vector[q_idx] = q_vector; } @@ -1969,6 +2028,9 @@ err_out: while (q_idx) { q_idx--; q_vector = adapter->q_vector[q_idx]; +#ifdef CONFIG_NET_RX_BUSY_POLL + napi_hash_del(&q_vector->napi); +#endif netif_napi_del(&q_vector->napi); kfree(q_vector); adapter->q_vector[q_idx] = NULL; @@ -1992,6 +2054,9 @@ static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; adapter->q_vector[q_idx] = NULL; +#ifdef CONFIG_NET_RX_BUSY_POLL + napi_hash_del(&q_vector->napi); +#endif netif_napi_del(&q_vector->napi); kfree(q_vector); } @@ -3323,6 +3388,9 @@ static const struct net_device_ops ixgbevf_netdev_ops = { .ndo_tx_timeout = ixgbevf_tx_timeout, .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = ixgbevf_busy_poll_recv, +#endif }; static void ixgbevf_assign_netdev_ops(struct net_device *dev) -- cgit v1.2.3-59-g8ed1b From 3b5dca262f52793fdff4d0d970e8f1cec3f7f2ef Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Sat, 21 Sep 2013 06:24:25 +0000 Subject: ixgbevf: add BP_EXTENDED_STATS for CONFIG_NET_RX_BUSY_POLL This patch adds the extended statistics similar to the ixgbe driver. These statistics keep track of how often the busy polling yields, as well as how many packets are cleaned or missed by the polling routine. 
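The aggregation itself is a straightforward per-ring sum; a fragment mirroring the get_ethtool_stats hunk below, with the bp_misses accumulation written out explicitly:

	u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_yields += adapter->rx_ring[i].bp_yields;
		rx_cleaned += adapter->rx_ring[i].bp_cleaned;
		rx_missed += adapter->rx_ring[i].bp_misses;
	}

	adapter->bp_rx_yields = rx_yields;
	adapter->bp_rx_cleaned = rx_cleaned;
	adapter->bp_rx_missed = rx_missed;

The summed counters then surface through ethtool -S as rx_bp_poll_yield, rx_bp_cleaned and rx_bp_misses (plus their tx equivalents).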
Signed-off-by: Jacob Keller Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 32 +++++++++++++++++++++ drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 22 ++++++++++++++ drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 6 +++++ 3 files changed, 60 insertions(+) (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c') diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 21adb1bc1706..44f7c42d8f58 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -74,6 +74,14 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = { zero_base)}, {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base, zero_base)}, +#ifdef BP_EXTENDED_STATS + {"rx_bp_poll_yield", IXGBEVF_STAT(bp_rx_yields, zero_base, zero_base)}, + {"rx_bp_cleaned", IXGBEVF_STAT(bp_rx_cleaned, zero_base, zero_base)}, + {"rx_bp_misses", IXGBEVF_STAT(bp_rx_missed, zero_base, zero_base)}, + {"tx_bp_napi_yield", IXGBEVF_STAT(bp_tx_yields, zero_base, zero_base)}, + {"tx_bp_cleaned", IXGBEVF_STAT(bp_tx_cleaned, zero_base, zero_base)}, + {"tx_bp_misses", IXGBEVF_STAT(bp_tx_missed, zero_base, zero_base)}, +#endif }; #define IXGBE_QUEUE_STATS_LEN 0 @@ -391,6 +399,30 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, { struct ixgbevf_adapter *adapter = netdev_priv(netdev); int i; +#ifdef BP_EXTENDED_STATS + u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0, + tx_yields = 0, tx_cleaned = 0, tx_missed = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_yields += adapter->rx_ring[i].bp_yields; + rx_cleaned += adapter->rx_ring[i].bp_cleaned; + rx_missed += adapter->rx_ring[i].bp_misses; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + tx_yields += adapter->tx_ring[i].bp_yields; + tx_cleaned += adapter->tx_ring[i].bp_cleaned; + tx_missed += adapter->tx_ring[i].bp_misses; + } + + adapter->bp_rx_yields = rx_yields; + adapter->bp_rx_cleaned = rx_cleaned; + adapter->bp_rx_missed = rx_missed; + + adapter->bp_tx_yields = tx_yields; + adapter->bp_tx_cleaned = tx_cleaned; + adapter->bp_tx_missed = tx_missed; +#endif ixgbevf_update_stats(adapter); for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 85c7560e237b..78e4c510bd75 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -40,6 +40,7 @@ #ifdef CONFIG_NET_RX_BUSY_POLL #include <net/busy_poll.h> +#define BP_EXTENDED_STATS #endif /* wrapper around a pointer to a socket buffer, @@ -80,6 +81,11 @@ struct ixgbevf_ring { struct u64_stats_sync syncp; u64 hw_csum_rx_error; u64 hw_csum_rx_good; +#ifdef BP_EXTENDED_STATS + u64 bp_yields; + u64 bp_misses; + u64 bp_cleaned; +#endif u16 head; u16 tail; @@ -181,6 +187,9 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI); q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD; rc = false; +#ifdef BP_EXTENDED_STATS + q_vector->tx.ring->bp_yields++; +#endif } else { /* we don't care if someone yielded */ q_vector->state = IXGBEVF_QV_STATE_NAPI; @@ -213,6 +222,9 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) if ((q_vector->state & IXGBEVF_QV_LOCKED)) { q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD; rc = false; +#ifdef BP_EXTENDED_STATS + q_vector->rx.ring->bp_yields++; +#endif } else { /* preserve yield marks
*/ q_vector->state |= IXGBEVF_QV_STATE_POLL; @@ -358,6 +370,16 @@ struct ixgbevf_adapter { unsigned int tx_ring_count; unsigned int rx_ring_count; +#ifdef BP_EXTENDED_STATS + u64 bp_rx_yields; + u64 bp_rx_cleaned; + u64 bp_rx_missed; + + u64 bp_tx_yields; + u64 bp_tx_cleaned; + u64 bp_tx_missed; +#endif + u32 link_speed; bool link_up; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index bc03a2ec5c0a..b16b694951b8 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -648,6 +648,12 @@ static int ixgbevf_busy_poll_recv(struct napi_struct *napi) ixgbevf_for_each_ring(ring, q_vector->rx) { found = ixgbevf_clean_rx_irq(q_vector, ring, 4); +#ifdef BP_EXTENDED_STATS + if (found) + ring->bp_cleaned += found; + else + ring->bp_misses++; +#endif if (found) break; } -- cgit v1.2.3-59-g8ed1b From 76b81748d43f2c60774c2703e3a9390bcc552adb Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Tue, 29 Oct 2013 08:31:49 +0000 Subject: ixgbevf: remove redundant workaround This patch removes a workaround related to header split, which is redundant because the driver does not support splitting packet headers on Rx. Signed-off-by: Emil Tantilov Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 9 --------- 1 file changed, 9 deletions(-) (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index b16b694951b8..9685354e6b9d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -497,15 +497,6 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, total_rx_bytes += skb->len; total_rx_packets++; - /* - * Work around issue of some types of VM to VM loop back - * packets not getting split correctly - */ - if (staterr & IXGBE_RXD_STAT_LB) { - u32 header_fixup_len = skb_headlen(skb); - if (header_fixup_len < 14) - skb_push(skb, header_fixup_len); - } skb->protocol = eth_type_trans(skb, rx_ring->netdev); /* Workaround hardware that can't do proper VEPA multicast -- cgit v1.2.3-59-g8ed1b From f880d07bc5bc9f453be7b1fc9c1a34853719d148 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Wed, 23 Oct 2013 02:17:52 +0000 Subject: ixgbevf: cleanup IXGBE_DESC_UNUSED This patch just replaces the IXGBE_DESC_UNUSED macro with a like-named inline function, ixgbevf_desc_unused. The inline function makes the logic a bit more readable. Signed-off-by: Alexander Duyck Signed-off-by: Don Skidmore Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 10 +++++++--- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 10 +++++----- 2 files changed, 12 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index acf38067a700..8971e2d0a984 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -286,9 +286,13 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) #define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG -#define IXGBE_DESC_UNUSED(R) \ - ((((R)->next_to_clean > (R)->next_to_use) ?
0 : (R)->count) + \ - (R)->next_to_clean - (R)->next_to_use - 1) +static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} #define IXGBEVF_RX_DESC(R, i) \ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 9685354e6b9d..038bfc8b7616 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -251,7 +251,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && - (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { + (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ @@ -529,7 +529,7 @@ next_desc: } rx_ring->next_to_clean = i; - cleaned_count = IXGBE_DESC_UNUSED(rx_ring); + cleaned_count = ixgbevf_desc_unused(rx_ring); if (cleaned_count) ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count); @@ -1380,7 +1380,7 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbevf_ring *ring = &adapter->rx_ring[i]; ixgbevf_alloc_rx_buffers(adapter, ring, - IXGBE_DESC_UNUSED(ring)); + ixgbevf_desc_unused(ring)); } } @@ -3102,7 +3102,7 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) /* We need to check again in a case another CPU has just * made room available. */ - if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) + if (likely(ixgbevf_desc_unused(tx_ring) < size)) return -EBUSY; /* A reprieve! - use start_queue because it doesn't call schedule */ @@ -3113,7 +3113,7 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) { - if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) + if (likely(ixgbevf_desc_unused(tx_ring) >= size)) return 0; return __ixgbevf_maybe_stop_tx(tx_ring, size); } -- cgit v1.2.3-59-g8ed1b
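As a closing illustration of the ixgbevf_desc_unused() conversion, a self-contained sketch with hypothetical ring values (simplified struct, not driver code):

	#include <stdio.h>

	struct ring { unsigned short next_to_clean, next_to_use, count; };

	/* same arithmetic as ixgbevf_desc_unused(): one slot is always
	 * held back so next_to_use can never catch next_to_clean
	 */
	static unsigned short desc_unused(const struct ring *r)
	{
		return ((r->next_to_clean > r->next_to_use) ? 0 : r->count) +
		       r->next_to_clean - r->next_to_use - 1;
	}

	int main(void)
	{
		struct ring r = { .next_to_clean = 10, .next_to_use = 500,
				  .count = 512 };

		/* 512 + 10 - 500 - 1 = 21 unused descriptors */
		printf("%u\n", (unsigned int)desc_unused(&r));
		return 0;
	}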