From b29a21694f7d12e40537e1e587ec47725849769b Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 11 May 2011 07:18:26 +0000 Subject: ixgbe: remove ntuple filtering Due to numerous issues in ntuple filters it has been decided to move the interface over to the network flow classification interface. As a first step to achieving this I first need to remove the old ntuple interface. Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_ethtool.c | 136 -------------------------------------- 1 file changed, 136 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 4950d03d3ef8..e41dd242cfcf 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -2336,141 +2336,6 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) return 0; } -static int ixgbe_set_rx_ntuple(struct net_device *dev, - struct ethtool_rx_ntuple *cmd) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs; - union ixgbe_atr_input input_struct; - struct ixgbe_atr_input_masks input_masks; - int target_queue; - int err; - - if (adapter->hw.mac.type == ixgbe_mac_82598EB) - return -EOPNOTSUPP; - - /* - * Don't allow programming if the action is a queue greater than - * the number of online Tx queues. - */ - if ((fs->action >= adapter->num_tx_queues) || - (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP)) - return -EINVAL; - - memset(&input_struct, 0, sizeof(union ixgbe_atr_input)); - memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks)); - - /* record flow type */ - switch (fs->flow_type) { - case IPV4_FLOW: - input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; - break; - case TCP_V4_FLOW: - input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; - break; - case UDP_V4_FLOW: - input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; - break; - case SCTP_V4_FLOW: - input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; - break; - default: - return -1; - } - - /* copy vlan tag minus the CFI bit */ - if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) { - input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF); - if (!fs->vlan_tag_mask) { - input_masks.vlan_id_mask = htons(0xEFFF); - } else { - switch (~fs->vlan_tag_mask & 0xEFFF) { - /* all of these are valid vlan-mask values */ - case 0xEFFF: - case 0xE000: - case 0x0FFF: - case 0x0000: - input_masks.vlan_id_mask = - htons(~fs->vlan_tag_mask); - break; - /* exit with error if vlan-mask is invalid */ - default: - e_err(drv, "Partial VLAN ID or " - "priority mask in vlan-mask is not " - "supported by hardware\n"); - return -1; - } - } - } - - /* make sure we only use the first 2 bytes of user data */ - if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) { - input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF); - if (!(fs->data_mask & 0xFFFF)) { - input_masks.flex_mask = 0xFFFF; - } else if (~fs->data_mask & 0xFFFF) { - e_err(drv, "Partial user-def-mask is not " - "supported by hardware\n"); - return -1; - } - } - - /* - * Copy input into formatted structures - * - * These assignments are based on the following logic - * If neither input or mask are set assume value is masked out. - * If input is set, but mask is not mask should default to accept all. - * If input is not set, but mask is set then mask likely results in 0. - * If input is set and mask is set then assign both. 
- */ - if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) { - input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src; - if (!fs->m_u.tcp_ip4_spec.ip4src) - input_masks.src_ip_mask[0] = 0xFFFFFFFF; - else - input_masks.src_ip_mask[0] = - ~fs->m_u.tcp_ip4_spec.ip4src; - } - if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) { - input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst; - if (!fs->m_u.tcp_ip4_spec.ip4dst) - input_masks.dst_ip_mask[0] = 0xFFFFFFFF; - else - input_masks.dst_ip_mask[0] = - ~fs->m_u.tcp_ip4_spec.ip4dst; - } - if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) { - input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc; - if (!fs->m_u.tcp_ip4_spec.psrc) - input_masks.src_port_mask = 0xFFFF; - else - input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc; - } - if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) { - input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst; - if (!fs->m_u.tcp_ip4_spec.pdst) - input_masks.dst_port_mask = 0xFFFF; - else - input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst; - } - - /* determine if we need to drop or route the packet */ - if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP) - target_queue = MAX_RX_QUEUES - 1; - else - target_queue = fs->action; - - spin_lock(&adapter->fdir_perfect_lock); - err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, - &input_struct, - &input_masks, 0, - target_queue); - spin_unlock(&adapter->fdir_perfect_lock); - - return err ? -1 : 0; -} - static const struct ethtool_ops ixgbe_ethtool_ops = { .get_settings = ixgbe_get_settings, .set_settings = ixgbe_set_settings, @@ -2506,7 +2371,6 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .set_coalesce = ixgbe_set_coalesce, .get_flags = ethtool_op_get_flags, .set_flags = ixgbe_set_flags, - .set_rx_ntuple = ixgbe_set_rx_ntuple, }; void ixgbe_set_ethtool_ops(struct net_device *netdev) -- cgit v1.2.3-59-g8ed1b From 03ecf91aae757eeb70763a3393227c4597c87b23 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 20 May 2011 07:36:17 +0000 Subject: ixgbe: fix flags relating to perfect filters to support coexistence I am removing the requirement that Ntuple filters have the same number of queues and requirements as ATR. As a result this change will make it so that all the Ntuple flag does is disable ATR for now. This change fixes an issue in which we were incorrectly re-enabling ATR when we exited perfect filter mode. This was due to the fact that the logic assumed RSS and DCB were mutually exclusive which is no longer the case. To correct this we just need to add a check to guarantee DCB is disabled before re-enabling ATR. 
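For reference, the decision this patch settles on in ixgbe_set_flags() condenses to the sketch below; this is only a restatement of the ethtool hunk further down, not additional code, and the transition is typically driven from userspace by toggling the ntuple feature flag (for example "ethtool -K ethX ntuple on|off"):

	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
		/* ntuple turned on: disable ATR, enable perfect filters */
		if (data & ETH_FLAG_NTUPLE) {
			adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
			adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
			need_reset = true;
		}
	} else if (!(data & ETH_FLAG_NTUPLE)) {
		/*
		 * ntuple turned off: drop perfect filters; restore ATR only
		 * when RSS is enabled and DCB is not
		 */
		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
		if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
		    !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
		need_reset = true;
	}
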
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_dcb_nl.c | 13 ++++++------- drivers/net/ixgbe/ixgbe_ethtool.c | 24 ++++++++++++------------ drivers/net/ixgbe/ixgbe_main.c | 34 ++++++++++++---------------------- 3 files changed, 30 insertions(+), 41 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index 08c7aebe99f5..bd2d75265389 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c @@ -114,11 +114,12 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) u8 err = 0; struct ixgbe_adapter *adapter = netdev_priv(netdev); + /* verify there is something to do, if not then exit */ + if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) + return err; + if (state > 0) { /* Turn on DCB */ - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) - goto out; - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { e_err(drv, "Enable failed, needs MSI-X\n"); err = 1; @@ -143,9 +144,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS); } else { /* Turn off DCB */ - if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - goto out; - adapter->hw.fc.requested_mode = adapter->last_lfc_mode; adapter->temp_dcb_cfg.pfc_mode_enable = false; adapter->dcb_cfg.pfc_mode_enable = false; @@ -153,7 +151,8 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; break; default: break; diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index e41dd242cfcf..a2d8ed506053 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -2310,20 +2310,20 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) * Check if Flow Director n-tuple support was enabled or disabled. If * the state changed, we need to reset. 
*/ - if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) && - (!(data & ETH_FLAG_NTUPLE))) { - /* turn off Flow Director perfect, set hash and reset */ + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { + /* turn off ATR, enable perfect filters and reset */ + if (data & ETH_FLAG_NTUPLE) { + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + need_reset = true; + } + } else if (!(data & ETH_FLAG_NTUPLE)) { + /* turn off Flow Director, set ATR and reset */ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - need_reset = true; - } else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) && - (data & ETH_FLAG_NTUPLE)) { - /* turn off Flow Director hash, enable perfect and reset */ - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && + !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; need_reset = true; - } else { - /* no state change */ } if (need_reset) { diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 4cd66ae70ccc..5483b9c3e2c0 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1555,9 +1555,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) q_vector->eitr = adapter->rx_eitr_param; ixgbe_write_eitr(q_vector); - /* If Flow Director is enabled, set interrupt affinity */ - if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || - (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { + /* If ATR is enabled, set interrupt affinity */ + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { /* * Allocate the affinity_hint cpumask, assign the mask * for this vector, and set our affinity_hint for @@ -2468,8 +2467,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, default: break; } - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || - adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) mask |= IXGBE_EIMS_FLOW_DIR; IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); @@ -3767,8 +3765,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) adapter->tx_ring[i]->atr_sample_rate = adapter->atr_sample_rate; ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc); - } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { - ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc); } ixgbe_configure_virtualization(adapter); @@ -4334,15 +4330,13 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) f_fdir->mask = 0; /* Flow Director must have RSS enabled */ - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED && - ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || - (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) { + if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && + (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { adapter->num_tx_queues = f_fdir->indices; adapter->num_rx_queues = f_fdir->indices; ret = true; } else { adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; } return ret; } @@ -4372,12 +4366,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { e_info(probe, "FCoE enabled with RSS\n"); - if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || - (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + if 
(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ixgbe_set_fdir_queues(adapter); else ixgbe_set_rss_queues(adapter); } + /* adding FCoE rx rings to the end */ f->mask = adapter->num_rx_queues; adapter->num_rx_queues += f->indices; @@ -4670,9 +4664,8 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) int i; bool ret = false; - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED && - ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || - (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) { + if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && + (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { for (i = 0; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->reg_idx = i; for (i = 0; i < adapter->num_tx_queues; i++) @@ -4701,8 +4694,7 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) return false; if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || - (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ixgbe_cache_ring_fdir(adapter); else ixgbe_cache_ring_rss(adapter); @@ -4882,14 +4874,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; - if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE | - IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { e_err(probe, - "Flow Director is not supported while multiple " + "ATR is not supported while multiple " "queues are disabled. Disabling Flow Director\n"); } adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; adapter->atr_sample_rate = 0; if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) ixgbe_disable_sriov(adapter); -- cgit v1.2.3-59-g8ed1b From c04f6ca84866ef207e009a08e4c34ca241df7aa2 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 11 May 2011 07:18:36 +0000 Subject: ixgbe: update perfect filter framework to support retaining filters This change is meant to update the internal framework of ixgbe so that perfect filters can be stored and tracked via software. 
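The old all-in-one ixgbe_fdir_add_perfect_filter_82599() is split into a global input-mask write, a software-cacheable hash computation and separate write/erase operations. A hedged sketch of how the new helpers are meant to be combined (hw, adapter, mask, input, soft_id and queue are placeholders; the real caller arrives with the later nfc patches in this series):

	spin_lock(&adapter->fdir_perfect_lock);

	/* the input mask is shared by all perfect filters on the port,
	 * so it is programmed once up front */
	err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
	if (!err) {
		/* applies the mask and caches the 13-bit bucket hash in
		 * input.formatted.bkt_hash for later reuse */
		ixgbe_atr_compute_perfect_hash_82599(&input, &mask);
		err = ixgbe_fdir_write_perfect_filter_82599(hw, &input,
							    soft_id, queue);
	}

	spin_unlock(&adapter->fdir_perfect_lock);
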
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 18 +- drivers/net/ixgbe/ixgbe_82599.c | 603 +++++++++++++++++++++++----------------- drivers/net/ixgbe/ixgbe_main.c | 2 +- drivers/net/ixgbe/ixgbe_type.h | 25 +- 4 files changed, 368 insertions(+), 280 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index d5674fc8bc02..5ea5b4c08fe0 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -543,16 +543,22 @@ extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); extern void ixgbe_write_eitr(struct ixgbe_q_vector *); extern int ethtool_ioctl(struct ifreq *ifr); extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); -extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); -extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); +extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); +extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl); extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_hash_dword input, union ixgbe_atr_hash_dword common, u8 queue); -extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input, - struct ixgbe_atr_input_masks *input_masks, - u16 soft_id, u8 queue); +extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input_mask); +extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id, u8 queue); +extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id); +extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + union ixgbe_atr_input *mask); extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring); extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 4a6826bf9338..3b3dd4df4c5c 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -1107,115 +1107,87 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) } /** - * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters + * ixgbe_set_fdir_rxpba_82599 - Initialize Flow Director Rx packet buffer * @hw: pointer to hardware structure * @pballoc: which mode to allocate filters with **/ -s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc) +static s32 ixgbe_set_fdir_rxpba_82599(struct ixgbe_hw *hw, const u32 pballoc) { - u32 fdirctrl = 0; + u32 fdir_pbsize = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT; + u32 current_rxpbsize = 0; int i; - /* Send interrupt when 64 filters are left */ - fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT; - - /* Set the maximum length per hash bucket to 0xA filters */ - fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT; - + /* reserve space for Flow Director filters */ switch (pballoc) { - case IXGBE_FDIR_PBALLOC_64K: - /* 8k - 1 signature filters */ - fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; + case IXGBE_FDIR_PBALLOC_256K: + fdir_pbsize -= 256 << IXGBE_RXPBSIZE_SHIFT; break; case IXGBE_FDIR_PBALLOC_128K: - /* 16k - 1 signature filters */ - fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; + fdir_pbsize -= 128 << IXGBE_RXPBSIZE_SHIFT; break; - case IXGBE_FDIR_PBALLOC_256K: - /* 32k - 1 signature filters */ - fdirctrl |= 
IXGBE_FDIRCTRL_PBALLOC_256K; + case IXGBE_FDIR_PBALLOC_64K: + fdir_pbsize -= 64 << IXGBE_RXPBSIZE_SHIFT; break; + case IXGBE_FDIR_PBALLOC_NONE: default: - /* bad value */ - return IXGBE_ERR_CONFIG; + return IXGBE_ERR_PARAM; } - /* Move the flexible bytes to use the ethertype - shift 6 words */ - fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); + /* determine current RX packet buffer size */ + for (i = 0; i < 8; i++) + current_rxpbsize += IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + /* if there is already room for the filters do nothing */ + if (current_rxpbsize <= fdir_pbsize) + return 0; - /* Prime the keys for hashing */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); - IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); - - /* - * Poll init-done after we write the register. Estimated times: - * 10G: PBALLOC = 11b, timing is 60us - * 1G: PBALLOC = 11b, timing is 600us - * 100M: PBALLOC = 11b, timing is 6ms - * - * Multiple these timings by 4 if under full Rx load - * - * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for - * 1 msec per poll time. If we're at line rate and drop to 100M, then - * this might not finish in our poll time, but we can live with that - * for now. - */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); - IXGBE_WRITE_FLUSH(hw); - for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { - if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & - IXGBE_FDIRCTRL_INIT_DONE) - break; - usleep_range(1000, 2000); + if (current_rxpbsize > hw->mac.rx_pb_size) { + /* + * if rxpbsize is greater than max then HW max the Rx buffer + * sizes are unconfigured or misconfigured since HW default is + * to give the full buffer to each traffic class resulting in + * the total size being buffer size 8x actual size + * + * This assumes no DCB since the RXPBSIZE registers appear to + * be unconfigured. + */ + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), fdir_pbsize); + for (i = 1; i < 8; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); + } else { + /* + * Since the Rx packet buffer appears to have already been + * configured we need to shrink each packet buffer by enough + * to make room for the filters. As such we take each rxpbsize + * value and multiply it by a fraction representing the size + * needed over the size we currently have. + * + * We need to reduce fdir_pbsize and current_rxpbsize to + * 1/1024 of their original values in order to avoid + * overflowing the u32 being used to store rxpbsize. 
+ */ + fdir_pbsize >>= IXGBE_RXPBSIZE_SHIFT; + current_rxpbsize >>= IXGBE_RXPBSIZE_SHIFT; + for (i = 0; i < 8; i++) { + u32 rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + rxpbsize *= fdir_pbsize; + rxpbsize /= current_rxpbsize; + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); + } } - if (i >= IXGBE_FDIR_INIT_DONE_POLL) - hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); return 0; } /** - * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters + * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers * @hw: pointer to hardware structure - * @pballoc: which mode to allocate filters with + * @fdirctrl: value to write to flow director control register **/ -s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) +static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) { - u32 fdirctrl = 0; int i; - /* Send interrupt when 64 filters are left */ - fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT; - - /* Initialize the drop queue to Rx queue 127 */ - fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT); - - switch (pballoc) { - case IXGBE_FDIR_PBALLOC_64K: - /* 2k - 1 perfect filters */ - fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; - break; - case IXGBE_FDIR_PBALLOC_128K: - /* 4k - 1 perfect filters */ - fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; - break; - case IXGBE_FDIR_PBALLOC_256K: - /* 8k - 1 perfect filters */ - fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; - break; - default: - /* bad value */ - return IXGBE_ERR_CONFIG; - } - - /* Turn perfect match filtering on */ - fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH; - fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; - - /* Move the flexible bytes to use the ethertype - shift 6 words */ - fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); - /* Prime the keys for hashing */ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); @@ -1233,10 +1205,6 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) * this might not finish in our poll time, but we can live with that * for now. 
*/ - - /* Set the maximum length per hash bucket to 0xA filters */ - fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT); - IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); IXGBE_WRITE_FLUSH(hw); for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { @@ -1245,101 +1213,77 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) break; usleep_range(1000, 2000); } - if (i >= IXGBE_FDIR_INIT_DONE_POLL) - hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n"); - return 0; + if (i >= IXGBE_FDIR_INIT_DONE_POLL) + hw_dbg(hw, "Flow Director poll time exceeded!\n"); } - /** - * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR - * @stream: input bitstream to compute the hash on - * @key: 32-bit hash key + * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially + * contains just the value of the Rx packet buffer allocation **/ -static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, - u32 key) +s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) { - /* - * The algorithm is as follows: - * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350 - * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n] - * and A[n] x B[n] is bitwise AND between same length strings - * - * K[n] is 16 bits, defined as: - * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15] - * for n modulo 32 < 15, K[n] = - * K[(n % 32:0) | (31:31 - (14 - (n % 32)))] - * - * S[n] is 16 bits, defined as: - * for n >= 15, S[n] = S[n:n - 15] - * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))] - * - * To simplify for programming, the algorithm is implemented - * in software this way: - * - * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0] - * - * for (i = 0; i < 352; i+=32) - * hi_hash_dword[31:0] ^= Stream[(i+31):i]; - * - * lo_hash_dword[15:0] ^= Stream[15:0]; - * lo_hash_dword[15:0] ^= hi_hash_dword[31:16]; - * lo_hash_dword[31:16] ^= hi_hash_dword[15:0]; - * - * hi_hash_dword[31:0] ^= Stream[351:320]; - * - * if(key[0]) - * hash[15:0] ^= Stream[15:0]; - * - * for (i = 0; i < 16; i++) { - * if (key[i]) - * hash[15:0] ^= lo_hash_dword[(i+15):i]; - * if (key[i + 16]) - * hash[15:0] ^= hi_hash_dword[(i+15):i]; - * } - * - */ - __be32 common_hash_dword = 0; - u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; - u32 hash_result = 0; - u8 i; + s32 err; - /* record the flow_vm_vlan bits as they are a key part to the hash */ - flow_vm_vlan = ntohl(atr_input->dword_stream[0]); + /* Before enabling Flow Director, verify the Rx Packet Buffer size */ + err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl); + if (err) + return err; - /* generate common hash dword */ - for (i = 10; i; i -= 2) - common_hash_dword ^= atr_input->dword_stream[i] ^ - atr_input->dword_stream[i - 1]; + /* + * Continue setup of fdirctrl register bits: + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 filters are left + */ + fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | + (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | + (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); - hi_hash_dword = ntohl(common_hash_dword); + /* write hashes and fdirctrl register, poll for completion */ + ixgbe_fdir_enable_82599(hw, fdirctrl); - /* low dword is word swapped version of common */ - lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + return 0; +} - /* apply flow ID/VM pool/VLAN ID 
bits to hash words */ - hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); +/** + * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially + * contains just the value of the Rx packet buffer allocation + **/ +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) +{ + s32 err; - /* Process bits 0 and 16 */ - if (key & 0x0001) hash_result ^= lo_hash_dword; - if (key & 0x00010000) hash_result ^= hi_hash_dword; + /* Before enabling Flow Director, verify the Rx Packet Buffer size */ + err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl); + if (err) + return err; /* - * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to - * delay this because bit 0 of the stream should not be processed - * so we do not add the vlan until after bit 0 was processed + * Continue setup of fdirctrl register bits: + * Turn perfect match filtering on + * Report hash in RSS field of Rx wb descriptor + * Initialize the drop queue + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 (0x4 * 16) filters are left */ - lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | + IXGBE_FDIRCTRL_REPORT_STATUS | + (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | + (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | + (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | + (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + /* write hashes and fdirctrl register, poll for completion */ + ixgbe_fdir_enable_82599(hw, fdirctrl); - /* process the remaining 30 bits in the key 2 bits at a time */ - for (i = 15; i; i-- ) { - if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i; - if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i; - } - - return hash_result & IXGBE_ATR_HASH_MASK; + return 0; } /* @@ -1476,7 +1420,6 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, */ fdirhashcmd = (u64)fdircmd << 32; fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); - IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); @@ -1484,6 +1427,101 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, return 0; } +#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ +} while (0); + +/** + * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash + * @atr_input: input bitstream to compute the hash on + * @input_mask: mask for the input bitstream + * + * This function serves two main purposes. First it applys the input_mask + * to the atr_input resulting in a cleaned up atr_input data stream. + * Secondly it computes the hash and stores it in the bkt_hash field at + * the end of the input byte stream. This way it will be available for + * future use without needing to recompute the hash. 
+ **/ +void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + union ixgbe_atr_input *input_mask) +{ + + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 bucket_hash = 0; + + /* Apply masks to input data */ + input->dword_stream[0] &= input_mask->dword_stream[0]; + input->dword_stream[1] &= input_mask->dword_stream[1]; + input->dword_stream[2] &= input_mask->dword_stream[2]; + input->dword_stream[3] &= input_mask->dword_stream[3]; + input->dword_stream[4] &= input_mask->dword_stream[4]; + input->dword_stream[5] &= input_mask->dword_stream[5]; + input->dword_stream[6] &= input_mask->dword_stream[6]; + input->dword_stream[7] &= input_mask->dword_stream[7]; + input->dword_stream[8] &= input_mask->dword_stream[8]; + input->dword_stream[9] &= input_mask->dword_stream[9]; + input->dword_stream[10] &= input_mask->dword_stream[10]; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = ntohl(input->dword_stream[0]); + + /* generate common hash dword */ + hi_hash_dword = ntohl(input->dword_stream[1] ^ + input->dword_stream[2] ^ + input->dword_stream[3] ^ + input->dword_stream[4] ^ + input->dword_stream[5] ^ + input->dword_stream[6] ^ + input->dword_stream[7] ^ + input->dword_stream[8] ^ + input->dword_stream[9] ^ + input->dword_stream[10]); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + IXGBE_COMPUTE_BKT_HASH_ITERATION(0); + + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the vlan until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process remaining 30 bit of the key */ + IXGBE_COMPUTE_BKT_HASH_ITERATION(1); + IXGBE_COMPUTE_BKT_HASH_ITERATION(2); + IXGBE_COMPUTE_BKT_HASH_ITERATION(3); + IXGBE_COMPUTE_BKT_HASH_ITERATION(4); + IXGBE_COMPUTE_BKT_HASH_ITERATION(5); + IXGBE_COMPUTE_BKT_HASH_ITERATION(6); + IXGBE_COMPUTE_BKT_HASH_ITERATION(7); + IXGBE_COMPUTE_BKT_HASH_ITERATION(8); + IXGBE_COMPUTE_BKT_HASH_ITERATION(9); + IXGBE_COMPUTE_BKT_HASH_ITERATION(10); + IXGBE_COMPUTE_BKT_HASH_ITERATION(11); + IXGBE_COMPUTE_BKT_HASH_ITERATION(12); + IXGBE_COMPUTE_BKT_HASH_ITERATION(13); + IXGBE_COMPUTE_BKT_HASH_ITERATION(14); + IXGBE_COMPUTE_BKT_HASH_ITERATION(15); + + /* + * Limit hash to 13 bits since max bucket count is 8K. + * Store result at the end of the input stream. + */ + input->formatted.bkt_hash = bucket_hash & 0x1FFF; +} + /** * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks * @input_mask: mask to be bit swapped @@ -1493,11 +1531,11 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, * generate a correctly swapped value we need to bit swap the mask and that * is what is accomplished by this function. 
**/ -static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks) +static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) { - u32 mask = ntohs(input_masks->dst_port_mask); + u32 mask = ntohs(input_mask->formatted.dst_port); mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; - mask |= ntohs(input_masks->src_port_mask); + mask |= ntohs(input_mask->formatted.src_port); mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); @@ -1519,52 +1557,14 @@ static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks) IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value))) #define IXGBE_STORE_AS_BE16(_value) \ - (((u16)(_value) >> 8) | ((u16)(_value) << 8)) + ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8)) -/** - * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter - * @hw: pointer to hardware structure - * @input: input bitstream - * @input_masks: bitwise masks for relevant fields - * @soft_id: software index into the silicon hash tables for filter storage - * @queue: queue index to direct traffic to - * - * Note that the caller to this function must lock before calling, since the - * hardware writes must be protected from one another. - **/ -s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input, - struct ixgbe_atr_input_masks *input_masks, - u16 soft_id, u8 queue) +s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input_mask) { - u32 fdirhash; - u32 fdircmd; - u32 fdirport, fdirtcpm; - u32 fdirvlan; - /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */ - u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX | - IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6; - - /* - * Check flow_type formatting, and bail out before we touch the hardware - * if there's a configuration issue - */ - switch (input->formatted.flow_type) { - case IXGBE_ATR_FLOW_TYPE_IPV4: - /* use the L4 protocol mask for raw IPv4/IPv6 traffic */ - fdirm |= IXGBE_FDIRM_L4P; - case IXGBE_ATR_FLOW_TYPE_SCTPV4: - if (input_masks->dst_port_mask || input_masks->src_port_mask) { - hw_dbg(hw, " Error on src/dst port mask\n"); - return IXGBE_ERR_CONFIG; - } - case IXGBE_ATR_FLOW_TYPE_TCPV4: - case IXGBE_ATR_FLOW_TYPE_UDPV4: - break; - default: - hw_dbg(hw, " Error on flow type input\n"); - return IXGBE_ERR_CONFIG; - } + /* mask IPv6 since it is currently not supported */ + u32 fdirm = IXGBE_FDIRM_DIPv6; + u32 fdirtcpm; /* * Program the relevant mask registers. If src/dst_port or src/dst_addr @@ -1576,41 +1576,71 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, * point in time. 
*/ - /* Program FDIRM */ - switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) { - case 0xEFFF: - /* Unmask VLAN ID - bit 0 and fall through to unmask prio */ - fdirm &= ~IXGBE_FDIRM_VLANID; - case 0xE000: - /* Unmask VLAN prio - bit 1 */ - fdirm &= ~IXGBE_FDIRM_VLANP; + /* verify bucket hash is cleared on hash generation */ + if (input_mask->formatted.bkt_hash) + hw_dbg(hw, " bucket hash should always be 0 in mask\n"); + + /* Program FDIRM and verify partial masks */ + switch (input_mask->formatted.vm_pool & 0x7F) { + case 0x0: + fdirm |= IXGBE_FDIRM_POOL; + case 0x7F: break; - case 0x0FFF: - /* Unmask VLAN ID - bit 0 */ - fdirm &= ~IXGBE_FDIRM_VLANID; + default: + hw_dbg(hw, " Error on vm pool mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) { + case 0x0: + fdirm |= IXGBE_FDIRM_L4P; + if (input_mask->formatted.dst_port || + input_mask->formatted.src_port) { + hw_dbg(hw, " Error on src/dst port mask\n"); + return IXGBE_ERR_CONFIG; + } + case IXGBE_ATR_L4TYPE_MASK: break; + default: + hw_dbg(hw, " Error on flow type mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) { case 0x0000: - /* do nothing, vlans already masked */ + /* mask VLAN ID, fall through to mask VLAN priority */ + fdirm |= IXGBE_FDIRM_VLANID; + case 0x0FFF: + /* mask VLAN priority */ + fdirm |= IXGBE_FDIRM_VLANP; + break; + case 0xE000: + /* mask VLAN ID only, fall through */ + fdirm |= IXGBE_FDIRM_VLANID; + case 0xEFFF: + /* no VLAN fields masked */ break; default: hw_dbg(hw, " Error on VLAN mask\n"); return IXGBE_ERR_CONFIG; } - if (input_masks->flex_mask & 0xFFFF) { - if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) { - hw_dbg(hw, " Error on flexible byte mask\n"); - return IXGBE_ERR_CONFIG; - } - /* Unmask Flex Bytes - bit 4 */ - fdirm &= ~IXGBE_FDIRM_FLEX; + switch (input_mask->formatted.flex_bytes & 0xFFFF) { + case 0x0000: + /* Mask Flex Bytes, fall through */ + fdirm |= IXGBE_FDIRM_FLEX; + case 0xFFFF: + break; + default: + hw_dbg(hw, " Error on flexible byte mask\n"); + return IXGBE_ERR_CONFIG; } /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); /* store the TCP/UDP port masks, bit reversed from port layout */ - fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks); + fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); /* write both the same so that UDP and TCP use the same mask */ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); @@ -1618,24 +1648,32 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, /* store source and destination IP masks (big-enian) */ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, - ~input_masks->src_ip_mask[0]); + ~input_mask->formatted.src_ip[0]); IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, - ~input_masks->dst_ip_mask[0]); + ~input_mask->formatted.dst_ip[0]); - /* Apply masks to input data */ - input->formatted.vlan_id &= input_masks->vlan_id_mask; - input->formatted.flex_bytes &= input_masks->flex_mask; - input->formatted.src_port &= input_masks->src_port_mask; - input->formatted.dst_port &= input_masks->dst_port_mask; - input->formatted.src_ip[0] &= input_masks->src_ip_mask[0]; - input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0]; + return 0; +} - /* record vlan (little-endian) and flex_bytes(big-endian) */ - fdirvlan = - IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes)); - fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; - fdirvlan |= ntohs(input->formatted.vlan_id); - IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); +s32 
ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id, u8 queue) +{ + u32 fdirport, fdirvlan, fdirhash, fdircmd; + + /* currently IPv6 is not supported, must be programmed with 0 */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), + input->formatted.src_ip[0]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), + input->formatted.src_ip[1]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), + input->formatted.src_ip[2]); + + /* record the source address (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); + + /* record the first 32 bits of the destination address (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); /* record source and destination port (little-endian)*/ fdirport = ntohs(input->formatted.dst_port); @@ -1643,29 +1681,80 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, fdirport |= ntohs(input->formatted.src_port); IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); - /* record the first 32 bits of the destination address (big-endian) */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); + /* record vlan (little-endian) and flex_bytes(big-endian) */ + fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes); + fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; + fdirvlan |= ntohs(input->formatted.vlan_id); + IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); - /* record the source address (big-endian) */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); + /* configure FDIRHASH register */ + fdirhash = input->formatted.bkt_hash; + fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + + /* + * flush all previous writes to make certain registers are + * programmed prior to issuing the command + */ + IXGBE_WRITE_FLUSH(hw); /* configure FDIRCMD register */ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + if (queue == IXGBE_FDIR_DROP_QUEUE) + fdircmd |= IXGBE_FDIRCMD_DROP; fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; - /* we only want the bucket hash so drop the upper 16 bits */ - fdirhash = ixgbe_atr_compute_hash_82599(input, - IXGBE_ATR_BUCKET_HASH_KEY); - fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; - - IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); return 0; } +s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id) +{ + u32 fdirhash; + u32 fdircmd = 0; + u32 retry_count; + s32 err = 0; + + /* configure FDIRHASH register */ + fdirhash = input->formatted.bkt_hash; + fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + + /* flush hash to HW */ + IXGBE_WRITE_FLUSH(hw); + + /* Query if filter is present */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); + + for (retry_count = 10; retry_count; retry_count--) { + /* allow 10us for query to process */ + udelay(10); + /* verify query completed successfully */ + fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); + if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK)) + break; + } + + if (!retry_count) + err = IXGBE_ERR_FDIR_REINIT_FAILED; + + /* if filter exists in hardware then remove it */ + if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { + 
IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + IXGBE_FDIRCMD_CMD_REMOVE_FLOW); + } + + return err; +} + /** * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register * @hw: pointer to hardware structure diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 5483b9c3e2c0..e177b5d061fe 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -5130,7 +5130,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->atr_sample_rate = 20; adapter->ring_feature[RING_F_FDIR].indices = IXGBE_MAX_FDIR_INDICES; - adapter->fdir_pballoc = 0; + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; #ifdef IXGBE_FCOE adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 9a499a61d141..8b1abd47056f 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -2056,9 +2056,10 @@ enum { #define IXGBE_VFLREC(_i) (0x00700 + (_i * 4)) enum ixgbe_fdir_pballoc_type { - IXGBE_FDIR_PBALLOC_64K = 0, - IXGBE_FDIR_PBALLOC_128K, - IXGBE_FDIR_PBALLOC_256K, + IXGBE_FDIR_PBALLOC_NONE = 0, + IXGBE_FDIR_PBALLOC_64K = 1, + IXGBE_FDIR_PBALLOC_128K = 2, + IXGBE_FDIR_PBALLOC_256K = 3, }; #define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16 @@ -2112,7 +2113,7 @@ enum ixgbe_fdir_pballoc_type { #define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001 #define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002 #define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003 -#define IXGBE_FDIRCMD_CMD_QUERY_REM_HASH 0x00000007 +#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004 #define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008 #define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010 #define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020 @@ -2131,6 +2132,8 @@ enum ixgbe_fdir_pballoc_type { #define IXGBE_FDIR_INIT_DONE_POLL 10 #define IXGBE_FDIRCMD_CMD_POLL 10 +#define IXGBE_FDIR_DROP_QUEUE 127 + /* Manageablility Host Interface defines */ #define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ #define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ @@ -2350,7 +2353,7 @@ union ixgbe_atr_input { * src_port - 2 bytes * dst_port - 2 bytes * flex_bytes - 2 bytes - * rsvd0 - 2 bytes - space reserved must be 0. + * bkt_hash - 2 bytes */ struct { u8 vm_pool; @@ -2361,7 +2364,7 @@ union ixgbe_atr_input { __be16 src_port; __be16 dst_port; __be16 flex_bytes; - __be16 rsvd0; + __be16 bkt_hash; } formatted; __be32 dword_stream[11]; }; @@ -2382,16 +2385,6 @@ union ixgbe_atr_hash_dword { __be32 dword; }; -struct ixgbe_atr_input_masks { - __be16 rsvd0; - __be16 vlan_id_mask; - __be32 dst_ip_mask[4]; - __be32 src_ip_mask[4]; - __be16 src_port_mask; - __be16 dst_port_mask; - __be16 flex_mask; -}; - enum ixgbe_eeprom_type { ixgbe_eeprom_uninitialized = 0, ixgbe_eeprom_spi, -- cgit v1.2.3-59-g8ed1b From 91cd94bfe4f00fccf692e32dfa86a9fad0d61280 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 11 May 2011 07:18:41 +0000 Subject: ixgbe: add basic support for setting and getting nfc controls This change adds basic support for the obtaining of RSS ring counts. 
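From userspace the new handler is reached through the SIOCETHTOOL ioctl with cmd set to ETHTOOL_GRXRINGS. A minimal, hedged userspace sketch (not part of the patch; device name and error handling kept to a minimum):

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	static long get_rx_ring_count(const char *dev)
	{
		struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_GRXRINGS };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return -1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&nfc;
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
			close(fd);
			return -1;
		}
		close(fd);
		/* ixgbe reports adapter->num_rx_queues in nfc.data */
		return (long)nfc.data;
	}
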
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_ethtool.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index a2d8ed506053..837324429f54 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -2336,6 +2336,24 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) return 0; } +static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + void *rule_locs) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + default: + break; + } + + return ret; +} + static const struct ethtool_ops ixgbe_ethtool_ops = { .get_settings = ixgbe_get_settings, .set_settings = ixgbe_set_settings, @@ -2371,6 +2389,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .set_coalesce = ixgbe_set_coalesce, .get_flags = ethtool_op_get_flags, .set_flags = ixgbe_set_flags, + .get_rxnfc = ixgbe_get_rxnfc, }; void ixgbe_set_ethtool_ops(struct net_device *netdev) -- cgit v1.2.3-59-g8ed1b From 3e05334f8be83e8529f1cbf4f4dea06a4d51d676 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 11 May 2011 07:18:47 +0000 Subject: ixgbe: add support for displaying ntuple filters via the nfc interface This code adds support for displaying the filters that were added via the nfc interface. This is primarily to test the interface for now, but I am also looking into the feasibility of moving all of the ntuple filter code in ixgbe over to the nfc interface since it seems to be better implemented. Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 11 ++++ drivers/net/ixgbe/ixgbe_ethtool.c | 102 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 113 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 5ea5b4c08fe0..d6bfb2f6ba86 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -482,6 +482,17 @@ struct ixgbe_adapter { struct vf_macvlans vf_mvs; struct vf_macvlans *mv_list; bool antispoofing_enabled; + + struct hlist_head fdir_filter_list; + union ixgbe_atr_input fdir_mask; + int fdir_filter_count; +}; + +struct ixgbe_fdir_filter { + struct hlist_node fdir_node; + union ixgbe_atr_input filter; + u16 sw_idx; + u16 action; }; enum ixbge_state_t { diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 837324429f54..649e5960f249 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -2336,6 +2336,97 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) return 0; } +static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + union ixgbe_atr_input *mask = &adapter->fdir_mask; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct hlist_node *node, *node2; + struct ixgbe_fdir_filter *rule = NULL; + + /* report total rule count */ + cmd->data = (1024 << adapter->fdir_pballoc) - 2; + + hlist_for_each_entry_safe(rule, node, node2, + &adapter->fdir_filter_list, fdir_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + /* fill out the flow spec entry */ + + /* set flow type field */ + switch 
(rule->filter.formatted.flow_type) { + case IXGBE_ATR_FLOW_TYPE_TCPV4: + fsp->flow_type = TCP_V4_FLOW; + break; + case IXGBE_ATR_FLOW_TYPE_UDPV4: + fsp->flow_type = UDP_V4_FLOW; + break; + case IXGBE_ATR_FLOW_TYPE_SCTPV4: + fsp->flow_type = SCTP_V4_FLOW; + break; + case IXGBE_ATR_FLOW_TYPE_IPV4: + fsp->flow_type = IP_USER_FLOW; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + fsp->h_u.usr_ip4_spec.proto = 0; + fsp->m_u.usr_ip4_spec.proto = 0; + break; + default: + return -EINVAL; + } + + fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; + fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; + fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; + fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; + fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id; + fsp->m_ext.vlan_tci = mask->formatted.vlan_id; + fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; + fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; + fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); + fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); + fsp->flow_type |= FLOW_EXT; + + /* record action */ + if (rule->action == IXGBE_FDIR_DROP_QUEUE) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else + fsp->ring_cookie = rule->action; + + return 0; +} + +static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct hlist_node *node, *node2; + struct ixgbe_fdir_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = (1024 << adapter->fdir_pballoc) - 2; + + hlist_for_each_entry_safe(rule, node, node2, + &adapter->fdir_filter_list, fdir_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + return 0; +} + static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, void *rule_locs) { @@ -2347,6 +2438,17 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, cmd->data = adapter->num_rx_queues; ret = 0; break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->fdir_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, + (u32 *)rule_locs); + break; default: break; } -- cgit v1.2.3-59-g8ed1b From e4911d57a45ca30771c64b56e552891fcd105070 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 11 May 2011 07:18:52 +0000 Subject: ixgbe: add support for nfc addition and removal of filters This change is meant to allow for nfc to insert and remove filters in order to test the ethtool interface which includes it's own rules manager. 
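With the ntuple flag enabled, rules can then be inserted and removed through the same ioctl path using ETHTOOL_SRXCLSRLINS and ETHTOOL_SRXCLSRLDEL (the ethtool -N/-U commands are built on top of these). A hedged fragment, reusing the ifreq/ioctl plumbing from the previous sketch (htonl/htons from <arpa/inet.h>; the address, port and queue number are illustrative only, and per the mask handling in this series a set m_u bit marks a bit that must match):

	struct ethtool_rxnfc nfc;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.location = 0;				/* software rule index */
	nfc.fs.h_u.tcp_ip4_spec.ip4dst = htonl(0xC0A80001);	/* 192.168.0.1 */
	nfc.fs.m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF);
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xFFFF);
	nfc.fs.ring_cookie = 2;		/* Rx queue 2, or RX_CLS_FLOW_DISC to drop */
	/* ioctl(fd, SIOCETHTOOL, &ifr) with ifr_data pointing at &nfc adds the rule */

	nfc.cmd = ETHTOOL_SRXCLSRLDEL;	/* removal only needs fs.location */
	/* the same ioctl again deletes the rule at fs.location */
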
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_ethtool.c | 245 ++++++++++++++++++++++++++++++++++++++ drivers/net/ixgbe/ixgbe_main.c | 45 +++++++ 2 files changed, 290 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 649e5960f249..2965b6e7728b 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -2456,6 +2456,250 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } +static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ixgbe_fdir_filter *input, + u16 sw_idx) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct hlist_node *node, *node2, *parent; + struct ixgbe_fdir_filter *rule; + int err = -EINVAL; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry_safe(rule, node, node2, + &adapter->fdir_filter_list, fdir_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = node; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->sw_idx == sw_idx)) { + if (!input || (rule->filter.formatted.bkt_hash != + input->filter.formatted.bkt_hash)) { + err = ixgbe_fdir_erase_perfect_filter_82599(hw, + &rule->filter, + sw_idx); + } + + hlist_del(&rule->fdir_node); + kfree(rule); + adapter->fdir_filter_count--; + } + + /* + * If no input this was a delete, err should be 0 if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; + + /* initialize node and set software index */ + INIT_HLIST_NODE(&input->fdir_node); + + /* add filter to the list */ + if (parent) + hlist_add_after(parent, &input->fdir_node); + else + hlist_add_head(&input->fdir_node, + &adapter->fdir_filter_list); + + /* update counts */ + adapter->fdir_filter_count++; + + return 0; +} + +static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, + u8 *flow_type) +{ + switch (fsp->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case UDP_V4_FLOW: + *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case SCTP_V4_FLOW: + *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case IP_USER_FLOW: + switch (fsp->h_u.usr_ip4_spec.proto) { + case IPPROTO_TCP: + *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case IPPROTO_UDP: + *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case IPPROTO_SCTP: + *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case 0: + if (!fsp->m_u.usr_ip4_spec.proto) { + *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; + break; + } + default: + return 0; + } + break; + default: + return 0; + } + + return 1; +} + +static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_fdir_filter *input; + union ixgbe_atr_input mask; + int err; + + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + return -EOPNOTSUPP; + + /* + * Don't allow programming if the action is a queue greater than + * the number of online Rx queues. 
+ */ + if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && + (fsp->ring_cookie >= adapter->num_rx_queues)) + return -EINVAL; + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { + e_err(drv, "Location out of range\n"); + return -EINVAL; + } + + input = kzalloc(sizeof(*input), GFP_ATOMIC); + if (!input) + return -ENOMEM; + + memset(&mask, 0, sizeof(union ixgbe_atr_input)); + + /* set SW index */ + input->sw_idx = fsp->location; + + /* record flow type */ + if (!ixgbe_flowspec_to_flow_type(fsp, + &input->filter.formatted.flow_type)) { + e_err(drv, "Unrecognized flow type\n"); + goto err_out; + } + + mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | + IXGBE_ATR_L4TYPE_MASK; + + if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) + mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; + + /* Copy input into formatted structures */ + input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; + input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; + input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; + mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; + input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + + if (fsp->flow_type & FLOW_EXT) { + input->filter.formatted.vm_pool = + (unsigned char)ntohl(fsp->h_ext.data[1]); + mask.formatted.vm_pool = + (unsigned char)ntohl(fsp->m_ext.data[1]); + input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci; + mask.formatted.vlan_id = fsp->m_ext.vlan_tci; + input->filter.formatted.flex_bytes = + fsp->h_ext.vlan_etype; + mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; + } + + /* determine if we need to drop or route the packet */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + input->action = IXGBE_FDIR_DROP_QUEUE; + else + input->action = fsp->ring_cookie; + + spin_lock(&adapter->fdir_perfect_lock); + + if (hlist_empty(&adapter->fdir_filter_list)) { + /* save mask and program input mask into HW */ + memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); + err = ixgbe_fdir_set_input_mask_82599(hw, &mask); + if (err) { + e_err(drv, "Error writing mask\n"); + goto err_out_w_lock; + } + } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { + e_err(drv, "Only one mask supported per port\n"); + goto err_out_w_lock; + } + + /* apply mask and compute/store hash */ + ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); + + /* program filters to filter memory */ + err = ixgbe_fdir_write_perfect_filter_82599(hw, + &input->filter, input->sw_idx, + adapter->rx_ring[input->action]->reg_idx); + if (err) + goto err_out_w_lock; + + ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +err_out_w_lock: + spin_unlock(&adapter->fdir_perfect_lock); +err_out: + kfree(input); + return -EINVAL; +} + +static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->fdir_perfect_lock); + err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +} + +static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct ixgbe_adapter *adapter = 
+        int ret = -EOPNOTSUPP;
+
+        switch (cmd->cmd) {
+        case ETHTOOL_SRXCLSRLINS:
+                ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
+                break;
+        case ETHTOOL_SRXCLSRLDEL:
+                ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
+                break;
+        default:
+                break;
+        }
+
+        return ret;
+}
+
 static const struct ethtool_ops ixgbe_ethtool_ops = {
         .get_settings = ixgbe_get_settings,
         .set_settings = ixgbe_set_settings,
@@ -2492,6 +2736,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
         .get_flags = ethtool_op_get_flags,
         .set_flags = ixgbe_set_flags,
         .get_rxnfc = ixgbe_get_rxnfc,
+        .set_rxnfc = ixgbe_set_rxnfc,
 };
 
 void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e177b5d061fe..2886daec70ae 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3741,6 +3741,28 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
         hw->mac.ops.set_rxpba(&adapter->hw, num_tc, hdrm, PBA_STRATEGY_EQUAL);
 }
 
+static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
+{
+        struct ixgbe_hw *hw = &adapter->hw;
+        struct hlist_node *node, *node2;
+        struct ixgbe_fdir_filter *filter;
+
+        spin_lock(&adapter->fdir_perfect_lock);
+
+        if (!hlist_empty(&adapter->fdir_filter_list))
+                ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
+
+        hlist_for_each_entry_safe(filter, node, node2,
+                                  &adapter->fdir_filter_list, fdir_node) {
+                ixgbe_fdir_write_perfect_filter_82599(hw,
+                                &filter->filter,
+                                filter->sw_idx,
+                                filter->action);
+        }
+
+        spin_unlock(&adapter->fdir_perfect_lock);
+}
+
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
 {
         struct net_device *netdev = adapter->netdev;
@@ -3765,6 +3787,10 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
                         adapter->tx_ring[i]->atr_sample_rate =
                                                        adapter->atr_sample_rate;
                 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
+        } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
+                ixgbe_init_fdir_perfect_82599(&adapter->hw,
+                                              adapter->fdir_pballoc);
+                ixgbe_fdir_filter_restore(adapter);
         }
 
         ixgbe_configure_virtualization(adapter);
@@ -4141,6 +4167,23 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
                 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
 }
 
+static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
+{
+        struct hlist_node *node, *node2;
+        struct ixgbe_fdir_filter *filter;
+
+        spin_lock(&adapter->fdir_perfect_lock);
+
+        hlist_for_each_entry_safe(filter, node, node2,
+                                  &adapter->fdir_filter_list, fdir_node) {
+                hlist_del(&filter->fdir_node);
+                kfree(filter);
+        }
+        adapter->fdir_filter_count = 0;
+
+        spin_unlock(&adapter->fdir_perfect_lock);
+}
+
 void ixgbe_down(struct ixgbe_adapter *adapter)
 {
         struct net_device *netdev = adapter->netdev;
@@ -5527,6 +5570,8 @@ static int ixgbe_close(struct net_device *netdev)
         ixgbe_down(adapter);
         ixgbe_free_irq(adapter);
 
+        ixgbe_fdir_filter_exit(adapter);
+
         ixgbe_free_all_tx_resources(adapter);
         ixgbe_free_all_rx_resources(adapter);
-- cgit v1.2.3-59-g8ed1b

From 3a28926451a22a2b699962e738c8540da642c319 Mon Sep 17 00:00:00 2001
From: Emil Tantilov
Date: Fri, 13 May 2011 02:22:40 +0000
Subject: ixgbe: move setting RSC into a separate function

Move setting RSC into a separate function to allow for reuse in other
parts of the code.
Signed-off-by: Emil Tantilov
Signed-off-by: Jeff Kirsher
---
 drivers/net/ixgbe/ixgbe_ethtool.c | 34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)
(limited to 'drivers')

diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 2965b6e7728b..405c5ba1d561 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -459,6 +459,21 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
         return 0;
 }
 
+static void ixgbe_set_rsc(struct ixgbe_adapter *adapter)
+{
+        int i;
+
+        for (i = 0; i < adapter->num_rx_queues; i++) {
+                struct ixgbe_ring *ring = adapter->rx_ring[i];
+                if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+                        set_ring_rsc_enabled(ring);
+                        ixgbe_configure_rscctl(adapter, ring);
+                } else {
+                        ixgbe_clear_rscctl(adapter, ring);
+                }
+        }
+}
+
 static u32 ixgbe_get_tx_csum(struct net_device *netdev)
 {
         return (netdev->features & NETIF_F_IP_CSUM) != 0;
@@ -2281,25 +2296,12 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
         } else {
                 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
                 switch (adapter->hw.mac.type) {
+                case ixgbe_mac_X540:
+                        ixgbe_set_rsc(adapter);
+                        break;
                 case ixgbe_mac_82599EB:
                         need_reset = true;
                         break;
-                case ixgbe_mac_X540: {
-                        int i;
-                        for (i = 0; i < adapter->num_rx_queues; i++) {
-                                struct ixgbe_ring *ring =
-                                                adapter->rx_ring[i];
-                                if (adapter->flags2 &
-                                    IXGBE_FLAG2_RSC_ENABLED) {
-                                        ixgbe_configure_rscctl(adapter,
-                                                               ring);
-                                } else {
-                                        ixgbe_clear_rscctl(adapter,
-                                                           ring);
-                                }
-                        }
-                }
-                break;
                 default:
                         break;
                 }
-- cgit v1.2.3-59-g8ed1b

From c988ee829074073d3cd80090ef56a6e370b5c9b4 Mon Sep 17 00:00:00 2001
From: Emil Tantilov
Date: Fri, 13 May 2011 02:22:45 +0000
Subject: ixgbe: move reset code into a separate function

Move reset code into a separate function to allow for reuse in other
parts of the code.
Signed-off-by: Emil Tantilov
Signed-off-by: Jeff Kirsher
---
 drivers/net/ixgbe/ixgbe_ethtool.c | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)
(limited to 'drivers')

diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 405c5ba1d561..bb8441e3990c 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -442,6 +442,16 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
         return 0;
 }
 
+static void ixgbe_do_reset(struct net_device *netdev)
+{
+        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+        if (netif_running(netdev))
+                ixgbe_reinit_locked(adapter);
+        else
+                ixgbe_reset(adapter);
+}
+
 static u32 ixgbe_get_rx_csum(struct net_device *netdev)
 {
         struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -2249,12 +2259,8 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
          * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
          * also locks in RSC enable/disable which requires reset
          */
-        if (need_reset) {
-                if (netif_running(netdev))
-                        ixgbe_reinit_locked(adapter);
-                else
-                        ixgbe_reset(adapter);
-        }
+        if (need_reset)
+                ixgbe_do_reset(netdev);
 
         return 0;
 }
@@ -2328,12 +2334,8 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
                 need_reset = true;
         }
 
-        if (need_reset) {
-                if (netif_running(netdev))
-                        ixgbe_reinit_locked(adapter);
-                else
-                        ixgbe_reset(adapter);
-        }
+        if (need_reset)
+                ixgbe_do_reset(netdev);
 
         return 0;
 }
-- cgit v1.2.3-59-g8ed1b

From 2ba279e2f72a9112a2536eabc8fae5413bbbdfb6 Mon Sep 17 00:00:00 2001
From: Emil Tantilov
Date: Fri, 13 May 2011 02:22:50 +0000
Subject: ixgbe: disable RSC when Rx checksum is off

With Rx checksum offload disabled, RSC produces packets with incorrect
checksums, which leads to performance degradation, so RSC must also be
disabled whenever Rx checksumming is turned off.
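In user-visible terms, on RSC-capable hardware clearing Rx checksum offload now
also clears the LRO flag that ixgbe maps onto RSC. The following is only a
rough sketch of how that could be observed from userspace; the interface name
("eth0") and the use of the legacy ETHTOOL_SRXCSUM/ETHTOOL_GFLAGS ioctls are
assumptions for illustration, not part of this patch:

/* illustration only: turn Rx checksum off, then read the device flags
 * back and report whether LRO (RSC) is still enabled.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_value eval = { .cmd = ETHTOOL_SRXCSUM, .data = 0 };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed name */
        ifr.ifr_data = (char *)&eval;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)           /* Rx checksum off */
                perror("ETHTOOL_SRXCSUM");

        eval.cmd = ETHTOOL_GFLAGS;                      /* read back flags */
        eval.data = 0;
        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("LRO/RSC is now %s\n",
                       (eval.data & ETH_FLAG_LRO) ? "on" : "off");

        return 0;
}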
Signed-off-by: Emil Tantilov
Tested-by: Evan Swanson
Signed-off-by: Jeff Kirsher
---
 drivers/net/ixgbe/ixgbe_ethtool.c | 44 +++++++++++++++++++++++++++++----------
 1 file changed, 33 insertions(+), 11 deletions(-)
(limited to 'drivers')

diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index bb8441e3990c..596d7943024f 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -458,17 +458,6 @@ static u32 ixgbe_get_rx_csum(struct net_device *netdev)
         return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
 }
 
-static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
-{
-        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-        if (data)
-                adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
-        else
-                adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
-
-        return 0;
-}
-
 static void ixgbe_set_rsc(struct ixgbe_adapter *adapter)
 {
         int i;
@@ -484,6 +473,39 @@ static void ixgbe_set_rsc(struct ixgbe_adapter *adapter)
         }
 }
 
+static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
+{
+        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+        bool need_reset = false;
+
+        if (data) {
+                adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+        } else {
+                adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+
+                if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
+                        adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+                        netdev->features &= ~NETIF_F_LRO;
+                }
+
+                switch (adapter->hw.mac.type) {
+                case ixgbe_mac_X540:
+                        ixgbe_set_rsc(adapter);
+                        break;
+                case ixgbe_mac_82599EB:
+                        need_reset = true;
+                        break;
+                default:
+                        break;
+                }
+        }
+
+        if (need_reset)
+                ixgbe_do_reset(netdev);
+
+        return 0;
+}
+
 static u32 ixgbe_get_tx_csum(struct net_device *netdev)
 {
         return (netdev->features & NETIF_F_IP_CSUM) != 0;
-- cgit v1.2.3-59-g8ed1b

From 1f4d51836f5e49f2e5201f1daf90239c04b3faf2 Mon Sep 17 00:00:00 2001
From: Alexander Duyck
Date: Sat, 14 May 2011 01:16:02 +0000
Subject: ixgbe: fix ring assignment issues for SR-IOV and drop cases

This change fixes an issue where we could trigger a null pointer dereference
or program the wrong ring when filters were restored. This change makes
certain that the DROP queue is a static value, and all other rings are based
on the ring offsets for the PF.

Signed-off-by: Alexander Duyck
Tested-by: Ross Brattain
Signed-off-by: Jeff Kirsher
---
 drivers/net/ixgbe/ixgbe_ethtool.c | 2 ++
 drivers/net/ixgbe/ixgbe_main.c    | 8 +++++---
 2 files changed, 7 insertions(+), 3 deletions(-)
(limited to 'drivers')

diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 596d7943024f..074e9baf069a 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -2677,6 +2677,8 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
         /* program filters to filter memory */
         err = ixgbe_fdir_write_perfect_filter_82599(hw,
                                 &input->filter, input->sw_idx,
+                                (input->action == IXGBE_FDIR_DROP_QUEUE) ?
+                                IXGBE_FDIR_DROP_QUEUE :
                                 adapter->rx_ring[input->action]->reg_idx);
         if (err)
                 goto err_out_w_lock;
 
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 2886daec70ae..916a2b64e5f5 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3755,9 +3755,11 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
         hlist_for_each_entry_safe(filter, node, node2,
                                   &adapter->fdir_filter_list, fdir_node) {
                 ixgbe_fdir_write_perfect_filter_82599(hw,
-                                &filter->filter,
-                                filter->sw_idx,
-                                filter->action);
+                                &filter->filter,
+                                filter->sw_idx,
+                                (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
+                                IXGBE_FDIR_DROP_QUEUE :
+                                adapter->rx_ring[filter->action]->reg_idx);
         }
 
         spin_unlock(&adapter->fdir_perfect_lock);
-- cgit v1.2.3-59-g8ed1b

From a38a104d7af27b7697bf7c4272f4be5d1ec6ef4c Mon Sep 17 00:00:00 2001
From: Don Skidmore
Date: Fri, 20 May 2011 03:05:14 +0000
Subject: ixgbe: update driver version string

Update the ixgbe driver version string to better match the Source Driver
with similar device support. Likewise, update to the current LAD Linux
versioning scheme.

Signed-off-by: Don Skidmore
Tested-by: Evan Swanson
Signed-off-by: Jeff Kirsher
---
 drivers/net/ixgbe/ixgbe_main.c | 8 ++++----
 drivers/net/ixgbe/ixgbe_type.h | 3 ++-
 2 files changed, 6 insertions(+), 5 deletions(-)
(limited to 'drivers')

diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 916a2b64e5f5..2496a27b5991 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -54,11 +54,10 @@ char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
                         "Intel(R) 10 Gigabit PCI Express Network Driver";
 
 #define MAJ 3
-#define MIN 3
+#define MIN 4
 #define BUILD 8
-#define KFIX 2
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
-        __stringify(BUILD) "-k" __stringify(KFIX)
+        __stringify(BUILD) "-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
@@ -7713,7 +7712,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
         /* Inform firmware of driver version */
         if (hw->mac.ops.set_fw_drv_ver)
-                hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD, KFIX);
+                hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD,
+                                           FW_CEM_UNUSED_VER);
 
         /* add san mac addr to netdev */
         ixgbe_add_sanmac_netdev(netdev);
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 8b1abd47056f..1eefc0c68409 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -2143,7 +2143,8 @@ enum ixgbe_fdir_pballoc_type {
 #define FW_CEM_HDR_LEN 0x4
 #define FW_CEM_CMD_DRIVER_INFO 0xDD
 #define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
-#define FW_CEM_CMD_RESERVED 0X0
+#define FW_CEM_CMD_RESERVED 0x0
+#define FW_CEM_UNUSED_VER 0x0
 #define FW_CEM_MAX_RETRIES 3
 #define FW_CEM_RESP_STATUS_SUCCESS 0x1
 
-- cgit v1.2.3-59-g8ed1b
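For reference, the .set_rxnfc hook added earlier in this series is driven from
userspace through the ETHTOOL_SRXCLSRLINS and ETHTOOL_SRXCLSRLDEL ioctls, the
same path the ethtool utility uses for its flow classification rules. The
sketch below shows one hypothetical caller; the interface name ("eth0"), rule
location 0, Rx queue 1 and the full match masks are placeholder assumptions
chosen for the example, not values taken from the patches:

/* illustration only: insert a TCP/IPv4 perfect filter steering port 80
 * traffic for 192.168.10.2 to Rx queue 1, then delete it again by its
 * software index (fs.location, which becomes input->sw_idx in the driver).
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int ethtool_ioctl(int fd, const char *dev, void *data)
{
        struct ifreq ifr;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);
        ifr.ifr_data = (char *)data;
        return ioctl(fd, SIOCETHTOOL, &ifr);
}

int main(void)
{
        struct ethtool_rxnfc nfc;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&nfc, 0, sizeof(nfc));
        nfc.cmd = ETHTOOL_SRXCLSRLINS;
        nfc.fs.flow_type = TCP_V4_FLOW;
        nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.168.10.2");
        nfc.fs.m_u.tcp_ip4_spec.ip4dst = htonl(0xffffffff); /* whole address */
        nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
        nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);        /* whole port */
        nfc.fs.ring_cookie = 1;  /* Rx queue 1; RX_CLS_FLOW_DISC would drop */
        nfc.fs.location = 0;     /* software index of the rule */

        if (ethtool_ioctl(fd, "eth0", &nfc) < 0)
                perror("ETHTOOL_SRXCLSRLINS");

        /* remove the rule again by its location */
        memset(&nfc, 0, sizeof(nfc));
        nfc.cmd = ETHTOOL_SRXCLSRLDEL;
        nfc.fs.location = 0;
        if (ethtool_ioctl(fd, "eth0", &nfc) < 0)
                perror("ETHTOOL_SRXCLSRLDEL");

        return 0;
}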