Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c')
-rw-r--r--   drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 951
1 file changed, 741 insertions(+), 210 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 97f90edbc068..9cbce1faab26 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -66,6 +66,9 @@
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+#include <net/tls.h>
+#endif
#include "cxgb4.h"
#include "cxgb4_filter.h"
@@ -90,11 +93,6 @@
char cxgb4_driver_name[] = KBUILD_MODNAME;
-#ifdef DRV_VERSION
-#undef DRV_VERSION
-#endif
-#define DRV_VERSION "2.0.0-ko"
-const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
@@ -137,7 +135,6 @@ const char cxgb4_driver_version[] = DRV_VERSION;
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
@@ -186,6 +183,7 @@ static struct dentry *cxgb4_debugfs_root;
LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
+LIST_HEAD(uld_list);
static int cfg_queues(struct adapter *adap);
@@ -437,8 +435,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
- return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
- (dev->flags & IFF_PROMISC) ? 1 : 0,
+ return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror,
+ mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
(dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
sleep_ok);
}
@@ -451,7 +449,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
* or -1
* @addr: the new MAC address value
* @persist: whether a new MAC allocation should be persistent
- * @add_smt: if true also add the address to the HW SMT
+ * @smt_idx: the destination to store the new SMT index.
*
* Modifies an MPS filter and sets it to the new MAC address if
* @tcam_idx >= 0, or adds the MAC address to a new filter if
@@ -505,15 +503,16 @@ set_hash:
*/
static int link_start(struct net_device *dev)
{
- int ret;
struct port_info *pi = netdev_priv(dev);
- unsigned int mb = pi->adapter->pf;
+ unsigned int mb = pi->adapter->mbox;
+ int ret;
/*
* We do not set address filters and promiscuity here, the stack does
* that step explicitly.
*/
- ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
+ ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror,
+ dev->mtu, -1, -1, -1,
!!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (ret == 0)
ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
@@ -824,6 +823,31 @@ static void adap_config_hpfilter(struct adapter *adapter)
"HP filter region isn't supported by FW\n");
}
+static int cxgb4_config_rss(const struct port_info *pi, u16 *rss,
+ u16 rss_size, u16 viid)
+{
+ struct adapter *adap = pi->adapter;
+ int ret;
+
+ ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss,
+ rss_size);
+ if (ret)
+ return ret;
+
+ /* If Tunnel All Lookup isn't specified in the global RSS
+ * Configuration, then we need to specify a default Ingress
+ * Queue for any ingress packets which aren't hashed. We'll
+ * use our first ingress queue ...
+ */
+ return t4_config_vi_rss(adap, adap->mbox, viid,
+ FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
+ FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
+ FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
+ FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
+ FW_RSS_VI_CONFIG_CMD_UDPEN_F,
+ rss[0]);
+}
+
/**
* cxgb4_write_rss - write the RSS table for a given port
* @pi: the port
@@ -835,10 +859,10 @@ static void adap_config_hpfilter(struct adapter *adapter)
*/
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
- u16 *rss;
- int i, err;
struct adapter *adapter = pi->adapter;
const struct sge_eth_rxq *rxq;
+ int i, err;
+ u16 *rss;
rxq = &adapter->sge.ethrxq[pi->first_qset];
rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
@@ -849,21 +873,7 @@ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
for (i = 0; i < pi->rss_size; i++, queues++)
rss[i] = rxq[*queues].rspq.abs_id;
- err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
- pi->rss_size, rss, pi->rss_size);
- /* If Tunnel All Lookup isn't specified in the global RSS
- * Configuration, then we need to specify a default Ingress
- * Queue for any ingress packets which aren't hashed. We'll
- * use our first ingress queue ...
- */
- if (!err)
- err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
- FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
- FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
- FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
- FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
- FW_RSS_VI_CONFIG_CMD_UDPEN_F,
- rss[0]);
+ err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid);
kfree(rss);
return err;
}
@@ -1166,6 +1176,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
txq = netdev_pick_tx(dev, skb, sb_dev);
if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
skb->encapsulation ||
+ cxgb4_is_ktls_skb(skb) ||
(proto != IPPROTO_TCP && proto != IPPROTO_UDP))
txq = txq % pi->nqsets;
@@ -1261,15 +1272,15 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
- const struct port_info *pi = netdev_priv(dev);
netdev_features_t changed = dev->features ^ features;
+ const struct port_info *pi = netdev_priv(dev);
int err;
if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
return 0;
- err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
- -1, -1, -1,
+ err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
+ pi->viid_mirror, -1, -1, -1, -1,
!!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (unlikely(err))
dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
@@ -1287,6 +1298,292 @@ static int setup_debugfs(struct adapter *adap)
return 0;
}
+static void cxgb4_port_mirror_free_rxq(struct adapter *adap,
+ struct sge_eth_rxq *mirror_rxq)
+{
+ if ((adap->flags & CXGB4_FULL_INIT_DONE) &&
+ !(adap->flags & CXGB4_SHUTTING_DOWN))
+ cxgb4_quiesce_rx(&mirror_rxq->rspq);
+
+ if (adap->flags & CXGB4_USING_MSIX) {
+ cxgb4_clear_msix_aff(mirror_rxq->msix->vec,
+ mirror_rxq->msix->aff_mask);
+ free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq);
+ cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
+ }
+
+ free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
+}
+
+static int cxgb4_port_mirror_alloc_queues(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_eth_rxq *mirror_rxq;
+ struct sge *s = &adap->sge;
+ int ret = 0, msix = 0;
+ u16 i, rxqid;
+ u16 *rss;
+
+ if (!pi->vi_mirror_count)
+ return 0;
+
+ if (s->mirror_rxq[pi->port_id])
+ return 0;
+
+ mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL);
+ if (!mirror_rxq)
+ return -ENOMEM;
+
+ s->mirror_rxq[pi->port_id] = mirror_rxq;
+
+ if (!(adap->flags & CXGB4_USING_MSIX))
+ msix = -((int)adap->sge.intrq.abs_id + 1);
+
+ for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) {
+ mirror_rxq = &s->mirror_rxq[pi->port_id][i];
+
+ /* Allocate Mirror Rxqs */
+ if (msix >= 0) {
+ msix = cxgb4_get_msix_idx_from_bmap(adap);
+ if (msix < 0) {
+ ret = msix;
+ goto out_free_queues;
+ }
+
+ mirror_rxq->msix = &adap->msix_info[msix];
+ snprintf(mirror_rxq->msix->desc,
+ sizeof(mirror_rxq->msix->desc),
+ "%s-mirrorrxq%d", dev->name, i);
+ }
+
+ init_rspq(adap, &mirror_rxq->rspq,
+ CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC,
+ CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT,
+ CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM,
+ CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE);
+
+ mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM;
+
+ ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false,
+ dev, msix, &mirror_rxq->fl,
+ t4_ethrx_handler, NULL, 0);
+ if (ret)
+ goto out_free_msix_idx;
+
+ /* Setup MSI-X vectors for Mirror Rxqs */
+ if (adap->flags & CXGB4_USING_MSIX) {
+ ret = request_irq(mirror_rxq->msix->vec,
+ t4_sge_intr_msix, 0,
+ mirror_rxq->msix->desc,
+ &mirror_rxq->rspq);
+ if (ret)
+ goto out_free_rxq;
+
+ cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec,
+ &mirror_rxq->msix->aff_mask, i);
+ }
+
+ /* Start NAPI for Mirror Rxqs */
+ cxgb4_enable_rx(adap, &mirror_rxq->rspq);
+ }
+
+ /* Setup RSS for Mirror Rxqs */
+ rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
+ if (!rss) {
+ ret = -ENOMEM;
+ goto out_free_queues;
+ }
+
+ mirror_rxq = &s->mirror_rxq[pi->port_id][0];
+ for (i = 0; i < pi->rss_size; i++)
+ rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id;
+
+ ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror);
+ kfree(rss);
+ if (ret)
+ goto out_free_queues;
+
+ return 0;
+
+out_free_rxq:
+ free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
+
+out_free_msix_idx:
+ cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
+
+out_free_queues:
+ while (rxqid-- > 0)
+ cxgb4_port_mirror_free_rxq(adap,
+ &s->mirror_rxq[pi->port_id][rxqid]);
+
+ kfree(s->mirror_rxq[pi->port_id]);
+ s->mirror_rxq[pi->port_id] = NULL;
+ return ret;
+}
+
+static void cxgb4_port_mirror_free_queues(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge *s = &adap->sge;
+ u16 i;
+
+ if (!pi->vi_mirror_count)
+ return;
+
+ if (!s->mirror_rxq[pi->port_id])
+ return;
+
+ for (i = 0; i < pi->nmirrorqsets; i++)
+ cxgb4_port_mirror_free_rxq(adap,
+ &s->mirror_rxq[pi->port_id][i]);
+
+ kfree(s->mirror_rxq[pi->port_id]);
+ s->mirror_rxq[pi->port_id] = NULL;
+}
+
+static int cxgb4_port_mirror_start(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ int ret, idx = -1;
+
+ if (!pi->vi_mirror_count)
+ return 0;
+
+ /* Mirror VIs can be created dynamically after stack had
+ * already setup Rx modes like MTU, promisc, allmulti, etc.
+ * on main VI. So, parse what the stack had setup on the
+ * main VI and update the same on the mirror VI.
+ */
+ ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror,
+ dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
+ (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
+ !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Failed start up Rx mode for Mirror VI 0x%x, ret: %d\n",
+ pi->viid_mirror, ret);
+ return ret;
+ }
+
+ /* Enable replication bit for the device's MAC address
+ * in MPS TCAM, so that the packets for the main VI are
+ * replicated to mirror VI.
+ */
+ ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx,
+ dev->dev_addr, true, NULL);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Failed updating MAC filter for Mirror VI 0x%x, ret: %d\n",
+ pi->viid_mirror, ret);
+ return ret;
+ }
+
+ /* Enabling a Virtual Interface can result in an interrupt
+ * during the processing of the VI Enable command and, in some
+ * paths, result in an attempt to issue another command in the
+ * interrupt context. Thus, we disable interrupts during the
+ * course of the VI Enable command ...
+ */
+ local_bh_disable();
+ ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true,
+ false);
+ local_bh_enable();
+ if (ret)
+ dev_err(adap->pdev_dev,
+ "Failed starting Mirror VI 0x%x, ret: %d\n",
+ pi->viid_mirror, ret);
+
+ return ret;
+}
+
+static void cxgb4_port_mirror_stop(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ if (!pi->vi_mirror_count)
+ return;
+
+ t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false,
+ false);
+}
+
+int cxgb4_port_mirror_alloc(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ int ret = 0;
+
+ if (!pi->nmirrorqsets)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&pi->vi_mirror_mutex);
+ if (pi->viid_mirror) {
+ pi->vi_mirror_count++;
+ goto out_unlock;
+ }
+
+ ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0,
+ &pi->viid_mirror);
+ if (ret)
+ goto out_unlock;
+
+ pi->vi_mirror_count = 1;
+
+ if (adap->flags & CXGB4_FULL_INIT_DONE) {
+ ret = cxgb4_port_mirror_alloc_queues(dev);
+ if (ret)
+ goto out_free_vi;
+
+ ret = cxgb4_port_mirror_start(dev);
+ if (ret)
+ goto out_free_queues;
+ }
+
+ mutex_unlock(&pi->vi_mirror_mutex);
+ return 0;
+
+out_free_queues:
+ cxgb4_port_mirror_free_queues(dev);
+
+out_free_vi:
+ pi->vi_mirror_count = 0;
+ t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
+ pi->viid_mirror = 0;
+
+out_unlock:
+ mutex_unlock(&pi->vi_mirror_mutex);
+ return ret;
+}
+
+void cxgb4_port_mirror_free(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ mutex_lock(&pi->vi_mirror_mutex);
+ if (!pi->viid_mirror)
+ goto out_unlock;
+
+ if (pi->vi_mirror_count > 1) {
+ pi->vi_mirror_count--;
+ goto out_unlock;
+ }
+
+ cxgb4_port_mirror_stop(dev);
+ cxgb4_port_mirror_free_queues(dev);
+
+ pi->vi_mirror_count = 0;
+ t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
+ pi->viid_mirror = 0;
+
+out_unlock:
+ mutex_unlock(&pi->vi_mirror_mutex);
+}
+
/*
* upper-layer driver support
*/
@@ -1585,6 +1882,7 @@ static int tid_init(struct tid_info *t)
atomic_set(&t->tids_in_use, 0);
atomic_set(&t->conns_in_use, 0);
atomic_set(&t->hash_tids_in_use, 0);
+ atomic_set(&t->eotids_in_use, 0);
/* Setup the free list for atid_tab and clear the stid bitmap. */
if (natids) {
@@ -1616,6 +1914,7 @@ static int tid_init(struct tid_info *t)
* @stid: the server TID
* @sip: local IP address to bind server to
* @sport: the server's TCP port
+ * @vlan: the VLAN header information
* @queue: queue to direct messages from this server to
*
* Create an IP server for the given port and address.
@@ -2344,6 +2643,9 @@ static void detach_ulds(struct adapter *adap)
{
unsigned int i;
+ if (!is_uld(adap))
+ return;
+
mutex_lock(&uld_mutex);
list_del(&adap->list_node);
@@ -2535,7 +2837,7 @@ static void cxgb_down(struct adapter *adapter)
/*
* net_device operations
*/
-int cxgb_open(struct net_device *dev)
+static int cxgb_open(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -2557,12 +2859,33 @@ int cxgb_open(struct net_device *dev)
return err;
err = link_start(dev);
- if (!err)
- netif_tx_start_all_queues(dev);
+ if (err)
+ return err;
+
+ if (pi->nmirrorqsets) {
+ mutex_lock(&pi->vi_mirror_mutex);
+ err = cxgb4_port_mirror_alloc_queues(dev);
+ if (err)
+ goto out_unlock;
+
+ err = cxgb4_port_mirror_start(dev);
+ if (err)
+ goto out_free_queues;
+ mutex_unlock(&pi->vi_mirror_mutex);
+ }
+
+ netif_tx_start_all_queues(dev);
+ return 0;
+
+out_free_queues:
+ cxgb4_port_mirror_free_queues(dev);
+
+out_unlock:
+ mutex_unlock(&pi->vi_mirror_mutex);
return err;
}
-int cxgb_close(struct net_device *dev)
+static int cxgb_close(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -2576,7 +2899,17 @@ int cxgb_close(struct net_device *dev)
cxgb4_dcb_reset(dev);
dcb_tx_queue_prio_enable(dev, false);
#endif
- return ret;
+ if (ret)
+ return ret;
+
+ if (pi->nmirrorqsets) {
+ mutex_lock(&pi->vi_mirror_mutex);
+ cxgb4_port_mirror_stop(dev);
+ cxgb4_port_mirror_free_queues(dev);
+ mutex_unlock(&pi->vi_mirror_mutex);
+ }
+
+ return 0;
}
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
@@ -2610,7 +2943,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
/* Clear out filter specifications */
memset(&f->fs, 0, sizeof(struct ch_filter_specification));
- f->fs.val.lport = cpu_to_be16(sport);
+ f->fs.val.lport = be16_to_cpu(sport);
f->fs.mask.lport = ~0;
val = (u8 *)&sip;
if ((val[0] | val[1] | val[2] | val[3]) != 0) {
@@ -2842,11 +3175,11 @@ static void cxgb_set_rxmode(struct net_device *dev)
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
- int ret;
struct port_info *pi = netdev_priv(dev);
+ int ret;
- ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
- -1, -1, -1, true);
+ ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
+ pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
if (!ret)
dev->mtu = new_mtu;
return ret;
@@ -2871,8 +3204,6 @@ static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
int err;
u8 *na;
- adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
- PCI_CAP_ID_VPD);
err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
if (err)
return;
@@ -3027,7 +3358,7 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
SCHED_CLASS_RATEUNIT_BITS,
SCHED_CLASS_RATEMODE_ABS,
pi->tx_chan, class_id, 0,
- max_tx_rate * 1000, 0, pktsize);
+ max_tx_rate * 1000, 0, pktsize, 0);
if (ret) {
dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
ret);
@@ -3137,8 +3468,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
if (ret < 0)
return ret;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- pi->xact_addr_filt = ret;
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -3404,129 +3734,71 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
-static void cxgb_del_udp_tunnel(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static int cxgb_udp_tunnel_unset_port(struct net_device *netdev,
+ unsigned int table, unsigned int entry,
+ struct udp_tunnel_info *ti)
{
struct port_info *pi = netdev_priv(netdev);
struct adapter *adapter = pi->adapter;
- unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
int ret = 0, i;
- if (chip_ver < CHELSIO_T6)
- return;
-
switch (ti->type) {
case UDP_TUNNEL_TYPE_VXLAN:
- if (!adapter->vxlan_port_cnt ||
- adapter->vxlan_port != ti->port)
- return; /* Invalid VxLAN destination port */
-
- adapter->vxlan_port_cnt--;
- if (adapter->vxlan_port_cnt)
- return;
-
adapter->vxlan_port = 0;
t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
break;
case UDP_TUNNEL_TYPE_GENEVE:
- if (!adapter->geneve_port_cnt ||
- adapter->geneve_port != ti->port)
- return; /* Invalid GENEVE destination port */
-
- adapter->geneve_port_cnt--;
- if (adapter->geneve_port_cnt)
- return;
-
adapter->geneve_port = 0;
t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
break;
default:
- return;
+ return -EINVAL;
}
/* Matchall mac entries can be deleted only after all tunnel ports
* are brought down or removed.
*/
if (!adapter->rawf_cnt)
- return;
+ return 0;
for_each_port(adapter, i) {
pi = adap2pinfo(adapter, i);
ret = t4_free_raw_mac_filt(adapter, pi->viid,
match_all_mac, match_all_mac,
- adapter->rawf_start +
- pi->port_id,
+ adapter->rawf_start + pi->port_id,
1, pi->port_id, false);
if (ret < 0) {
netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
i);
- return;
+ return ret;
}
}
+
+ return 0;
}
-static void cxgb_add_udp_tunnel(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static int cxgb_udp_tunnel_set_port(struct net_device *netdev,
+ unsigned int table, unsigned int entry,
+ struct udp_tunnel_info *ti)
{
struct port_info *pi = netdev_priv(netdev);
struct adapter *adapter = pi->adapter;
- unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
int i, ret;
- if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
- return;
-
switch (ti->type) {
case UDP_TUNNEL_TYPE_VXLAN:
- /* Callback for adding vxlan port can be called with the same
- * port for both IPv4 and IPv6. We should not disable the
- * offloading when the same port for both protocols is added
- * and later one of them is removed.
- */
- if (adapter->vxlan_port_cnt &&
- adapter->vxlan_port == ti->port) {
- adapter->vxlan_port_cnt++;
- return;
- }
-
- /* We will support only one VxLAN port */
- if (adapter->vxlan_port_cnt) {
- netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
- be16_to_cpu(adapter->vxlan_port),
- be16_to_cpu(ti->port));
- return;
- }
-
adapter->vxlan_port = ti->port;
- adapter->vxlan_port_cnt = 1;
-
t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
break;
case UDP_TUNNEL_TYPE_GENEVE:
- if (adapter->geneve_port_cnt &&
- adapter->geneve_port == ti->port) {
- adapter->geneve_port_cnt++;
- return;
- }
-
- /* We will support only one GENEVE port */
- if (adapter->geneve_port_cnt) {
- netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
- be16_to_cpu(adapter->geneve_port),
- be16_to_cpu(ti->port));
- return;
- }
-
adapter->geneve_port = ti->port;
- adapter->geneve_port_cnt = 1;
-
t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
break;
default:
- return;
+ return -EINVAL;
}
/* Create a 'match all' mac filter entry for inner mac,
@@ -3541,18 +3813,27 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev,
ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
match_all_mac,
match_all_mac,
- adapter->rawf_start +
- pi->port_id,
+ adapter->rawf_start + pi->port_id,
1, pi->port_id, false);
if (ret < 0) {
netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
be16_to_cpu(ti->port));
- cxgb_del_udp_tunnel(netdev, ti);
- return;
+ return ret;
}
}
+
+ return 0;
}
+static const struct udp_tunnel_nic_info cxgb_udp_tunnels = {
+ .set_port = cxgb_udp_tunnel_set_port,
+ .unset_port = cxgb_udp_tunnel_unset_port,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+};
+
static netdev_features_t cxgb_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
@@ -3591,7 +3872,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_set_mac_address = cxgb_set_mac_addr,
.ndo_set_features = cxgb_set_features,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = cxgb_ioctl,
+ .ndo_eth_ioctl = cxgb_ioctl,
.ndo_change_mtu = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cxgb_netpoll,
@@ -3602,8 +3883,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#endif /* CONFIG_CHELSIO_T4_FCOE */
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
.ndo_setup_tc = cxgb_setup_tc,
- .ndo_udp_tunnel_add = cxgb_add_udp_tunnel,
- .ndo_udp_tunnel_del = cxgb_del_udp_tunnel,
.ndo_features_check = cxgb_features_check,
.ndo_fix_features = cxgb_fix_features,
};
@@ -3618,23 +3897,21 @@ static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
.ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
.ndo_set_vf_link_state = cxgb4_mgmt_set_vf_link_state,
};
-#endif
static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct adapter *adapter = netdev2adap(dev);
- strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->version, cxgb4_driver_version,
- sizeof(info->version));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
.get_drvinfo = cxgb4_mgmt_get_drvinfo,
};
+#endif
static void notify_fatal_err(struct work_struct *work)
{
@@ -3731,7 +4008,7 @@ static void adap_free_hma_mem(struct adapter *adapter)
if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
- adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL);
+ adapter->hma.sgt->nents, DMA_BIDIRECTIONAL);
adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
}
@@ -4150,8 +4427,7 @@ static int adap_init0_phy(struct adapter *adap)
/* Load PHY Firmware onto adapter.
*/
- ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
- phy_info->phy_fw_version,
+ ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
(u8 *)phyf->data, phyf->size);
if (ret < 0)
dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
@@ -4771,23 +5047,20 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
/* Allocate the memory for the vaious egress queue bitmaps
* ie starving_fl, txq_maperr and blocked_fl.
*/
- adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.starving_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.starving_fl) {
ret = -ENOMEM;
goto bye;
}
- adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.txq_maperr = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.txq_maperr) {
ret = -ENOMEM;
goto bye;
}
#ifdef CONFIG_DEBUG_FS
- adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.blocked_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.blocked_fl) {
ret = -ENOMEM;
goto bye;
@@ -4860,7 +5133,7 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
/* See if FW supports FW_FILTER2 work request */
if (is_t4(adap->params.chip)) {
- adap->params.filter2_wr_support = 0;
+ adap->params.filter2_wr_support = false;
} else {
params[0] = FW_PARAM_DEV(FILTER2_WR);
ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
@@ -5140,10 +5413,10 @@ bye:
adap_free_hma_mem(adap);
kfree(adap->sge.egr_map);
kfree(adap->sge.ingr_map);
- kfree(adap->sge.starving_fl);
- kfree(adap->sge.txq_maperr);
+ bitmap_free(adap->sge.starving_fl);
+ bitmap_free(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
- kfree(adap->sge.blocked_fl);
+ bitmap_free(adap->sge.blocked_fl);
#endif
if (ret != -ETIMEDOUT && ret != -EIO)
t4_fw_bye(adap, adap->mbox);
@@ -5381,10 +5654,10 @@ static inline bool is_x_10g_port(const struct link_config *lc)
static int cfg_queues(struct adapter *adap)
{
u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
- u32 i, n10g = 0, qidx = 0, n1g = 0;
u32 ncpus = num_online_cpus();
u32 niqflint, neq, num_ulds;
struct sge *s = &adap->sge;
+ u32 i, n10g = 0, qidx = 0;
u32 q10g = 0, q1g;
/* Reduce memory usage in kdump environment, disable all offload. */
@@ -5430,7 +5703,6 @@ static int cfg_queues(struct adapter *adap)
if (n10g)
q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
- n1g = adap->params.nports - n10g;
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging support we need to be able to support up
* to 8 Traffic Priorities; each of which will be assigned to its
@@ -5448,7 +5720,8 @@ static int cfg_queues(struct adapter *adap)
else
q10g = max(8U, q10g);
- while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g))
+ while ((q10g * n10g) >
+ (avail_eth_qsets - (adap->params.nports - n10g) * q1g))
q10g--;
#else /* !CONFIG_CHELSIO_T4_DCB */
@@ -5506,6 +5779,19 @@ static int cfg_queues(struct adapter *adap)
avail_qsets -= s->eoqsets;
}
+ /* Mirror queues must follow same scheme as normal Ethernet
+ * Queues, when there are enough queues available. Otherwise,
+ * allocate at least 1 queue per port. If even 1 queue is not
+ * available, then disable mirror queues support.
+ */
+ if (avail_qsets >= s->max_ethqsets)
+ s->mirrorqsets = s->max_ethqsets;
+ else if (avail_qsets >= adap->params.nports)
+ s->mirrorqsets = adap->params.nports;
+ else
+ s->mirrorqsets = 0;
+ avail_qsets -= s->mirrorqsets;
+
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
struct sge_eth_rxq *r = &s->ethrxq[i];
@@ -5564,8 +5850,7 @@ static int alloc_msix_info(struct adapter *adap, u32 num_vec)
if (!msix_info)
return -ENOMEM;
- adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
- sizeof(long), GFP_KERNEL);
+ adap->msix_bmap.msix_bmap = bitmap_zalloc(num_vec, GFP_KERNEL);
if (!adap->msix_bmap.msix_bmap) {
kfree(msix_info);
return -ENOMEM;
@@ -5580,7 +5865,7 @@ static int alloc_msix_info(struct adapter *adap, u32 num_vec)
static void free_msix_info(struct adapter *adap)
{
- kfree(adap->msix_bmap.msix_bmap);
+ bitmap_free(adap->msix_bmap.msix_bmap);
kfree(adap->msix_info);
}
@@ -5619,8 +5904,8 @@ void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
static int enable_msix(struct adapter *adap)
{
- u32 eth_need, uld_need = 0, ethofld_need = 0;
- u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0;
+ u32 eth_need, uld_need = 0, ethofld_need = 0, mirror_need = 0;
+ u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0, mirrorqsets = 0;
u8 num_uld = 0, nchan = adap->params.nports;
u32 i, want, need, num_vec;
struct sge *s = &adap->sge;
@@ -5651,6 +5936,12 @@ static int enable_msix(struct adapter *adap)
need += ethofld_need;
}
+ if (s->mirrorqsets) {
+ want += s->mirrorqsets;
+ mirror_need = nchan;
+ need += mirror_need;
+ }
+
want += EXTRA_VECS;
need += EXTRA_VECS;
@@ -5684,8 +5975,10 @@ static int enable_msix(struct adapter *adap)
adap->params.ethofld = 0;
s->ofldqsets = 0;
s->eoqsets = 0;
+ s->mirrorqsets = 0;
uld_need = 0;
ethofld_need = 0;
+ mirror_need = 0;
}
num_vec = allocated;
@@ -5699,6 +5992,8 @@ static int enable_msix(struct adapter *adap)
ofldqsets = nchan;
if (is_ethofld(adap))
eoqsets = ethofld_need;
+ if (s->mirrorqsets)
+ mirrorqsets = mirror_need;
num_vec -= need;
while (num_vec) {
@@ -5730,12 +6025,25 @@ static int enable_msix(struct adapter *adap)
num_vec -= uld_need;
}
}
+
+ if (s->mirrorqsets) {
+ while (num_vec) {
+ if (num_vec < mirror_need ||
+ mirrorqsets > s->mirrorqsets)
+ break;
+
+ mirrorqsets++;
+ num_vec -= mirror_need;
+ }
+ }
} else {
ethqsets = s->max_ethqsets;
if (is_uld(adap))
ofldqsets = s->ofldqsets;
if (is_ethofld(adap))
eoqsets = s->eoqsets;
+ if (s->mirrorqsets)
+ mirrorqsets = s->mirrorqsets;
}
if (ethqsets < s->max_ethqsets) {
@@ -5751,6 +6059,15 @@ static int enable_msix(struct adapter *adap)
if (is_ethofld(adap))
s->eoqsets = eoqsets;
+ if (s->mirrorqsets) {
+ s->mirrorqsets = mirrorqsets;
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ pi->nmirrorqsets = s->mirrorqsets / nchan;
+ mutex_init(&pi->vi_mirror_mutex);
+ }
+ }
+
/* map for msix */
ret = alloc_msix_info(adap, allocated);
if (ret)
@@ -5762,8 +6079,9 @@ static int enable_msix(struct adapter *adap)
}
dev_info(adap->pdev_dev,
- "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d\n",
- allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld);
+ "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n",
+ allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld,
+ s->mirrorqsets);
kfree(entries);
return 0;
@@ -5840,8 +6158,7 @@ static void print_port_info(const struct net_device *dev)
--bufp;
sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
- netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
- dev->name, adap->params.vpd.id, adap->name, buf);
+ netdev_info(dev, "Chelsio %s %s\n", adap->params.vpd.id, buf);
}
/*
@@ -5864,12 +6181,13 @@ static void free_some_resources(struct adapter *adapter)
cxgb4_cleanup_tc_mqprio(adapter);
cxgb4_cleanup_tc_flower(adapter);
cxgb4_cleanup_tc_u32(adapter);
+ cxgb4_cleanup_ethtool_filters(adapter);
kfree(adapter->sge.egr_map);
kfree(adapter->sge.ingr_map);
- kfree(adapter->sge.starving_fl);
- kfree(adapter->sge.txq_maperr);
+ bitmap_free(adapter->sge.starving_fl);
+ bitmap_free(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
- kfree(adapter->sge.blocked_fl);
+ bitmap_free(adapter->sge.blocked_fl);
#endif
disable_msi(adapter);
@@ -6071,6 +6389,213 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
}
#endif /* CONFIG_PCI_IOV */
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) || IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+
+static int chcr_offload_state(struct adapter *adap,
+ enum cxgb4_netdev_tls_ops op_val)
+{
+ switch (op_val) {
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+ case CXGB4_TLSDEV_OPS:
+ if (!adap->uld[CXGB4_ULD_KTLS].handle) {
+ dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n");
+ return -EOPNOTSUPP;
+ }
+ if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) {
+ dev_dbg(adap->pdev_dev,
+ "ch_ktls driver has no registered tlsdev_ops\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+ case CXGB4_XFRMDEV_OPS:
+ if (!adap->uld[CXGB4_ULD_IPSEC].handle) {
+ dev_dbg(adap->pdev_dev, "chipsec driver is not loaded\n");
+ return -EOPNOTSUPP;
+ }
+ if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) {
+ dev_dbg(adap->pdev_dev,
+ "chipsec driver has no registered xfrmdev_ops\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+ default:
+ dev_dbg(adap->pdev_dev,
+ "driver has no support for offload %d\n", op_val);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_CHELSIO_TLS_DEVICE || CONFIG_CHELSIO_IPSEC_INLINE */
+
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+
+static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 tcp_sn)
+{
+ struct adapter *adap = netdev2adap(netdev);
+ int ret;
+
+ mutex_lock(&uld_mutex);
+ ret = chcr_offload_state(adap, CXGB4_TLSDEV_OPS);
+ if (ret)
+ goto out_unlock;
+
+ ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE);
+ if (ret)
+ goto out_unlock;
+
+ ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk,
+ direction,
+ crypto_info,
+ tcp_sn);
+ /* if there is a failure, clear the refcount */
+ if (ret)
+ cxgb4_set_ktls_feature(adap,
+ FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
+out_unlock:
+ mutex_unlock(&uld_mutex);
+ return ret;
+}
+
+static void cxgb4_ktls_dev_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct adapter *adap = netdev2adap(netdev);
+
+ mutex_lock(&uld_mutex);
+ if (chcr_offload_state(adap, CXGB4_TLSDEV_OPS))
+ goto out_unlock;
+
+ adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
+ direction);
+
+out_unlock:
+ cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
+ mutex_unlock(&uld_mutex);
+}
+
+static const struct tlsdev_ops cxgb4_ktls_ops = {
+ .tls_dev_add = cxgb4_ktls_dev_add,
+ .tls_dev_del = cxgb4_ktls_dev_del,
+};
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+
+static int cxgb4_xfrm_add_state(struct xfrm_state *x)
+{
+ struct adapter *adap = netdev2adap(x->xso.dev);
+ int ret;
+
+ if (!mutex_trylock(&uld_mutex)) {
+ dev_dbg(adap->pdev_dev,
+ "crypto uld critical resource is under use\n");
+ return -EBUSY;
+ }
+ ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS);
+ if (ret)
+ goto out_unlock;
+
+ ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x);
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+
+ return ret;
+}
+
+static void cxgb4_xfrm_del_state(struct xfrm_state *x)
+{
+ struct adapter *adap = netdev2adap(x->xso.dev);
+
+ if (!mutex_trylock(&uld_mutex)) {
+ dev_dbg(adap->pdev_dev,
+ "crypto uld critical resource is under use\n");
+ return;
+ }
+ if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+ goto out_unlock;
+
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+}
+
+static void cxgb4_xfrm_free_state(struct xfrm_state *x)
+{
+ struct adapter *adap = netdev2adap(x->xso.dev);
+
+ if (!mutex_trylock(&uld_mutex)) {
+ dev_dbg(adap->pdev_dev,
+ "crypto uld critical resource is under use\n");
+ return;
+ }
+ if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+ goto out_unlock;
+
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x);
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+}
+
+static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ struct adapter *adap = netdev2adap(x->xso.dev);
+ bool ret = false;
+
+ if (!mutex_trylock(&uld_mutex)) {
+ dev_dbg(adap->pdev_dev,
+ "crypto uld critical resource is under use\n");
+ return ret;
+ }
+ if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+ goto out_unlock;
+
+ ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+ return ret;
+}
+
+static void cxgb4_advance_esn_state(struct xfrm_state *x)
+{
+ struct adapter *adap = netdev2adap(x->xso.dev);
+
+ if (!mutex_trylock(&uld_mutex)) {
+ dev_dbg(adap->pdev_dev,
+ "crypto uld critical resource is under use\n");
+ return;
+ }
+ if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+ goto out_unlock;
+
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x);
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+}
+
+static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
+ .xdo_dev_state_add = cxgb4_xfrm_add_state,
+ .xdo_dev_state_delete = cxgb4_xfrm_del_state,
+ .xdo_dev_state_free = cxgb4_xfrm_free_state,
+ .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok,
+ .xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
+};
+
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
@@ -6078,7 +6603,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
static int adap_idx = 1;
int s_qpp, qpp, num_seg;
struct port_info *pi;
- bool highdma = false;
enum chip_type chip;
void __iomem *regs;
int func, chip_ver;
@@ -6086,8 +6610,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int i, err;
u32 whoami;
- printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
-
err = pci_request_regions(pdev, KBUILD_MODNAME);
if (err) {
/* Just info, some other driver may have claimed the device. */
@@ -6159,20 +6681,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
}
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- highdma = true;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
- "coherent allocations\n");
- goto out_free_adapter;
- }
- } else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto out_free_adapter;
- }
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto out_free_adapter;
}
pci_enable_pcie_error_reporting(pdev);
@@ -6260,13 +6772,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
setup_memwin(adapter);
err = adap_init0(adapter, 0);
-#ifdef CONFIG_DEBUG_FS
- bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
-#endif
- setup_memwin_rdma(adapter);
if (err)
goto out_unmap_bar;
+ setup_memwin_rdma(adapter);
+
/* configure SGE_STAT_CFG_A to read WC stats */
if (!is_t4(adapter->params.chip))
t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
@@ -6303,7 +6813,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_TC;
+ NETIF_F_HW_TC | NETIF_F_NTUPLE | NETIF_F_HIGHDMA;
if (chip_ver > CHELSIO_T5) {
netdev->hw_enc_features |= NETIF_F_IP_CSUM |
@@ -6316,12 +6826,28 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_HW_TLS_RECORD;
+
+ if (adapter->rawf_cnt)
+ netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels;
}
- if (highdma)
- netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
netdev->vlan_features = netdev->features & VLAN_FEAT;
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+ if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->tlsdev_ops = &cxgb4_ktls_ops;
+ /* initialize the refcount */
+ refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
+ }
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+ if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) {
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+ netdev->features |= NETIF_F_HW_ESP;
+ netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops;
+ }
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -6419,6 +6945,24 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
i);
}
+ if (is_offload(adapter) || is_hashfilter(adapter)) {
+ if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
+ u32 v;
+
+ v = t4_read_reg(adapter, LE_DB_HASH_CONFIG_A);
+ if (chip_ver <= CHELSIO_T5) {
+ adapter->tids.nhash = 1 << HASHTIDSIZE_G(v);
+ v = t4_read_reg(adapter, LE_DB_TID_HASHBASE_A);
+ adapter->tids.hash_base = v / 4;
+ } else {
+ adapter->tids.nhash = HASHTBLSIZE_G(v) << 3;
+ v = t4_read_reg(adapter,
+ T6_LE_DB_HASH_TID_BASE_A);
+ adapter->tids.hash_base = v;
+ }
+ }
+ }
+
if (tid_init(&adapter->tids) < 0) {
dev_warn(&pdev->dev, "could not allocate TID table, "
"continuing\n");
@@ -6440,22 +6984,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (cxgb4_init_tc_matchall(adapter))
dev_warn(&pdev->dev,
"could not offload tc matchall, continuing\n");
- }
-
- if (is_offload(adapter) || is_hashfilter(adapter)) {
- if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
- u32 hash_base, hash_reg;
-
- if (chip_ver <= CHELSIO_T5) {
- hash_reg = LE_DB_TID_HASHBASE_A;
- hash_base = t4_read_reg(adapter, hash_reg);
- adapter->tids.hash_base = hash_base / 4;
- } else {
- hash_reg = T6_LE_DB_HASH_TID_BASE_A;
- hash_base = t4_read_reg(adapter, hash_reg);
- adapter->tids.hash_base = hash_base;
- }
- }
+ if (cxgb4_init_ethtool_filters(adapter))
+ dev_warn(&pdev->dev,
+ "could not initialize ethtool filters, continuing\n");
}
/* See what interrupts we'll be using */
@@ -6529,11 +7060,8 @@ fw_attach_fail:
/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
pdev->needs_freset = 1;
- if (is_uld(adapter)) {
- mutex_lock(&uld_mutex);
- list_add_tail(&adapter->list_node, &adapter_list);
- mutex_unlock(&uld_mutex);
- }
+ if (is_uld(adapter))
+ cxgb4_uld_enable(adapter);
if (!is_t4(adapter->params.chip))
cxgb4_ptp_init(adapter);
@@ -6596,10 +7124,13 @@ static void remove_one(struct pci_dev *pdev)
*/
destroy_workqueue(adapter->workq);
- if (is_uld(adapter)) {
- detach_ulds(adapter);
- t4_uld_clean_up(adapter);
- }
+ detach_ulds(adapter);
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ unregister_netdev(adapter->port[i]);
+
+ t4_uld_clean_up(adapter);
adap_free_hma_mem(adapter);
@@ -6607,10 +7138,6 @@ static void remove_one(struct pci_dev *pdev)
cxgb4_free_mps_ref_entries(adapter);
- for_each_port(adapter, i)
- if (adapter->port[i]->reg_state == NETREG_REGISTERED)
- unregister_netdev(adapter->port[i]);
-
debugfs_remove_recursive(adapter->debugfs_root);
if (!is_t4(adapter->params.chip))
@@ -6682,6 +7209,10 @@ static void shutdown_one(struct pci_dev *pdev)
if (adapter->port[i]->reg_state == NETREG_REGISTERED)
cxgb_close(adapter->port[i]);
+ rtnl_lock();
+ cxgb4_mqprio_stop_offload(adapter);
+ rtnl_unlock();
+
if (is_uld(adapter)) {
detach_ulds(adapter);
t4_uld_clean_up(adapter);