Diffstat (limited to 'drivers/net/ethernet/mellanox')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 69
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 85
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dim.c | 48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 110
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 291
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 154
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 347
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 282
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 548
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 147
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 216
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 150
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 214
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 92
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 255
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/i2c.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/item.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 98
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 602
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 101
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 359
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c | 66
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 81
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 49
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 551
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 280
52 files changed, 4058 insertions(+), 1561 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 5f41dc92aa68..1a0c3bf86ead 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -310,6 +310,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
}
switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_VENDOR:
case IEEE_8021QAZ_TSA_STRICT:
break;
case IEEE_8021QAZ_TSA_ETS:
@@ -347,6 +348,10 @@ static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
/* higher TC means higher priority => lower pg */
for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_VENDOR:
+ pg[i] = MLX4_EN_TC_VENDOR;
+ tc_tx_bw[i] = MLX4_EN_BW_MAX;
+ break;
case IEEE_8021QAZ_TSA_STRICT:
pg[i] = num_strict++;
tc_tx_bw[i] = MLX4_EN_BW_MAX;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index bf1f04164885..ebc1f566a4d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1094,12 +1094,21 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
if (param->rx_jumbo_pending || param->rx_mini_pending)
return -EINVAL;
+ if (param->rx_pending < MLX4_EN_MIN_RX_SIZE) {
+ en_warn(priv, "%s: rx_pending (%d) < min (%d)\n",
+ __func__, param->rx_pending,
+ MLX4_EN_MIN_RX_SIZE);
+ return -EINVAL;
+ }
+ if (param->tx_pending < MLX4_EN_MIN_TX_SIZE) {
+ en_warn(priv, "%s: tx_pending (%d) < min (%lu)\n",
+ __func__, param->tx_pending,
+ MLX4_EN_MIN_TX_SIZE);
+ return -EINVAL;
+ }
+
rx_size = roundup_pow_of_two(param->rx_pending);
- rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
- rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
tx_size = roundup_pow_of_two(param->tx_pending);
- tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
- tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
priv->rx_ring[0]->size) &&
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 99051a294fa6..8fc51bc29003 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2172,8 +2172,9 @@ static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
prof->rx_ring_size, priv->stride,
- node))
+ node, i))
goto err;
+
}
#ifdef CONFIG_RFS_ACCEL
@@ -3336,6 +3337,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
+ u8 prio;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
+ priv->ets.prio_tc[prio] = prio;
+ priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR;
+ }
+
priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
DCB_CAP_DCBX_VER_IEEE;
priv->flags |= MLX4_EN_DCB_ENABLED;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 85e28efcda33..b4d144e67514 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -262,7 +262,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
- u32 size, u16 stride, int node)
+ u32 size, u16 stride, int node, int queue_index)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rx_ring *ring;
@@ -286,6 +286,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
+ if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0)
+ goto err_ring;
+
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
ring->rx_info = vzalloc_node(tmp, node);
@@ -293,7 +296,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->rx_info = vzalloc(tmp);
if (!ring->rx_info) {
err = -ENOMEM;
- goto err_ring;
+ goto err_xdp_info;
}
}
@@ -317,6 +320,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
err_info:
vfree(ring->rx_info);
ring->rx_info = NULL;
+err_xdp_info:
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
err_ring:
kfree(ring);
*pring = NULL;
@@ -440,6 +445,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
lockdep_is_held(&mdev->state_lock));
if (old_prog)
bpf_prog_put(old_prog);
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
@@ -617,6 +623,10 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
return 0;
}
#endif
+
+/* We reach this function only after checking that any of
+ * the (IPv4 | IPv6) bits are set in cqe->status.
+ */
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
netdev_features_t dev_features)
{
@@ -632,13 +642,11 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
hdr += sizeof(struct vlan_hdr);
}
- if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
- return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
#endif
- return 0;
+ return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
}
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
@@ -650,6 +658,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
int cq_ring = cq->ring;
bool doorbell_pending;
struct mlx4_cqe *cqe;
+ struct xdp_buff xdp;
int polled = 0;
int index;
@@ -664,6 +673,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
rcu_read_lock();
xdp_prog = rcu_dereference(ring->xdp_prog);
+ xdp.rxq = &ring->xdp_rxq;
doorbell_pending = 0;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
@@ -748,7 +758,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
* read bytes but not past the end of the frag.
*/
if (xdp_prog) {
- struct xdp_buff xdp;
dma_addr_t dma;
void *orig_data;
u32 act;
@@ -814,33 +823,33 @@ xdp_drop_no_cnt:
if (likely(dev->features & NETIF_F_RXCSUM)) {
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) {
- if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
- cqe->checksum == cpu_to_be16(0xffff)) {
- bool l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
- (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
-
- ip_summed = CHECKSUM_UNNECESSARY;
- hash_type = PKT_HASH_TYPE_L4;
- if (l2_tunnel)
- skb->csum_level = 1;
- ring->csum_ok++;
- } else {
+ bool l2_tunnel;
+
+ if (!((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+ cqe->checksum == cpu_to_be16(0xffff)))
goto csum_none;
- }
+
+ l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+ (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+ ip_summed = CHECKSUM_UNNECESSARY;
+ hash_type = PKT_HASH_TYPE_L4;
+ if (l2_tunnel)
+ skb->csum_level = 1;
+ ring->csum_ok++;
} else {
- if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
- (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
- MLX4_CQE_STATUS_IPV6))) {
- if (check_csum(cqe, skb, va, dev->features)) {
- goto csum_none;
- } else {
- ip_summed = CHECKSUM_COMPLETE;
- hash_type = PKT_HASH_TYPE_L3;
- ring->csum_complete++;
- }
- } else {
+ if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
+ (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+#if IS_ENABLED(CONFIG_IPV6)
+ MLX4_CQE_STATUS_IPV6))))
+#else
+ 0))))
+#endif
goto csum_none;
- }
+ if (check_csum(cqe, skb, va, dev->features))
+ goto csum_none;
+ ip_summed = CHECKSUM_COMPLETE;
+ hash_type = PKT_HASH_TYPE_L3;
+ ring->csum_complete++;
}
} else {
csum_none:
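
The checksum rework above is easier to read flattened. A condensed sketch of the resulting decision flow (counters, vlan handling and the csum_none fallthrough elided; a paraphrase of the hunks above, not the literal driver code):

    if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
                                  MLX4_CQE_STATUS_UDP)) {
            /* HW validated L4: demand IPOK plus a full 0xffff checksum */
            if (!((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
                  cqe->checksum == cpu_to_be16(0xffff)))
                    goto csum_none;
            ip_summed = CHECKSUM_UNNECESSARY;       /* hash type L4 */
    } else {
            /* non-TCP/UDP: needs the pflag and a recognized IP version;
             * the IPV6 status bit is compiled out without CONFIG_IPV6
             */
            if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP) ||
                !(cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
                                            MLX4_CQE_STATUS_IPV6)) ||
                check_csum(cqe, skb, va, dev->features))
                    goto csum_none;
            ip_summed = CHECKSUM_COMPLETE;          /* hash type L3 */
    }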
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2b72677eccd4..f470ae37d937 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -46,6 +46,7 @@
#endif
#include <linux/cpu_rmap.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/xdp.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
@@ -356,6 +357,7 @@ struct mlx4_en_rx_ring {
unsigned long dropped;
int hwtstamp_rx_filter;
cpumask_var_t affinity_mask;
+ struct xdp_rxq_info xdp_rxq;
};
struct mlx4_en_cq {
@@ -479,6 +481,7 @@ struct mlx4_en_frag_info {
#define MLX4_EN_BW_MIN 1
#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
+#define MLX4_EN_TC_VENDOR 0
#define MLX4_EN_TC_ETS 7
enum dcb_pfc_type {
@@ -719,7 +722,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
- u32 size, u16 stride, int node);
+ u32 size, u16 stride, int node, int queue_index);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index c7c0764991c9..2e84f10f59ba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -1103,30 +1103,16 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey)
{
- struct mlx4_cmd_mailbox *mailbox;
- int err;
-
if (!fmr->maps)
return;
- fmr->maps = 0;
+ /* To unmap: it is sufficient to take back ownership from HW */
+ *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox)) {
- err = PTR_ERR(mailbox);
- pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
- return;
- }
+ /* Make sure MPT status is visible */
+ wmb();
- err = mlx4_HW2SW_MPT(dev, NULL,
- key_to_hw_index(fmr->mr.key) &
- (dev->caps.num_mpts - 1));
- mlx4_free_cmd_mailbox(dev, mailbox);
- if (err) {
- pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
- return;
- }
- fmr->mr.enabled = MLX4_MPT_EN_SW;
+ fmr->maps = 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
@@ -1136,6 +1122,22 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
if (fmr->maps)
return -EBUSY;
+ if (fmr->mr.enabled == MLX4_MPT_EN_HW) {
+ /* In case the FMR was enabled and unmapped,
+ * make sure to give ownership of MPT back to HW
+ * so the HW2SW_MPT command will succeed.
+ */
+ *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
+ /* Make sure MPT status is visible before changing MPT fields */
+ wmb();
+ fmr->mpt->length = 0;
+ fmr->mpt->start = 0;
+ /* Make sure MPT data is visible after changing MPT status */
+ wmb();
+ *(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;
+ /* make sure MPT status is visible */
+ wmb();
+ }
ret = mlx4_mr_free(dev, &fmr->mr);
if (ret)
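
The mr.c change above leans on the MPT ownership handshake: software may only edit an MPT entry while it owns it, and every status flip needs a write barrier so the device never observes a half-updated entry. A minimal sketch of the pattern (hypothetical helper, not part of the patch):

    /* Hypothetical helper showing the SW -> edit -> HW handoff used above. */
    static void mlx4_mpt_edit(struct mlx4_mpt_entry *mpt, u64 start, u64 length)
    {
            *(u8 *)mpt = MLX4_MPT_STATUS_SW;  /* take ownership from HW */
            wmb();                            /* status visible before edits */
            mpt->start  = cpu_to_be64(start);
            mpt->length = cpu_to_be64(length);
            wmb();                            /* edits visible before release */
            *(u8 *)mpt = MLX4_MPT_STATUS_HW;  /* give ownership back to HW */
            wmb();                            /* status visible to HW */
    }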
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 19b21b40ab07..c805769d92a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,7 +14,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
fpga/ipsec.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
- en_tx.o en_rx.o en_rx_am.o en_txrx.o en_stats.o vxlan.o \
+ en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
en_arfs.o en_fs_ethtool.o en_selftest.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c2d89bfa1a70..4c9360b25532 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -44,8 +44,11 @@
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
+#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
+#include <net/xdp.h>
+#include <linux/net_dim.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
@@ -226,12 +229,6 @@ enum mlx5e_priv_flag {
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif
-struct mlx5e_cq_moder {
- u16 usec;
- u16 pkts;
- u8 cq_period_mode;
-};
-
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
@@ -242,8 +239,8 @@ struct mlx5e_params {
u16 num_channels;
u8 num_tc;
bool rx_cqe_compress_def;
- struct mlx5e_cq_moder rx_cq_moderation;
- struct mlx5e_cq_moder tx_cq_moderation;
+ struct net_dim_cq_moder rx_cq_moderation;
+ struct net_dim_cq_moder tx_cq_moderation;
bool lro_en;
u32 lro_wqe_sz;
u16 tx_max_inline;
@@ -253,7 +250,7 @@ struct mlx5e_params {
u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
bool vlan_strip_disable;
bool scatter_fcs_en;
- bool rx_am_enabled;
+ bool rx_dim_enabled;
u32 lro_timeout;
u32 pflags;
struct bpf_prog *xdp_prog;
@@ -472,32 +469,6 @@ struct mlx5e_mpw_info {
u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
};
-struct mlx5e_rx_am_stats {
- int ppms; /* packets per msec */
- int bpms; /* bytes per msec */
- int epms; /* events per msec */
-};
-
-struct mlx5e_rx_am_sample {
- ktime_t time;
- u32 pkt_ctr;
- u32 byte_ctr;
- u16 event_ctr;
-};
-
-struct mlx5e_rx_am { /* Adaptive Moderation */
- u8 state;
- struct mlx5e_rx_am_stats prev_stats;
- struct mlx5e_rx_am_sample start_sample;
- struct work_struct work;
- u8 profile_ix;
- u8 mode;
- u8 tune_state;
- u8 steps_right;
- u8 steps_left;
- u8 tired;
-};
-
/* a single cache unit is capable to serve one napi call (for non-striding rq)
* or a MPWQE (for striding rq).
*/
@@ -558,7 +529,7 @@ struct mlx5e_rq {
unsigned long state;
int ix;
- struct mlx5e_rx_am am; /* Adaptive Moderation */
+ struct net_dim dim; /* Dynamic Interrupt Moderation */
/* XDP */
struct bpf_prog *xdp_prog;
@@ -571,6 +542,9 @@ struct mlx5e_rq {
u32 rqn;
struct mlx5_core_dev *mdev;
struct mlx5_core_mkey umr_mkey;
+
+ /* XDP read-mostly */
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
struct mlx5e_channel {
@@ -587,6 +561,7 @@ struct mlx5e_channel {
/* data path - accessed per napi poll */
struct irq_desc *irq_desc;
+ struct mlx5e_ch_stats stats;
/* control */
struct mlx5e_priv *priv;
@@ -655,6 +630,7 @@ struct mlx5e_tc_table {
struct rhashtable ht;
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+ DECLARE_HASHTABLE(hairpin_tbl, 8);
};
struct mlx5e_vlan_table {
@@ -722,6 +698,11 @@ enum {
MLX5E_ARFS_FT_LEVEL
};
+enum {
+ MLX5E_TC_FT_LEVEL = 0,
+ MLX5E_TC_TTC_FT_LEVEL,
+};
+
struct mlx5e_ethtool_table {
struct mlx5_flow_table *ft;
int num_rules;
@@ -860,11 +841,7 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
-void mlx5e_rx_am(struct mlx5e_rq *rq);
-void mlx5e_rx_am_work(struct work_struct *work);
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
-
-void mlx5e_update_stats(struct mlx5e_priv *priv, bool full);
+void mlx5e_update_stats(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
@@ -1054,11 +1031,26 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
+struct ttc_params {
+ struct mlx5_flow_table_attr ft_attr;
+ u32 any_tt_tirn;
+ u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_ttc_table *inner_ttc;
+};
+
+void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
+void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
+void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv);
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv);
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc);
+
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
u32 underlay_qpn, u32 *tisn);
@@ -1071,6 +1063,8 @@ int mlx5e_open(struct net_device *netdev);
void mlx5e_update_stats_work(struct work_struct *work);
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
+int mlx5e_bits_invert(unsigned long a, int size);
+
/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo);
@@ -1110,4 +1104,5 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u16 max_channels);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
+void mlx5e_rx_dim_work(struct work_struct *work);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
new file mode 100644
index 000000000000..602851ab5b14
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/net_dim.h>
+#include "en.h"
+
+void mlx5e_rx_dim_work(struct work_struct *work)
+{
+ struct net_dim *dim = container_of(work, struct net_dim,
+ work);
+ struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
+ struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
+ dim->profile_ix);
+
+ mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
+ cur_profile.usec, cur_profile.pkts);
+
+ dim->state = NET_DIM_START_MEASURE;
+}
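
en_dim.c only provides the work handler; the sampling that drives it sits in the napi poll path (the en_txrx.c entry in the diffstat, not shown here). Roughly, assuming the net_dim API this series converts to:

    /* Assumed napi-side hookup; the exact wiring lives in en_txrx.c. */
    if (test_bit(MLX5E_RQ_STATE_AM, &c->rq.state)) {
            struct net_dim_sample dim_sample;

            net_dim_sample(c->rq.cq.event_ctr,
                           c->rq.stats.packets,
                           c->rq.stats.bytes,
                           &dim_sample);
            net_dim(&c->rq.dim, dim_sample); /* may queue mlx5e_rx_dim_work() */
    }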
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index ea5fff2c3143..cc8048f68f11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -207,7 +207,7 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
return;
mutex_lock(&priv->state_lock);
- mlx5e_update_stats(priv, true);
+ mlx5e_update_stats(priv);
mutex_unlock(&priv->state_lock);
for (i = 0; i < mlx5e_num_stats_grps; i++)
@@ -295,7 +295,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct mlx5e_channels new_channels = {};
u32 rx_pending_wqes;
u32 min_rq_size;
- u32 max_rq_size;
u8 log_rq_size;
u8 log_sq_size;
u32 num_mtts;
@@ -314,8 +313,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
1 << mlx5_min_log_rq_size(rq_wq_type));
- max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
- 1 << mlx5_max_log_rq_size(rq_wq_type));
rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
param->rx_pending);
@@ -325,12 +322,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
min_rq_size);
return -EINVAL;
}
- if (param->rx_pending > max_rq_size) {
- netdev_info(priv->netdev, "%s: rx_pending (%d) > max (%d)\n",
- __func__, param->rx_pending,
- max_rq_size);
- return -EINVAL;
- }
num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
@@ -346,12 +337,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
return -EINVAL;
}
- if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
- netdev_info(priv->netdev, "%s: tx_pending (%d) > max (%d)\n",
- __func__, param->tx_pending,
- 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
- return -EINVAL;
- }
log_rq_size = order_base_2(rx_pending_wqes);
log_sq_size = order_base_2(param->tx_pending);
@@ -479,7 +464,7 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts;
coal->tx_coalesce_usecs = priv->channels.params.tx_cq_moderation.usec;
coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts;
- coal->use_adaptive_rx_coalesce = priv->channels.params.rx_am_enabled;
+ coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled;
return 0;
}
@@ -533,7 +518,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
- new_channels.params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
+ new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@@ -541,7 +526,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
}
/* we are opened */
- reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_am_enabled;
+ reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
if (!reset) {
mlx5e_set_priv_channels_coalesce(priv, coal);
priv->channels.params = new_channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index def513484845..f64dda2bed31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -806,25 +806,25 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
return err ? ERR_PTR(err) : rule;
}
-static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
+static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
+ struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc)
{
struct mlx5_flow_destination dest = {};
- struct mlx5e_ttc_table *ttc;
struct mlx5_flow_handle **rules;
struct mlx5_flow_table *ft;
int tt;
int err;
- ttc = &priv->fs.ttc;
ft = ttc->ft.t;
rules = ttc->rules;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
if (tt == MLX5E_TT_ANY)
- dest.tir_num = priv->direct_tir[0].tirn;
+ dest.tir_num = params->any_tt_tirn;
else
- dest.tir_num = priv->indir_tir[tt].tirn;
+ dest.tir_num = params->indir_tirn[tt];
rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
ttc_rules[tt].etype,
ttc_rules[tt].proto);
@@ -832,12 +832,12 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
goto del_rules;
}
- if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+ if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
return 0;
rules = ttc->tunnel_rules;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.inner_ttc.ft.t;
+ dest.ft = params->inner_ttc->ft.t;
for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
ttc_tunnel_rules[tt].etype,
@@ -977,25 +977,25 @@ mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
return err ? ERR_PTR(err) : rule;
}
-static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv)
+static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv,
+ struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc)
{
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rules;
- struct mlx5e_ttc_table *ttc;
struct mlx5_flow_table *ft;
int err;
int tt;
- ttc = &priv->fs.inner_ttc;
ft = ttc->ft.t;
rules = ttc->rules;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
if (tt == MLX5E_TT_ANY)
- dest.tir_num = priv->direct_tir[0].tirn;
+ dest.tir_num = params->any_tt_tirn;
else
- dest.tir_num = priv->inner_indir_tir[tt].tirn;
+ dest.tir_num = params->indir_tirn[tt];
rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
ttc_rules[tt].etype,
@@ -1075,21 +1075,42 @@ err:
return err;
}
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
+ struct ttc_params *ttc_params)
+{
+ ttc_params->any_tt_tirn = priv->direct_tir[0].tirn;
+ ttc_params->inner_ttc = &priv->fs.inner_ttc;
+}
+
+void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
+{
+ struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+
+ ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
+ ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
+ ft_attr->prio = MLX5E_NIC_PRIO;
+}
+
+void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
+
+{
+ struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+
+ ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
+ ft_attr->level = MLX5E_TTC_FT_LEVEL;
+ ft_attr->prio = MLX5E_NIC_PRIO;
+}
+
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc)
{
- struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
- struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_flow_table *ft = &ttc->ft;
int err;
if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
return 0;
- ft_attr.max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
- ft_attr.level = MLX5E_INNER_TTC_FT_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
-
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -1100,7 +1121,7 @@ int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
if (err)
goto err;
- err = mlx5e_generate_inner_ttc_table_rules(priv);
+ err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc);
if (err)
goto err;
@@ -1111,10 +1132,9 @@ err:
return err;
}
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc)
{
- struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
-
if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
return;
@@ -1122,27 +1142,21 @@ void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
mlx5e_destroy_flow_table(&ttc->ft);
}
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc)
{
- struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
-
mlx5e_cleanup_ttc_rules(ttc);
mlx5e_destroy_flow_table(&ttc->ft);
}
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc)
{
bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
- struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
- struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_flow_table *ft = &ttc->ft;
int err;
- ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
- ft_attr.level = MLX5E_TTC_FT_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
-
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -1153,7 +1167,7 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
if (err)
goto err;
- err = mlx5e_generate_ttc_table_rules(priv);
+ err = mlx5e_generate_ttc_table_rules(priv, params, ttc);
if (err)
goto err;
@@ -1474,7 +1488,8 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
- int err;
+ struct ttc_params ttc_params = {};
+ int tt, err;
priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
@@ -1489,14 +1504,23 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- err = mlx5e_create_inner_ttc_table(priv);
+ mlx5e_set_ttc_basic_params(priv, &ttc_params);
+ mlx5e_set_inner_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
+
+ err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
err);
goto err_destroy_arfs_tables;
}
- err = mlx5e_create_ttc_table(priv);
+ mlx5e_set_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
+
+ err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
@@ -1524,9 +1548,9 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
err_destroy_l2_table:
mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
- mlx5e_destroy_ttc_table(priv);
+ mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_inner_ttc_table:
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
@@ -1537,8 +1561,8 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_vlan_table(priv);
mlx5e_destroy_l2_table(priv);
- mlx5e_destroy_ttc_table(priv);
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
mlx5e_arfs_destroy_tables(priv);
mlx5e_ethtool_cleanup_steering(priv);
}
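
The point of threading ttc_params through en_fs.c is that other consumers (e.g. the en_tc.c hairpin code, 548 lines in the diffstat) can now build their own TTC tables. A minimal consumer, using only the calls shown above:

    static int create_nic_ttc(struct mlx5e_priv *priv)
    {
            struct ttc_params ttc_params = {};
            int tt;

            mlx5e_set_ttc_basic_params(priv, &ttc_params); /* any-TT tirn, inner ttc */
            mlx5e_set_ttc_ft_params(&ttc_params);          /* max_fte/level/prio */
            for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                    ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

            return mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
    }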
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d8aefeed124d..47bab842c5ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -173,182 +173,23 @@ unlock:
rtnl_unlock();
}
-static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
+void mlx5e_update_stats(struct mlx5e_priv *priv)
{
- struct mlx5e_sw_stats temp, *s = &temp;
- struct mlx5e_rq_stats *rq_stats;
- struct mlx5e_sq_stats *sq_stats;
- int i, j;
-
- memset(s, 0, sizeof(*s));
- for (i = 0; i < priv->channels.num; i++) {
- struct mlx5e_channel *c = priv->channels.c[i];
-
- rq_stats = &c->rq.stats;
-
- s->rx_packets += rq_stats->packets;
- s->rx_bytes += rq_stats->bytes;
- s->rx_lro_packets += rq_stats->lro_packets;
- s->rx_lro_bytes += rq_stats->lro_bytes;
- s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
- s->rx_csum_none += rq_stats->csum_none;
- s->rx_csum_complete += rq_stats->csum_complete;
- s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
- s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
- s->rx_xdp_drop += rq_stats->xdp_drop;
- s->rx_xdp_tx += rq_stats->xdp_tx;
- s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
- s->rx_wqe_err += rq_stats->wqe_err;
- s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
- s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
- s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
- s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
- s->rx_page_reuse += rq_stats->page_reuse;
- s->rx_cache_reuse += rq_stats->cache_reuse;
- s->rx_cache_full += rq_stats->cache_full;
- s->rx_cache_empty += rq_stats->cache_empty;
- s->rx_cache_busy += rq_stats->cache_busy;
- s->rx_cache_waive += rq_stats->cache_waive;
-
- for (j = 0; j < priv->channels.params.num_tc; j++) {
- sq_stats = &c->sq[j].stats;
-
- s->tx_packets += sq_stats->packets;
- s->tx_bytes += sq_stats->bytes;
- s->tx_tso_packets += sq_stats->tso_packets;
- s->tx_tso_bytes += sq_stats->tso_bytes;
- s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
- s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
- s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
- s->tx_queue_stopped += sq_stats->stopped;
- s->tx_queue_wake += sq_stats->wake;
- s->tx_queue_dropped += sq_stats->dropped;
- s->tx_xmit_more += sq_stats->xmit_more;
- s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
- s->tx_csum_none += sq_stats->csum_none;
- s->tx_csum_partial += sq_stats->csum_partial;
- }
- }
-
- s->link_down_events_phy = MLX5_GET(ppcnt_reg,
- priv->stats.pport.phy_counters,
- counter_set.phys_layer_cntrs.link_down_events);
- memcpy(&priv->stats.sw, s, sizeof(*s));
-}
-
-static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
-{
- int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
- u32 *out = (u32 *)priv->stats.vport.query_vport_out;
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
- struct mlx5_core_dev *mdev = priv->mdev;
-
- MLX5_SET(query_vport_counter_in, in, opcode,
- MLX5_CMD_OP_QUERY_VPORT_COUNTER);
- MLX5_SET(query_vport_counter_in, in, op_mod, 0);
- MLX5_SET(query_vport_counter_in, in, other_vport, 0);
-
- mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
-}
-
-static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
-{
- struct mlx5e_pport_stats *pstats = &priv->stats.pport;
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
- int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
- int prio;
- void *out;
-
- MLX5_SET(ppcnt_reg, in, local_port, 1);
-
- out = pstats->IEEE_802_3_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
- if (!full)
- return;
-
- out = pstats->RFC_2863_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
- out = pstats->RFC_2819_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
- out = pstats->phy_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
- if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
- out = pstats->phy_statistical_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
- }
-
- if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) {
- out = pstats->eth_ext_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
- }
-
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
- for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
- out = pstats->per_prio_counters[prio];
- MLX5_SET(ppcnt_reg, in, prio_tc, prio);
- mlx5_core_access_reg(mdev, in, sz, out, sz,
- MLX5_REG_PPCNT, 0, 0);
- }
-}
-
-static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
-{
- struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
- u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
- int err;
-
- if (!priv->q_counter)
- return;
-
- err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
- if (err)
- return;
-
- qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
-}
-
-static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
-{
- struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
- int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
- void *out;
-
- if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
- return;
-
- out = pcie_stats->pcie_perf_counters;
- MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-}
+ int i;
-void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
-{
- if (full) {
- mlx5e_update_pcie_counters(priv);
- mlx5e_ipsec_update_stats(priv);
- }
- mlx5e_update_pport_counters(priv, full);
- mlx5e_update_vport_counters(priv);
- mlx5e_update_q_counter(priv);
- mlx5e_update_sw_counters(priv);
+ for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
+ if (mlx5e_stats_grps[i].update_stats)
+ mlx5e_stats_grps[i].update_stats(priv);
}
static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
- mlx5e_update_stats(priv, false);
+ int i;
+
+ for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
+ if (mlx5e_stats_grps[i].update_stats_mask &
+ MLX5E_NDO_UPDATE_STATS)
+ mlx5e_stats_grps[i].update_stats(priv);
}
void mlx5e_update_stats_work(struct work_struct *work)
@@ -582,6 +423,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
+ err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
+ if (err < 0)
+ goto err_rq_wq_destroy;
+
rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
rq->buff.headroom = params->rq_headroom;
@@ -674,8 +519,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
wqe->data.lkey = rq->mkey_be;
}
- INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
- rq->am.mode = params->rx_cq_moderation.cq_period_mode;
+ INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
+
+ switch (params->rx_cq_moderation.cq_period_mode) {
+ case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
+ rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ break;
+ case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
+ default:
+ rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
+
rq->page_cache.head = 0;
rq->page_cache.tail = 0;
@@ -687,6 +541,7 @@ err_destroy_umr_mkey:
err_rq_wq_destroy:
if (rq->xdp_prog)
bpf_prog_put(rq->xdp_prog);
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
mlx5_wq_destroy(&rq->wq_ctrl);
return err;
@@ -699,6 +554,8 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
if (rq->xdp_prog)
bpf_prog_put(rq->xdp_prog);
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
mlx5e_rq_free_mpwqe_info(rq);
@@ -919,7 +776,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (err)
goto err_destroy_rq;
- if (params->rx_am_enabled)
+ if (params->rx_dim_enabled)
c->rq.state |= BIT(MLX5E_RQ_STATE_AM);
return 0;
@@ -952,7 +809,7 @@ static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
- cancel_work_sync(&rq->am.work);
+ cancel_work_sync(&rq->dim.work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
mlx5e_free_rq(rq);
@@ -1565,7 +1422,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
}
static int mlx5e_open_cq(struct mlx5e_channel *c,
- struct mlx5e_cq_moder moder,
+ struct net_dim_cq_moder moder,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq)
{
@@ -1747,7 +1604,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
{
- struct mlx5e_cq_moder icocq_moder = {0, 0};
+ struct net_dim_cq_moder icocq_moder = {0, 0};
struct net_device *netdev = priv->netdev;
int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
@@ -1999,7 +1856,7 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
mlx5e_build_common_cq_param(priv, param);
- param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
@@ -2203,7 +2060,7 @@ static int mlx5e_rx_hash_fn(int hfunc)
MLX5_RX_HASH_FN_INVERTED_XOR8;
}
-static int mlx5e_bits_invert(unsigned long a, int size)
+int mlx5e_bits_invert(unsigned long a, int size)
{
int inv = 0;
int i;
@@ -2765,6 +2622,9 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
if (err)
return err;
+ /* Mark as unused given "Drop-RQ" packets never reach XDP */
+ xdp_rxq_info_unused(&rq->xdp_rxq);
+
rq->mdev = mdev;
return 0;
@@ -3084,9 +2944,6 @@ out:
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *cls_flower)
{
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return mlx5e_configure_flower(priv, cls_flower);
@@ -3104,7 +2961,7 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
{
struct mlx5e_priv *priv = cb_priv;
- if (!tc_can_offload(priv->netdev))
+ if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -3738,26 +3595,62 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
return features;
}
+static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
+ struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int irqn_not_used, eqn;
+ struct mlx5_eq *eq;
+ u32 eqe_count;
+
+ if (mlx5_vector2eqn(mdev, sq->cq.mcq.vector, &eqn, &irqn_not_used))
+ return false;
+
+ eq = mlx5_eqn2eq(mdev, eqn);
+ if (IS_ERR(eq))
+ return false;
+
+ netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
+ eqn, eq->cons_index, eq->irqn);
+
+ eqe_count = mlx5_eq_poll_irq_disabled(eq);
+ if (!eqe_count)
+ return false;
+
+ netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
+ sq->channel->stats.eq_rearm++;
+ return true;
+}
+
static void mlx5e_tx_timeout(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- bool sched_work = false;
+ bool reopen_channels = false;
int i;
netdev_err(dev, "TX timeout detected\n");
for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
+ struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
struct mlx5e_txqsq *sq = priv->txq2sq[i];
- if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
+ if (!netif_xmit_stopped(dev_queue))
continue;
- sched_work = true;
- clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
- i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
+ netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
+ i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
+ jiffies_to_usecs(jiffies - dev_queue->trans_start));
+
+ /* If we recover a lost interrupt, most likely TX timeout will
+ * be resolved, skip reopening channels
+ */
+ if (!mlx5e_tx_timeout_eq_recover(dev, sq)) {
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ reopen_channels = true;
+ }
}
- if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
+ if (reopen_channels && test_bit(MLX5E_STATE_OPENED, &priv->state))
schedule_work(&priv->tx_timeout_work);
}
@@ -4044,9 +3937,18 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
params->rx_cq_moderation.usec =
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
- if (params->rx_am_enabled)
- params->rx_cq_moderation =
- mlx5e_am_get_def_profile(cq_period_mode);
+ if (params->rx_dim_enabled) {
+ switch (cq_period_mode) {
+ case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
+ params->rx_cq_moderation =
+ net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE);
+ break;
+ case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
+ default:
+ params->rx_cq_moderation =
+ net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+ }
+ }
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
params->rx_cq_moderation.cq_period_mode ==
@@ -4108,7 +4010,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
mlx5e_set_tx_cq_mode_params(params, cq_period_mode);
@@ -4315,9 +4217,6 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_ipsec_cleanup(priv);
mlx5e_vxlan_cleanup(priv);
-
- if (priv->channels.params.xdp_prog)
- bpf_prog_put(priv->channels.params.xdp_prog);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
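
The stats rework in en_main.c above iterates a per-group callback table that this series adds in en_stats.c/h (282/12 lines in the diffstat, not shown). Its approximate shape, inferred from the loops above (the exact field set is an assumption):

    struct mlx5e_stats_grp {
            u16 update_stats_mask;  /* e.g. MLX5E_NDO_UPDATE_STATS, tested above */
            int (*get_num_stats)(struct mlx5e_priv *priv);   /* assumed */
            int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
            int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
            void (*update_stats)(struct mlx5e_priv *priv);   /* called above */
    };

    extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
    extern const int mlx5e_num_stats_grps;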
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 3409d86eb06b..363d8dcb7f17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -190,6 +190,63 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
return 0;
}
+static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_rep_sq *rep_sq, *tmp;
+ struct mlx5e_rep_priv *rpriv;
+
+ if (esw->mode != SRIOV_OFFLOADS)
+ return;
+
+ rpriv = mlx5e_rep_to_rep_priv(rep);
+ list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
+ mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
+ list_del(&rep_sq->list);
+ kfree(rep_sq);
+ }
+}
+
+static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ u16 *sqns_array, int sqns_num)
+{
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_rep_sq *rep_sq;
+ int err;
+ int i;
+
+ if (esw->mode != SRIOV_OFFLOADS)
+ return 0;
+
+ rpriv = mlx5e_rep_to_rep_priv(rep);
+ for (i = 0; i < sqns_num; i++) {
+ rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
+ if (!rep_sq) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ /* Add re-inject rule to the PF/representor sqs */
+ flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
+ rep->vport,
+ sqns_array[i]);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ kfree(rep_sq);
+ goto out_err;
+ }
+ rep_sq->send_to_vport_rule = flow_rule;
+ list_add(&rep_sq->list, &rpriv->vport_sqs_list);
+ }
+ return 0;
+
+out_err:
+ mlx5e_sqs2vport_stop(esw, rep);
+ return err;
+}
+
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -210,7 +267,7 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
sqs[num_sqs++] = c->sq[tc].sqn;
}
- err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
+ err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
kfree(sqs);
out:
@@ -225,7 +282,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
- mlx5_eswitch_sqs2vport_stop(esw, rep);
+ mlx5e_sqs2vport_stop(esw, rep);
}
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
@@ -238,7 +295,7 @@ static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
#endif
unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
DELAY_PROBE_TIME);
- struct net_device *netdev = rpriv->rep->netdev;
+ struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
@@ -259,7 +316,7 @@ static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
neigh_update.neigh_stats_work.work);
- struct net_device *netdev = rpriv->rep->netdev;
+ struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_neigh_hash_entry *nhe;
@@ -355,7 +412,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
neigh_update.netevent_nb);
struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- struct net_device *netdev = rpriv->rep->netdev;
+ struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_neigh_hash_entry *nhe = NULL;
struct mlx5e_neigh m_neigh = {};
@@ -483,7 +540,7 @@ out_err:
static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev);
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
unregister_netevent_notifier(&neigh_update->netevent_nb);
@@ -662,9 +719,6 @@ static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *cls_flower)
{
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return mlx5e_configure_flower(priv, cls_flower);
@@ -682,7 +736,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
{
struct mlx5e_priv *priv = cb_priv;
- if (!tc_can_offload(priv->netdev))
+ if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -827,7 +881,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
- params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
@@ -906,7 +960,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
err = PTR_ERR(flow_rule);
goto err_destroy_direct_tirs;
}
- rep->vport_rx_rule = flow_rule;
+ rpriv->vport_rx_rule = flow_rule;
err = mlx5e_tc_init(priv);
if (err)
@@ -915,7 +969,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
return 0;
err_del_flow_rule:
- mlx5_del_flow_rules(rep->vport_rx_rule);
+ mlx5_del_flow_rules(rpriv->vport_rx_rule);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
@@ -926,10 +980,9 @@ err_destroy_direct_rqts:
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
mlx5e_tc_cleanup(priv);
- mlx5_del_flow_rules(rep->vport_rx_rule);
+ mlx5_del_flow_rules(rpriv->vport_rx_rule);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_direct_rqts(priv);
}
@@ -969,10 +1022,10 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
/* e-Switch vport representors */
static int
-mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = netdev_priv(rep->netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
int err;
@@ -994,10 +1047,10 @@ err_remove_sqs:
}
static void
-mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = netdev_priv(rep->netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_remove_sqs_fwd_rules(priv);
@@ -1010,8 +1063,9 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
}
static int
-mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
+ struct mlx5e_rep_priv *uplink_rpriv;
struct mlx5e_rep_priv *rpriv;
struct net_device *netdev;
struct mlx5e_priv *upriv;
@@ -1021,7 +1075,7 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
if (!rpriv)
return -ENOMEM;
- netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv);
+ netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv);
if (!netdev) {
pr_warn("Failed to create representor netdev for vport %d\n",
rep->vport);
@@ -1029,8 +1083,10 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
return -EINVAL;
}
- rep->netdev = netdev;
+ rpriv->netdev = netdev;
rpriv->rep = rep;
+ rep->rep_if[REP_ETH].priv = rpriv;
+ INIT_LIST_HEAD(&rpriv->vport_sqs_list);
err = mlx5e_attach_netdev(netdev_priv(netdev));
if (err) {
@@ -1046,7 +1102,8 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
goto err_detach_netdev;
}
- upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
+ upriv = netdev_priv(uplink_rpriv->netdev);
err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
upriv);
if (err)
@@ -1078,16 +1135,19 @@ err_destroy_netdev:
}
static void
-mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
- struct net_device *netdev = rep->netdev;
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *uplink_rpriv;
void *ppriv = priv->ppriv;
struct mlx5e_priv *upriv;
- unregister_netdev(rep->netdev);
- upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+ unregister_netdev(netdev);
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
+ REP_ETH);
+ upriv = netdev_priv(uplink_rpriv->netdev);
tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
upriv);
mlx5e_rep_neigh_cleanup(rpriv);
@@ -1102,18 +1162,13 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int total_vfs = MLX5_TOTAL_VPORTS(mdev);
int vport;
- u8 mac[ETH_ALEN];
-
- mlx5_query_nic_vport_mac_address(mdev, 0, mac);
for (vport = 1; vport < total_vfs; vport++) {
- struct mlx5_eswitch_rep rep;
+ struct mlx5_eswitch_rep_if rep_if = {};
- rep.load = mlx5e_vport_rep_load;
- rep.unload = mlx5e_vport_rep_unload;
- rep.vport = vport;
- ether_addr_copy(rep.hw_id, mac);
- mlx5_eswitch_register_vport_rep(esw, vport, &rep);
+ rep_if.load = mlx5e_vport_rep_load;
+ rep_if.unload = mlx5e_vport_rep_unload;
+ mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
}
}
@@ -1125,21 +1180,24 @@ static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
int vport;
for (vport = 1; vport < total_vfs; vport++)
- mlx5_eswitch_unregister_vport_rep(esw, vport);
+ mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
}
void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_eswitch *esw = mdev->priv.eswitch;
- struct mlx5_eswitch_rep rep;
+ struct mlx5_eswitch_rep_if rep_if;
+ struct mlx5e_rep_priv *rpriv;
+
+ rpriv = priv->ppriv;
+ rpriv->netdev = priv->netdev;
- mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
- rep.load = mlx5e_nic_rep_load;
- rep.unload = mlx5e_nic_rep_unload;
- rep.vport = FDB_UPLINK_VPORT;
- rep.netdev = priv->netdev;
- mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport*/
+ rep_if.load = mlx5e_nic_rep_load;
+ rep_if.unload = mlx5e_nic_rep_unload;
+ rep_if.priv = rpriv;
+ INIT_LIST_HEAD(&rpriv->vport_sqs_list);
+ mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport */
mlx5e_rep_register_vf_vports(priv); /* VFs vports */
}
@@ -1150,7 +1208,7 @@ void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
struct mlx5_eswitch *esw = mdev->priv.eswitch;
mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
- mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF*/
+ mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF */
}
void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 5659ed9f51e6..b9b481f2833a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -56,8 +56,17 @@ struct mlx5e_neigh_update_table {
struct mlx5e_rep_priv {
struct mlx5_eswitch_rep *rep;
struct mlx5e_neigh_update_table neigh_update;
+ struct net_device *netdev;
+ struct mlx5_flow_handle *vport_rx_rule;
+ struct list_head vport_sqs_list;
};
+static inline
+struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
+{
+ return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv;
+}
+
struct mlx5e_neigh {
struct net_device *dev;
union {
@@ -124,6 +133,11 @@ struct mlx5e_encap_entry {
int encap_size;
};
+struct mlx5e_rep_sq {
+ struct mlx5_flow_handle *send_to_vport_rule;
+ struct list_head list;
+};
+
void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev);
void mlx5e_register_vport_reps(struct mlx5e_priv *priv);
void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv);
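The accessor above works because the load path now stores the rep private data behind the new per-interface rep_if array; a minimal round-trip, using only calls shown in this diff:

/* at vport-rep load time (see mlx5e_vport_rep_load above) */
rep->rep_if[REP_ETH].priv = rpriv;

/* later, from any per-rep callback, replacing the old rep->netdev field */
struct mlx5e_rep_priv *loaded = mlx5e_rep_to_rep_priv(rep);
struct net_device *netdev = loaded->netdev;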
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 5b499c7a698f..0d4bb0688faa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -495,8 +495,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
mlx5_cqwq_pop(&cq->wq);
if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
- WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
- cqe->op_own);
+ netdev_WARN_ONCE(cq->channel->netdev,
+ "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own);
return;
}
@@ -506,9 +506,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
}
if (unlikely(icowi->opcode != MLX5_OPCODE_NOP))
- WARN_ONCE(true,
- "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
- icowi->opcode);
+ netdev_WARN_ONCE(cq->channel->netdev,
+ "Bad OPCODE in ICOSQ WQE info: 0x%x\n", icowi->opcode);
}
static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
@@ -632,7 +631,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return;
}
- if (is_last_ethertype_ip(skb, &network_depth)) {
+ if (likely(is_last_ethertype_ip(skb, &network_depth))) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
if (network_depth > ETH_HLEN)
@@ -812,6 +811,7 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + *len;
xdp.data_hard_start = va;
+ xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(prog, &xdp);
switch (act) {
@@ -1175,7 +1175,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
+ struct hwtstamp_config *tstamp;
struct net_device *netdev;
+ struct mlx5e_priv *priv;
char *pseudo_header;
u32 qpn;
u8 *dgid;
@@ -1194,6 +1196,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
return;
}
+ priv = mlx5i_epriv(netdev);
+ tstamp = &priv->tstamp;
+
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
if ((!g) || dgid[0] != 0xff)
@@ -1214,7 +1219,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
- if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
+ if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
skb_hwtstamps(skb)->hwtstamp =
mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
deleted file mode 100644
index b69a705fd787..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "en.h"
-
-/* Adaptive moderation profiles */
-#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
-#define MLX5E_RX_AM_DEF_PROFILE_CQE 1
-#define MLX5E_RX_AM_DEF_PROFILE_EQE 1
-#define MLX5E_PARAMS_AM_NUM_PROFILES 5
-
-/* All profiles sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */
-#define MLX5_AM_EQE_PROFILES { \
- {1, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {8, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {64, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-}
-
-#define MLX5_AM_CQE_PROFILES { \
- {2, 256}, \
- {8, 128}, \
- {16, 64}, \
- {32, 64}, \
- {64, 64} \
-}
-
-static const struct mlx5e_cq_moder
-profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
- MLX5_AM_EQE_PROFILES,
- MLX5_AM_CQE_PROFILES,
-};
-
-static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
-{
- struct mlx5e_cq_moder cq_moder;
-
- cq_moder = profile[cq_period_mode][ix];
- cq_moder.cq_period_mode = cq_period_mode;
- return cq_moder;
-}
-
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
-{
- int default_profile_ix;
-
- if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE;
- else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
- default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
-
- return mlx5e_am_get_profile(rx_cq_period_mode, default_profile_ix);
-}
-
-/* Adaptive moderation logic */
-enum {
- MLX5E_AM_START_MEASURE,
- MLX5E_AM_MEASURE_IN_PROGRESS,
- MLX5E_AM_APPLY_NEW_PROFILE,
-};
-
-enum {
- MLX5E_AM_PARKING_ON_TOP,
- MLX5E_AM_PARKING_TIRED,
- MLX5E_AM_GOING_RIGHT,
- MLX5E_AM_GOING_LEFT,
-};
-
-enum {
- MLX5E_AM_STATS_WORSE,
- MLX5E_AM_STATS_SAME,
- MLX5E_AM_STATS_BETTER,
-};
-
-enum {
- MLX5E_AM_STEPPED,
- MLX5E_AM_TOO_TIRED,
- MLX5E_AM_ON_EDGE,
-};
-
-static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
-{
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- return true;
- case MLX5E_AM_GOING_RIGHT:
- return (am->steps_left > 1) && (am->steps_right == 1);
- default: /* MLX5E_AM_GOING_LEFT */
- return (am->steps_right > 1) && (am->steps_left == 1);
- }
-}
-
-static void mlx5e_am_turn(struct mlx5e_rx_am *am)
-{
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- break;
- case MLX5E_AM_GOING_RIGHT:
- am->tune_state = MLX5E_AM_GOING_LEFT;
- am->steps_left = 0;
- break;
- case MLX5E_AM_GOING_LEFT:
- am->tune_state = MLX5E_AM_GOING_RIGHT;
- am->steps_right = 0;
- break;
- }
-}
-
-static int mlx5e_am_step(struct mlx5e_rx_am *am)
-{
- if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2))
- return MLX5E_AM_TOO_TIRED;
-
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- break;
- case MLX5E_AM_GOING_RIGHT:
- if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
- return MLX5E_AM_ON_EDGE;
- am->profile_ix++;
- am->steps_right++;
- break;
- case MLX5E_AM_GOING_LEFT:
- if (am->profile_ix == 0)
- return MLX5E_AM_ON_EDGE;
- am->profile_ix--;
- am->steps_left++;
- break;
- }
-
- am->tired++;
- return MLX5E_AM_STEPPED;
-}
-
-static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am)
-{
- am->steps_right = 0;
- am->steps_left = 0;
- am->tired = 0;
- am->tune_state = MLX5E_AM_PARKING_ON_TOP;
-}
-
-static void mlx5e_am_park_tired(struct mlx5e_rx_am *am)
-{
- am->steps_right = 0;
- am->steps_left = 0;
- am->tune_state = MLX5E_AM_PARKING_TIRED;
-}
-
-static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
-{
- am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT :
- MLX5E_AM_GOING_RIGHT;
- mlx5e_am_step(am);
-}
-
-#define IS_SIGNIFICANT_DIFF(val, ref) \
- (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
-
-static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
- struct mlx5e_rx_am_stats *prev)
-{
- if (!prev->bpms)
- return curr->bpms ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_SAME;
-
- if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
- return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
-
- if (!prev->ppms)
- return curr->ppms ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_SAME;
-
- if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
- return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
- if (!prev->epms)
- return MLX5E_AM_STATS_SAME;
-
- if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
- return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
-
- return MLX5E_AM_STATS_SAME;
-}
-
-static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats,
- struct mlx5e_rx_am *am)
-{
- int prev_state = am->tune_state;
- int prev_ix = am->profile_ix;
- int stats_res;
- int step_res;
-
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
- if (stats_res != MLX5E_AM_STATS_SAME)
- mlx5e_am_exit_parking(am);
- break;
-
- case MLX5E_AM_PARKING_TIRED:
- am->tired--;
- if (!am->tired)
- mlx5e_am_exit_parking(am);
- break;
-
- case MLX5E_AM_GOING_RIGHT:
- case MLX5E_AM_GOING_LEFT:
- stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
- if (stats_res != MLX5E_AM_STATS_BETTER)
- mlx5e_am_turn(am);
-
- if (mlx5e_am_on_top(am)) {
- mlx5e_am_park_on_top(am);
- break;
- }
-
- step_res = mlx5e_am_step(am);
- switch (step_res) {
- case MLX5E_AM_ON_EDGE:
- mlx5e_am_park_on_top(am);
- break;
- case MLX5E_AM_TOO_TIRED:
- mlx5e_am_park_tired(am);
- break;
- }
-
- break;
- }
-
- if ((prev_state != MLX5E_AM_PARKING_ON_TOP) ||
- (am->tune_state != MLX5E_AM_PARKING_ON_TOP))
- am->prev_stats = *curr_stats;
-
- return am->profile_ix != prev_ix;
-}
-
-static void mlx5e_am_sample(struct mlx5e_rq *rq,
- struct mlx5e_rx_am_sample *s)
-{
- s->time = ktime_get();
- s->pkt_ctr = rq->stats.packets;
- s->byte_ctr = rq->stats.bytes;
- s->event_ctr = rq->cq.event_ctr;
-}
-
-#define MLX5E_AM_NEVENTS 64
-#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
-#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
-
-static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
- struct mlx5e_rx_am_sample *end,
- struct mlx5e_rx_am_stats *curr_stats)
-{
- /* u32 holds up to 71 minutes, should be enough */
- u32 delta_us = ktime_us_delta(end->time, start->time);
- u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
- u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
- start->byte_ctr);
-
- if (!delta_us)
- return;
-
- curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
- curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
- curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC,
- delta_us);
-}
-
-void mlx5e_rx_am_work(struct work_struct *work)
-{
- struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am,
- work);
- struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
- struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
-
- mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
- cur_profile.usec, cur_profile.pkts);
-
- am->state = MLX5E_AM_START_MEASURE;
-}
-
-void mlx5e_rx_am(struct mlx5e_rq *rq)
-{
- struct mlx5e_rx_am *am = &rq->am;
- struct mlx5e_rx_am_sample end_sample;
- struct mlx5e_rx_am_stats curr_stats;
- u16 nevents;
-
- switch (am->state) {
- case MLX5E_AM_MEASURE_IN_PROGRESS:
- nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr,
- am->start_sample.event_ctr);
- if (nevents < MLX5E_AM_NEVENTS)
- break;
- mlx5e_am_sample(rq, &end_sample);
- mlx5e_am_calc_stats(&am->start_sample, &end_sample,
- &curr_stats);
- if (mlx5e_am_decision(&curr_stats, am)) {
- am->state = MLX5E_AM_APPLY_NEW_PROFILE;
- schedule_work(&am->work);
- break;
- }
- /* fall through */
- case MLX5E_AM_START_MEASURE:
- mlx5e_am_sample(rq, &am->start_sample);
- am->state = MLX5E_AM_MEASURE_IN_PROGRESS;
- break;
- case MLX5E_AM_APPLY_NEW_PROFILE:
- break;
- }
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index b74ddc7984bc..5f0f3493d747 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -71,6 +71,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
};
@@ -99,6 +100,72 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
+static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_sw_stats temp, *s = &temp;
+ struct mlx5e_rq_stats *rq_stats;
+ struct mlx5e_sq_stats *sq_stats;
+ struct mlx5e_ch_stats *ch_stats;
+ int i, j;
+
+ memset(s, 0, sizeof(*s));
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_channel *c = priv->channels.c[i];
+
+ rq_stats = &c->rq.stats;
+ ch_stats = &c->stats;
+
+ s->rx_packets += rq_stats->packets;
+ s->rx_bytes += rq_stats->bytes;
+ s->rx_lro_packets += rq_stats->lro_packets;
+ s->rx_lro_bytes += rq_stats->lro_bytes;
+ s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
+ s->rx_csum_none += rq_stats->csum_none;
+ s->rx_csum_complete += rq_stats->csum_complete;
+ s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
+ s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
+ s->rx_xdp_drop += rq_stats->xdp_drop;
+ s->rx_xdp_tx += rq_stats->xdp_tx;
+ s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
+ s->rx_wqe_err += rq_stats->wqe_err;
+ s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
+ s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
+ s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
+ s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
+ s->rx_page_reuse += rq_stats->page_reuse;
+ s->rx_cache_reuse += rq_stats->cache_reuse;
+ s->rx_cache_full += rq_stats->cache_full;
+ s->rx_cache_empty += rq_stats->cache_empty;
+ s->rx_cache_busy += rq_stats->cache_busy;
+ s->rx_cache_waive += rq_stats->cache_waive;
+ s->ch_eq_rearm += ch_stats->eq_rearm;
+
+ for (j = 0; j < priv->channels.params.num_tc; j++) {
+ sq_stats = &c->sq[j].stats;
+
+ s->tx_packets += sq_stats->packets;
+ s->tx_bytes += sq_stats->bytes;
+ s->tx_tso_packets += sq_stats->tso_packets;
+ s->tx_tso_bytes += sq_stats->tso_bytes;
+ s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+ s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
+ s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
+ s->tx_queue_stopped += sq_stats->stopped;
+ s->tx_queue_wake += sq_stats->wake;
+ s->tx_queue_dropped += sq_stats->dropped;
+ s->tx_xmit_more += sq_stats->xmit_more;
+ s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+ s->tx_csum_none += sq_stats->csum_none;
+ s->tx_csum_partial += sq_stats->csum_partial;
+ }
+ }
+
+ s->link_down_events_phy = MLX5_GET(ppcnt_reg,
+ priv->stats.pport.phy_counters,
+ counter_set.phys_layer_cntrs.link_down_events);
+ memcpy(&priv->stats.sw, s, sizeof(*s));
+}
+
static const struct counter_desc q_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};
@@ -128,6 +195,22 @@ static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
+static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
+ int err;
+
+ if (!priv->q_counter)
+ return;
+
+ err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
+ if (err)
+ return;
+
+ qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+}
+
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
{ "rx_vport_unicast_packets",
@@ -200,6 +283,19 @@ static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ u32 *out = (u32 *)priv->stats.vport.query_vport_out;
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+ MLX5_SET(query_vport_counter_in, in, op_mod, 0);
+ MLX5_SET(query_vport_counter_in, in, other_vport, 0);
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+}
+
#define PPORT_802_3_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
@@ -252,6 +348,20 @@ static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->IEEE_802_3_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PPORT_2863_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
@@ -289,6 +399,20 @@ static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->RFC_2863_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PPORT_2819_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
@@ -336,6 +460,20 @@ static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->RFC_2819_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PPORT_PHY_STATISTICAL_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.phys_layer_statistical_cntrs.c##_high)
@@ -376,6 +514,27 @@ static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
+static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->phy_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
+ return;
+
+ out = pstats->phy_statistical_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PPORT_ETH_EXT_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
@@ -418,6 +577,23 @@ static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
+ return;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->eth_ext_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PCIE_PERF_OFF(c) \
MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
@@ -505,6 +681,22 @@ static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
+ void *out;
+
+ if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
+ return;
+
+ out = pcie_stats->pcie_perf_counters;
+ MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
+}
+
#define PPORT_PER_PRIO_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_per_prio_grp_data_layout.c##_high)
@@ -656,6 +848,47 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
return idx;
}
+static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
+{
+ return mlx5e_grp_per_prio_traffic_get_num_stats(priv) +
+ mlx5e_grp_per_prio_pfc_get_num_stats(priv);
+}
+
+static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
+ idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
+ return idx;
+}
+
+static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
+ idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
+ return idx;
+}
+
+static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ int prio;
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ out = pstats->per_prio_counters[prio];
+ MLX5_SET(ppcnt_reg, in, prio_tc, prio);
+ mlx5_core_access_reg(mdev, in, sz, out, sz,
+ MLX5_REG_PPCNT, 0, 0);
+ }
+}
+
static const struct counter_desc mlx5e_pme_status_desc[] = {
{ "module_unplug", 8 },
};
@@ -723,6 +956,11 @@ static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx + mlx5e_ipsec_get_stats(priv, data + idx);
}
+static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
+{
+ mlx5e_ipsec_update_stats(priv);
+}
+
static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
@@ -767,12 +1005,18 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
};
+static const struct counter_desc ch_stats_desc[] = {
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
+};
+
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
+#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
{
return (NUM_RQ_STATS * priv->channels.num) +
+ (NUM_CH_STATS * priv->channels.num) +
(NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc);
}
@@ -785,6 +1029,11 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
for (i = 0; i < priv->channels.num; i++)
+ for (j = 0; j < NUM_CH_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ ch_stats_desc[j].format, i);
+
+ for (i = 0; i < priv->channels.num; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
@@ -808,6 +1057,12 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
for (i = 0; i < channels->num; i++)
+ for (j = 0; j < NUM_CH_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&channels->c[i]->stats,
+ ch_stats_desc, j);
+
+ for (i = 0; i < channels->num; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
@@ -823,61 +1078,71 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+/* The stats groups order is opposite to the order of the update_stats() calls */
const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
{
.get_num_stats = mlx5e_grp_sw_get_num_stats,
.fill_strings = mlx5e_grp_sw_fill_strings,
.fill_stats = mlx5e_grp_sw_fill_stats,
+ .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+ .update_stats = mlx5e_grp_sw_update_stats,
},
{
.get_num_stats = mlx5e_grp_q_get_num_stats,
.fill_strings = mlx5e_grp_q_fill_strings,
.fill_stats = mlx5e_grp_q_fill_stats,
+ .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+ .update_stats = mlx5e_grp_q_update_stats,
},
{
.get_num_stats = mlx5e_grp_vport_get_num_stats,
.fill_strings = mlx5e_grp_vport_fill_strings,
.fill_stats = mlx5e_grp_vport_fill_stats,
+ .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+ .update_stats = mlx5e_grp_vport_update_stats,
},
{
.get_num_stats = mlx5e_grp_802_3_get_num_stats,
.fill_strings = mlx5e_grp_802_3_fill_strings,
.fill_stats = mlx5e_grp_802_3_fill_stats,
+ .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+ .update_stats = mlx5e_grp_802_3_update_stats,
},
{
.get_num_stats = mlx5e_grp_2863_get_num_stats,
.fill_strings = mlx5e_grp_2863_fill_strings,
.fill_stats = mlx5e_grp_2863_fill_stats,
+ .update_stats = mlx5e_grp_2863_update_stats,
},
{
.get_num_stats = mlx5e_grp_2819_get_num_stats,
.fill_strings = mlx5e_grp_2819_fill_strings,
.fill_stats = mlx5e_grp_2819_fill_stats,
+ .update_stats = mlx5e_grp_2819_update_stats,
},
{
.get_num_stats = mlx5e_grp_phy_get_num_stats,
.fill_strings = mlx5e_grp_phy_fill_strings,
.fill_stats = mlx5e_grp_phy_fill_stats,
+ .update_stats = mlx5e_grp_phy_update_stats,
},
{
.get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
.fill_strings = mlx5e_grp_eth_ext_fill_strings,
.fill_stats = mlx5e_grp_eth_ext_fill_stats,
+ .update_stats = mlx5e_grp_eth_ext_update_stats,
},
{
.get_num_stats = mlx5e_grp_pcie_get_num_stats,
.fill_strings = mlx5e_grp_pcie_fill_strings,
.fill_stats = mlx5e_grp_pcie_fill_stats,
+ .update_stats = mlx5e_grp_pcie_update_stats,
},
{
- .get_num_stats = mlx5e_grp_per_prio_traffic_get_num_stats,
- .fill_strings = mlx5e_grp_per_prio_traffic_fill_strings,
- .fill_stats = mlx5e_grp_per_prio_traffic_fill_stats,
- },
- {
- .get_num_stats = mlx5e_grp_per_prio_pfc_get_num_stats,
- .fill_strings = mlx5e_grp_per_prio_pfc_fill_strings,
- .fill_stats = mlx5e_grp_per_prio_pfc_fill_stats,
+ .get_num_stats = mlx5e_grp_per_prio_get_num_stats,
+ .fill_strings = mlx5e_grp_per_prio_fill_strings,
+ .fill_stats = mlx5e_grp_per_prio_fill_stats,
+ .update_stats = mlx5e_grp_per_prio_update_stats,
},
{
.get_num_stats = mlx5e_grp_pme_get_num_stats,
@@ -888,6 +1153,7 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
.get_num_stats = mlx5e_grp_ipsec_get_num_stats,
.fill_strings = mlx5e_grp_ipsec_fill_strings,
.fill_stats = mlx5e_grp_ipsec_fill_stats,
+ .update_stats = mlx5e_grp_ipsec_update_stats,
},
{
.get_num_stats = mlx5e_grp_channels_get_num_stats,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index d679e21f686e..0b3320a2b072 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -44,6 +44,7 @@
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)
struct counter_desc {
char format[ETH_GSTRING_LEN];
@@ -88,6 +89,7 @@ struct mlx5e_sw_stats {
u64 rx_cache_empty;
u64 rx_cache_busy;
u64 rx_cache_waive;
+ u64 ch_eq_rearm;
/* Special handling counters */
u64 link_down_events_phy;
@@ -192,6 +194,10 @@ struct mlx5e_sq_stats {
u64 dropped;
};
+struct mlx5e_ch_stats {
+ u64 eq_rearm;
+};
+
struct mlx5e_stats {
struct mlx5e_sw_stats sw;
struct mlx5e_qcounter_stats qcnt;
@@ -201,11 +207,17 @@ struct mlx5e_stats {
struct mlx5e_pcie_stats pcie;
};
+enum {
+ MLX5E_NDO_UPDATE_STATS = BIT(0x1),
+};
+
struct mlx5e_priv;
struct mlx5e_stats_grp {
+ u16 update_stats_mask;
int (*get_num_stats)(struct mlx5e_priv *priv);
int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
+ void (*update_stats)(struct mlx5e_priv *priv);
};
extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
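The new update_stats_mask/update_stats hooks let a caller refresh only the groups a given context needs (ndo_get_stats64 versus a full ethtool dump). A minimal sketch of walking the table under those rules, assuming a hypothetical mlx5e_num_stats_grps count; per the comment in en_stats.c the array is ordered opposite to the desired call order, so it is walked in reverse:

/* Illustrative sketch only -- mlx5e_num_stats_grps is an assumed count,
 * not part of the hunks above.
 */
static void mlx5e_update_stats_sketch(struct mlx5e_priv *priv, bool ndo_only)
{
        int i;

        for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
                if (mlx5e_stats_grps[i].update_stats &&
                    (!ndo_only || (mlx5e_stats_grps[i].update_stats_mask &
                                   MLX5E_NDO_UPDATE_STATS)))
                        mlx5e_stats_grps[i].update_stats(priv);
}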
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 55979ec2e88a..fd98b0dc610f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -51,17 +51,22 @@
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"
+#include "fs_core.h"
struct mlx5_nic_flow_attr {
u32 action;
u32 flow_tag;
u32 mod_hdr_id;
+ u32 hairpin_tirn;
+ struct mlx5_flow_table *hairpin_ft;
};
enum {
MLX5E_TC_FLOW_ESWITCH = BIT(0),
MLX5E_TC_FLOW_NIC = BIT(1),
MLX5E_TC_FLOW_OFFLOADED = BIT(2),
+ MLX5E_TC_FLOW_HAIRPIN = BIT(3),
+ MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(4),
};
struct mlx5e_tc_flow {
@@ -71,6 +76,7 @@ struct mlx5e_tc_flow {
struct mlx5_flow_handle *rule;
struct list_head encap; /* flows sharing the same encap ID */
struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
+ struct list_head hairpin; /* flows sharing the same hairpin */
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -93,6 +99,32 @@ enum {
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
+struct mlx5e_hairpin {
+ struct mlx5_hairpin *pair;
+
+ struct mlx5_core_dev *func_mdev;
+ struct mlx5e_priv *func_priv;
+ u32 tdn;
+ u32 tirn;
+
+ int num_channels;
+ struct mlx5e_rqt indir_rqt;
+ u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_ttc_table ttc;
+};
+
+struct mlx5e_hairpin_entry {
+ /* a node of a hash table which keeps all the hairpin entries */
+ struct hlist_node hairpin_hlist;
+
+ /* flows sharing the same hairpin */
+ struct list_head flows;
+
+ u16 peer_vhca_id;
+ u8 prio;
+ struct mlx5e_hairpin *hp;
+};
+
struct mod_hdr_key {
int num_actions;
void *actions;
@@ -222,6 +254,417 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
}
}
+static
+struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
+{
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+
+ netdev = __dev_get_by_index(net, ifindex);
+ priv = netdev_priv(netdev);
+ return priv->mdev;
+}
+
+static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
+{
+ u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
+ void *tirc;
+ int err;
+
+ err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
+ if (err)
+ goto alloc_tdn_err;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
+ MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
+
+ err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
+ if (err)
+ goto create_tir_err;
+
+ return 0;
+
+create_tir_err:
+ mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
+alloc_tdn_err:
+ return err;
+}
+
+static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
+{
+ mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
+ mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
+}
+
+static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
+{
+ u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
+ struct mlx5e_priv *priv = hp->func_priv;
+ int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
+
+ mlx5e_build_default_indir_rqt(indirection_rqt, sz,
+ hp->num_channels);
+
+ for (i = 0; i < sz; i++) {
+ ix = i;
+ if (priv->channels.params.rss_hfunc == ETH_RSS_HASH_XOR)
+ ix = mlx5e_bits_invert(i, ilog2(sz));
+ ix = indirection_rqt[ix];
+ rqn = hp->pair->rqn[ix];
+ MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
+ }
+}
+
+static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
+{
+ int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
+ struct mlx5e_priv *priv = hp->func_priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ void *rqtc;
+ u32 *in;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
+ mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
+
+ err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
+ if (!err)
+ hp->indir_rqt.enabled = true;
+
+ kvfree(in);
+ return err;
+}
+
+static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
+{
+ struct mlx5e_priv *priv = hp->func_priv;
+ u32 in[MLX5_ST_SZ_DW(create_tir_in)];
+ int tt, i, err;
+ void *tirc;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
+ mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
+
+ err = mlx5_core_create_tir(hp->func_mdev, in,
+ MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
+ if (err) {
+ mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
+ goto err_destroy_tirs;
+ }
+ }
+ return 0;
+
+err_destroy_tirs:
+ for (i = 0; i < tt; i++)
+ mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
+ return err;
+}
+
+static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
+{
+ int tt;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
+}
+
+static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
+ struct ttc_params *ttc_params)
+{
+ struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+ int tt;
+
+ memset(ttc_params, 0, sizeof(*ttc_params));
+
+ ttc_params->any_tt_tirn = hp->tirn;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
+
+ ft_attr->max_fte = MLX5E_NUM_TT;
+ ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
+ ft_attr->prio = MLX5E_TC_PRIO;
+}
+
+static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
+{
+ struct mlx5e_priv *priv = hp->func_priv;
+ struct ttc_params ttc_params;
+ int err;
+
+ err = mlx5e_hairpin_create_indirect_rqt(hp);
+ if (err)
+ return err;
+
+ err = mlx5e_hairpin_create_indirect_tirs(hp);
+ if (err)
+ goto err_create_indirect_tirs;
+
+ mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
+ err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
+ if (err)
+ goto err_create_ttc_table;
+
+ netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
+ hp->num_channels, hp->ttc.ft.t->id);
+
+ return 0;
+
+err_create_ttc_table:
+ mlx5e_hairpin_destroy_indirect_tirs(hp);
+err_create_indirect_tirs:
+ mlx5e_destroy_rqt(priv, &hp->indir_rqt);
+
+ return err;
+}
+
+static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
+{
+ struct mlx5e_priv *priv = hp->func_priv;
+
+ mlx5e_destroy_ttc_table(priv, &hp->ttc);
+ mlx5e_hairpin_destroy_indirect_tirs(hp);
+ mlx5e_destroy_rqt(priv, &hp->indir_rqt);
+}
+
+static struct mlx5e_hairpin *
+mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
+ int peer_ifindex)
+{
+ struct mlx5_core_dev *func_mdev, *peer_mdev;
+ struct mlx5e_hairpin *hp;
+ struct mlx5_hairpin *pair;
+ int err;
+
+ hp = kzalloc(sizeof(*hp), GFP_KERNEL);
+ if (!hp)
+ return ERR_PTR(-ENOMEM);
+
+ func_mdev = priv->mdev;
+ peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+
+ pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
+ if (IS_ERR(pair)) {
+ err = PTR_ERR(pair);
+ goto create_pair_err;
+ }
+ hp->pair = pair;
+ hp->func_mdev = func_mdev;
+ hp->func_priv = priv;
+ hp->num_channels = params->num_channels;
+
+ err = mlx5e_hairpin_create_transport(hp);
+ if (err)
+ goto create_transport_err;
+
+ if (hp->num_channels > 1) {
+ err = mlx5e_hairpin_rss_init(hp);
+ if (err)
+ goto rss_init_err;
+ }
+
+ return hp;
+
+rss_init_err:
+ mlx5e_hairpin_destroy_transport(hp);
+create_transport_err:
+ mlx5_core_hairpin_destroy(hp->pair);
+create_pair_err:
+ kfree(hp);
+ return ERR_PTR(err);
+}
+
+static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
+{
+ if (hp->num_channels > 1)
+ mlx5e_hairpin_rss_cleanup(hp);
+ mlx5e_hairpin_destroy_transport(hp);
+ mlx5_core_hairpin_destroy(hp->pair);
+ kvfree(hp);
+}
+
+static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
+{
+ return (peer_vhca_id << 16 | prio);
+}
+
+static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
+ u16 peer_vhca_id, u8 prio)
+{
+ struct mlx5e_hairpin_entry *hpe;
+ u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
+
+ hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
+ hairpin_hlist, hash_key) {
+ if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
+ return hpe;
+ }
+
+ return NULL;
+}
+
+#define UNKNOWN_MATCH_PRIO 8
+
+static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec, u8 *match_prio)
+{
+ void *headers_c, *headers_v;
+ u8 prio_val, prio_mask = 0;
+ bool vlan_present;
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
+ netdev_warn(priv->netdev,
+ "only PCP trust state supported for hairpin\n");
+ return -EOPNOTSUPP;
+ }
+#endif
+ headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+
+ vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
+ if (vlan_present) {
+ prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
+ prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
+ }
+
+ if (!vlan_present || !prio_mask) {
+ prio_val = UNKNOWN_MATCH_PRIO;
+ } else if (prio_mask != 0x7) {
+ netdev_warn(priv->netdev,
+ "masked priority match not supported for hairpin\n");
+ return -EOPNOTSUPP;
+ }
+
+ *match_prio = prio_val;
+ return 0;
+}
+
+static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ int peer_ifindex = parse_attr->mirred_ifindex;
+ struct mlx5_hairpin_params params;
+ struct mlx5_core_dev *peer_mdev;
+ struct mlx5e_hairpin_entry *hpe;
+ struct mlx5e_hairpin *hp;
+ u64 link_speed64;
+ u32 link_speed;
+ u8 match_prio;
+ u16 peer_id;
+ int err;
+
+ peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+ if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
+ netdev_warn(priv->netdev, "hairpin is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
+ err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio);
+ if (err)
+ return err;
+ hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
+ if (hpe)
+ goto attach_flow;
+
+ hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
+ if (!hpe)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&hpe->flows);
+ hpe->peer_vhca_id = peer_id;
+ hpe->prio = match_prio;
+
+ params.log_data_size = 15;
+ params.log_data_size = min_t(u8, params.log_data_size,
+ MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
+ params.log_data_size = max_t(u8, params.log_data_size,
+ MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
+
+ params.log_num_packets = params.log_data_size -
+ MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
+ params.log_num_packets = min_t(u8, params.log_num_packets,
+ MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
+
+ params.q_counter = priv->q_counter;
+ /* set hairpin pair per each 50Gbs share of the link */
+ mlx5e_get_max_linkspeed(priv->mdev, &link_speed);
+ link_speed = max_t(u32, link_speed, 50000);
+ link_speed64 = link_speed;
+ do_div(link_speed64, 50000);
+ params.num_channels = link_speed64;
+
+ hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
+ if (IS_ERR(hp)) {
+ err = PTR_ERR(hp);
+ goto create_hairpin_err;
+ }
+
+ netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
+ hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
+ hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
+
+ hpe->hp = hp;
+ hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
+ hash_hairpin_info(peer_id, match_prio));
+
+attach_flow:
+ if (hpe->hp->num_channels > 1) {
+ flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
+ flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
+ } else {
+ flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
+ }
+ list_add(&flow->hairpin, &hpe->flows);
+
+ return 0;
+
+create_hairpin_err:
+ kfree(hpe);
+ return err;
+}
+
+static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct list_head *next = flow->hairpin.next;
+
+ list_del(&flow->hairpin);
+
+ /* no more hairpin flows for us, release the hairpin pair */
+ if (list_empty(next)) {
+ struct mlx5e_hairpin_entry *hpe;
+
+ hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
+
+ netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
+ hpe->hp->pair->peer_mdev->priv.name);
+
+ mlx5e_hairpin_destroy(hpe->hp);
+ hash_del(&hpe->hairpin_hlist);
+ kfree(hpe);
+ }
+}
+
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -229,7 +672,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_core_dev *dev = priv->mdev;
- struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {
.action = attr->action,
.flow_tag = attr->flow_tag,
@@ -238,18 +681,37 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5_fc *counter = NULL;
struct mlx5_flow_handle *rule;
bool table_created = false;
- int err;
+ int err, dest_ix = 0;
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.vlan.ft.t;
- } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(dev, true);
- if (IS_ERR(counter))
- return ERR_CAST(counter);
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
+ err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_add_hairpin_flow;
+ }
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dest_ix].ft = attr->hairpin_ft;
+ } else {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest[dest_ix].tir_num = attr->hairpin_tirn;
+ }
+ dest_ix++;
+ } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dest_ix].ft = priv->fs.vlan.ft.t;
+ dest_ix++;
+ }
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter = counter;
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ counter = mlx5_fc_create(dev, true);
+ if (IS_ERR(counter)) {
+ rule = ERR_CAST(counter);
+ goto err_fc_create;
+ }
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[dest_ix].counter = counter;
+ dest_ix++;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
@@ -279,7 +741,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
MLX5E_TC_PRIO,
tc_tbl_size,
MLX5E_TC_TABLE_NUM_GROUPS,
- 0, 0);
+ MLX5E_TC_FT_LEVEL, 0);
if (IS_ERR(priv->fs.tc.t)) {
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
@@ -292,7 +754,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
- &flow_act, &dest, 1);
+ &flow_act, dest, dest_ix);
if (IS_ERR(rule))
goto err_add_rule;
@@ -309,7 +771,10 @@ err_create_ft:
mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
mlx5_fc_destroy(dev, counter);
-
+err_fc_create:
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+ mlx5e_hairpin_flow_del(priv, flow);
+err_add_hairpin_flow:
return rule;
}
@@ -330,6 +795,9 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
+
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+ mlx5e_hairpin_flow_del(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@@ -617,7 +1085,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
FLOW_DISSECTOR_KEY_ENC_PORTS,
f->mask);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ struct net_device *up_dev = uplink_rpriv->netdev;
struct mlx5e_priv *up_priv = netdev_priv(up_dev);
/* Full udp dst port must be given */
@@ -1421,6 +1890,20 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
return true;
}
+static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
+{
+ struct mlx5_core_dev *fmdev, *pmdev;
+ u16 func_id, peer_id;
+
+ fmdev = priv->mdev;
+ pmdev = peer_priv->mdev;
+
+ func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
+ peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
+
+ return (func_id == peer_id);
+}
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow)
@@ -1465,6 +1948,23 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EOPNOTSUPP;
}
+ if (is_tcf_mirred_egress_redirect(a)) {
+ struct net_device *peer_dev = tcf_mirred_dev(a);
+
+ if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
+ same_hw_devs(priv, netdev_priv(peer_dev))) {
+ parse_attr->mirred_ifindex = peer_dev->ifindex;
+ flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ } else {
+ netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
+ peer_dev->name);
+ return -EINVAL;
+ }
+ continue;
+ }
+
if (is_tcf_skbedit_mark(a)) {
u32 mark = tcf_skbedit_mark(a);
@@ -1507,6 +2007,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
int *out_ttl)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_rep_priv *uplink_rpriv;
struct rtable *rt;
struct neighbour *n = NULL;
@@ -1520,9 +2021,10 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
#else
return -EOPNOTSUPP;
#endif
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
/* if the egress device isn't on the same HW e-switch, we use the uplink */
if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
- *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ *out_dev = uplink_rpriv->netdev;
else
*out_dev = rt->dst.dev;
@@ -1547,6 +2049,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct dst_entry *dst;
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ struct mlx5e_rep_priv *uplink_rpriv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int ret;
@@ -1557,9 +2060,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
*out_ttl = ip6_dst_hoplimit(dst);
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
/* if the egress device isn't on the same HW e-switch, we use the uplink */
if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
- *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ *out_dev = uplink_rpriv->netdev;
else
*out_dev = dst->dev;
#else
@@ -1859,7 +2363,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
+ REP_ETH);
+ struct net_device *up_dev = uplink_rpriv->netdev;
unsigned short family = ip_tunnel_info_af(tun_info);
struct mlx5e_priv *up_priv = netdev_priv(up_dev);
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
@@ -1982,11 +2488,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
if (is_tcf_mirred_egress_redirect(a)) {
- int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
struct mlx5e_priv *out_priv;
- out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
+ out_dev = tcf_mirred_dev(a);
if (switchdev_port_same_parent_id(priv->netdev,
out_dev)) {
@@ -1996,7 +2501,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
rpriv = out_priv->ppriv;
attr->out_rep = rpriv->rep;
} else if (encap) {
- parse_attr->mirred_ifindex = ifindex;
+ parse_attr->mirred_ifindex = out_dev->ifindex;
parse_attr->tun_info = *info;
attr->parse_attr = parse_attr;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
@@ -2182,6 +2687,7 @@ int mlx5e_tc_init(struct mlx5e_priv *priv)
struct mlx5e_tc_table *tc = &priv->fs.tc;
hash_init(tc->mod_hdr_tbl);
+ hash_init(tc->hairpin_tbl);
tc->ht_params = mlx5e_tc_flow_ht_params;
return rhashtable_init(&tc->ht, &tc->ht_params);
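The hairpin sizing above allocates one hairpin channel per 50Gb/s share of the port speed, which also decides whether the flow takes the RSS path. A short worked example with an assumed 100Gb/s link:

/* values assumed for illustration: a 100Gb/s port */
u32 link_speed = 100000;            /* Mb/s, as mlx5e_get_max_linkspeed() reports */
u64 link_speed64 = max_t(u32, link_speed, 50000);

do_div(link_speed64, 50000);        /* 100000 / 50000 == 2 */
/* params.num_channels == 2, so num_channels > 1: the flow is marked
 * MLX5E_TC_FLOW_HAIRPIN_RSS and steered to the hairpin TTC table
 * (hp->ttc.ft.t) instead of the single direct TIR.
 */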
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index ab92298eafc3..f292bb346985 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -78,8 +78,14 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
mlx5e_cq_arm(&c->sq[i].cq);
- if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM))
- mlx5e_rx_am(&c->rq);
+ if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM)) {
+ struct net_dim_sample dim_sample;
+ net_dim_sample(c->rq.cq.event_ctr,
+ c->rq.stats.packets,
+ c->rq.stats.bytes,
+ &dim_sample);
+ net_dim(&c->rq.dim, dim_sample);
+ }
mlx5e_cq_arm(&c->rq.cq);
mlx5e_cq_arm(&c->icosq.cq);
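With en_rx_am.c deleted, the adaptive-moderation state machine now lives in the generic net_dim library; the driver only feeds samples, as in this hunk, and applies the chosen profile from a work item. A sketch of that consumer side under the net_dim conventions (the real handler lives in en_dim.c, which is not among the hunks shown, so the helper names here are assumptions):

/* sketch of the work handler net_dim() schedules on a profile change */
static void mlx5e_rx_dim_work_sketch(struct work_struct *work)
{
        struct net_dim *dim = container_of(work, struct net_dim, work);
        struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
        struct net_dim_cq_moder moder =
                net_dim_get_profile(dim->mode, dim->profile_ix);

        /* same CQ moderation update the deleted mlx5e_rx_am_work() did */
        mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
                                       moder.usec, moder.pkts);
        dim->state = NET_DIM_START_MEASURE;
}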
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index e7e7cef2bde4..4d98ce0901af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -530,6 +530,24 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
return IRQ_HANDLED;
}
+/* Some architectures don't latch interrupts when they are disabled, so using
+ * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
+ * avoid losing them. It is not recommended to use it unless it is the last
+ * resort.
+ */
+u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq)
+{
+ u32 count_eqe;
+
+ disable_irq(eq->irqn);
+ count_eqe = eq->cons_index;
+ mlx5_eq_int(eq->irqn, eq);
+ count_eqe = eq->cons_index - count_eqe;
+ enable_irq(eq->irqn);
+
+ return count_eqe;
+}
+
static void init_eq_buf(struct mlx5_eq *eq)
{
struct mlx5_eqe *eqe;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index bbb140f517c4..5ecf2cddc16d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "fs_core.h"
#define UPLINK_VPORT 0xFFFF
@@ -867,9 +868,10 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
- root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ vport->vport);
if (!root_ns) {
- esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
+ esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
return -EOPNOTSUPP;
}
@@ -984,9 +986,10 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
- root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ vport->vport);
if (!root_ns) {
- esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
+ esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
return -EOPNOTSUPP;
}
@@ -1121,8 +1124,12 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_fc *counter = vport->ingress.drop_counter;
+ struct mlx5_flow_destination drop_ctr_dst = {0};
+ struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
+ int dest_num = 0;
int err = 0;
u8 *smac_v;
@@ -1186,9 +1193,18 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ /* Attach drop flow counter */
+ if (counter) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_ctr_dst.counter = counter;
+ dst = &drop_ctr_dst;
+ dest_num++;
+ }
vport->ingress.drop_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
- &flow_act, NULL, 0);
+ &flow_act, dst, dest_num);
if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
esw_warn(esw->dev,
@@ -1208,8 +1224,12 @@ out:
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_fc *counter = vport->egress.drop_counter;
+ struct mlx5_flow_destination drop_ctr_dst = {0};
+ struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
+ int dest_num = 0;
int err = 0;
esw_vport_cleanup_egress_rules(esw, vport);
@@ -1260,9 +1280,18 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
/* Drop others rule (star rule) */
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ /* Attach egress drop flow counter */
+ if (counter) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_ctr_dst.counter = counter;
+ dst = &drop_ctr_dst;
+ dest_num++;
+ }
vport->egress.drop_rule =
mlx5_add_flow_rules(vport->egress.acl, spec,
- &flow_act, NULL, 0);
+ &flow_act, dst, dest_num);
if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
esw_warn(esw->dev,
@@ -1290,7 +1319,7 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
err = mlx5_create_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
- &tsar_ctx,
+ tsar_ctx,
&esw->qos.root_tsar_id);
if (err) {
esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
@@ -1333,20 +1362,20 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
if (vport->qos.enabled)
return -EEXIST;
- MLX5_SET(scheduling_context, &sched_ctx, element_type,
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
- vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+ vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
element_attributes);
MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
- MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
esw->qos.root_tsar_id);
- MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
initial_max_rate);
- MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);
err = mlx5_create_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
- &sched_ctx,
+ sched_ctx,
&vport->qos.esw_tsar_ix);
if (err) {
esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
@@ -1392,22 +1421,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
if (!vport->qos.enabled)
return -EIO;
- MLX5_SET(scheduling_context, &sched_ctx, element_type,
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
- vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+ vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
element_attributes);
MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
- MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
esw->qos.root_tsar_id);
- MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
max_rate);
- MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
err = mlx5_modify_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
- &sched_ctx,
+ sched_ctx,
vport->qos.esw_tsar_ix,
bitmask);
if (err) {
@@ -1455,6 +1484,41 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
}
}
+static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
+{
+ struct mlx5_core_dev *dev = vport->dev;
+
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
+ vport->ingress.drop_counter = mlx5_fc_create(dev, false);
+ if (IS_ERR(vport->ingress.drop_counter)) {
+ esw_warn(dev,
+ "vport[%d] configure ingress drop rule counter failed\n",
+ vport->vport);
+ vport->ingress.drop_counter = NULL;
+ }
+ }
+
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
+ vport->egress.drop_counter = mlx5_fc_create(dev, false);
+ if (IS_ERR(vport->egress.drop_counter)) {
+ esw_warn(dev,
+ "vport[%d] configure egress drop rule counter failed\n",
+ vport->vport);
+ vport->egress.drop_counter = NULL;
+ }
+ }
+}
+
+static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
+{
+ struct mlx5_core_dev *dev = vport->dev;
+
+ if (vport->ingress.drop_counter)
+ mlx5_fc_destroy(dev, vport->ingress.drop_counter);
+ if (vport->egress.drop_counter)
+ mlx5_fc_destroy(dev, vport->egress.drop_counter);
+}
+
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
int enable_events)
{
@@ -1481,6 +1545,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
if (!vport_num)
vport->info.trusted = true;
+ /* create steering drop counters for ingress and egress ACLs */
+ if (vport_num && esw->mode == SRIOV_LEGACY)
+ esw_vport_create_drop_counters(vport);
+
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
@@ -1519,6 +1587,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
+ esw_vport_destroy_drop_counters(vport);
}
esw->enabled_vports--;
mutex_unlock(&esw->state_lock);
@@ -1644,13 +1713,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
- esw->offloads.vport_reps =
- kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
- GFP_KERNEL);
- if (!esw->offloads.vport_reps) {
- err = -ENOMEM;
+ err = esw_offloads_init_reps(esw);
+ if (err)
goto abort;
- }
hash_init(esw->offloads.encap_tbl);
hash_init(esw->offloads.mod_hdr_tbl);
@@ -1681,8 +1746,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
abort:
if (esw->work_queue)
destroy_workqueue(esw->work_queue);
+ esw_offloads_cleanup_reps(esw);
kfree(esw->vports);
- kfree(esw->offloads.vport_reps);
kfree(esw);
return err;
}
@@ -1696,7 +1761,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
- kfree(esw->offloads.vport_reps);
+ esw_offloads_cleanup_reps(esw);
kfree(esw->vports);
kfree(esw);
}
@@ -2018,12 +2083,36 @@ unlock:
return err;
}
+static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
+ int vport_idx,
+ struct mlx5_vport_drop_stats *stats)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_vport *vport = &esw->vports[vport_idx];
+ u64 bytes = 0;
+ u16 idx = 0;
+
+ if (!vport->enabled || esw->mode != SRIOV_LEGACY)
+ return;
+
+ if (vport->egress.drop_counter) {
+ idx = vport->egress.drop_counter->id;
+ mlx5_fc_query(dev, idx, &stats->rx_dropped, &bytes);
+ }
+
+ if (vport->ingress.drop_counter) {
+ idx = vport->ingress.drop_counter->id;
+ mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes);
+ }
+}
+
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
struct ifla_vf_stats *vf_stats)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+ struct mlx5_vport_drop_stats stats = {0};
int err = 0;
u32 *out;
@@ -2078,6 +2167,10 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
vf_stats->broadcast =
MLX5_GET_CTR(out, received_eth_broadcast.packets);
+ mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+ vf_stats->rx_dropped = stats.rx_dropped;
+ vf_stats->tx_dropped = stats.tx_dropped;
+
free_out:
kvfree(out);
return err;
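Note the direction swap when the drop counters are folded into the VF statistics above: traffic the eswitch drops on the vport's egress is traffic the VF would have received, so the egress drop counter feeds rx_dropped, while the ingress drop counter (the VF's transmit side) feeds tx_dropped. A minimal sketch of reading one counter through the helper exported from fs_counters.c below (the surrounding variables are illustrative):

        /* Assumes the counter was created with mlx5_fc_create(dev, false);
         * mlx5_fc_query() is the thin wrapper added in fs_counters.c.
         */
        u64 packets = 0, bytes = 0;
        int err;

        err = mlx5_fc_query(dev, vport->egress.drop_counter->id,
                            &packets, &bytes);
        if (!err)
                vf_stats->rx_dropped = packets; /* eswitch egress == VF rx */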
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 565c8b7a399a..2fa037066b2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -45,6 +45,11 @@ enum {
SRIOV_OFFLOADS
};
+enum {
+ REP_ETH,
+ NUM_REP_TYPES,
+};
+
#ifdef CONFIG_MLX5_ESWITCH
#define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -68,6 +73,7 @@ struct vport_ingress {
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allow_rule;
struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
};
struct vport_egress {
@@ -76,6 +82,12 @@ struct vport_egress {
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allowed_vlan;
struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
+};
+
+struct mlx5_vport_drop_stats {
+ u64 rx_dropped;
+ u64 tx_dropped;
};
struct mlx5_vport_info {
@@ -133,25 +145,21 @@ struct mlx5_eswitch_fdb {
};
};
-struct mlx5_esw_sq {
- struct mlx5_flow_handle *send_to_vport_rule;
- struct list_head list;
+struct mlx5_eswitch_rep;
+struct mlx5_eswitch_rep_if {
+ int (*load)(struct mlx5_core_dev *dev,
+ struct mlx5_eswitch_rep *rep);
+ void (*unload)(struct mlx5_eswitch_rep *rep);
+ void *priv;
+ bool valid;
};
struct mlx5_eswitch_rep {
- int (*load)(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
- void (*unload)(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
+ struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
u16 vport;
u8 hw_id[ETH_ALEN];
- struct net_device *netdev;
-
- struct mlx5_flow_handle *vport_rx_rule;
- struct list_head vport_sqs_list;
u16 vlan;
u32 vlan_refcount;
- bool valid;
};
struct mlx5_esw_offload {
@@ -197,6 +205,8 @@ struct mlx5_eswitch {
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
+int esw_offloads_init_reps(struct mlx5_eswitch *esw);
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
@@ -221,6 +231,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
struct ifla_vf_stats *vf_stats);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport,
+ u32 sqn);
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
@@ -257,12 +271,6 @@ struct mlx5_esw_flow_attr {
struct mlx5e_tc_flow_parse_attr *parse_attr;
};
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep,
- u16 *sqns_array, int sqns_num);
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
-
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
@@ -272,10 +280,12 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
int vport_index,
- struct mlx5_eswitch_rep *rep);
+ struct mlx5_eswitch_rep_if *rep_if,
+ u8 rep_type);
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport_index);
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw);
+ int vport_index,
+ u8 rep_type);
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr);
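With the rep_if indirection, each representor consumer (currently only REP_ETH) supplies its own load/unload pair plus an opaque priv per vport, instead of netdev-specific state living inside struct mlx5_eswitch_rep. A hedged sketch of how an Ethernet rep driver would register under the new API (the callback bodies and loop bounds are illustrative):

        static int my_rep_load(struct mlx5_core_dev *dev,
                               struct mlx5_eswitch_rep *rep)
        {
                /* create the rep netdev; publish it through
                 * rep->rep_if[REP_ETH].priv for later lookup
                 */
                return 0;
        }

        static void my_rep_unload(struct mlx5_eswitch_rep *rep)
        {
                /* undo whatever load() set up */
        }

        static void my_register_reps(struct mlx5_eswitch *esw, int nvports)
        {
                struct mlx5_eswitch_rep_if rep_if = {};
                int vport;

                rep_if.load = my_rep_load;
                rep_if.unload = my_rep_unload;
                rep_if.priv = NULL; /* real consumers stash a per-vport priv */
                for (vport = 0; vport < nvports; vport++)
                        mlx5_eswitch_register_vport_rep(esw, vport,
                                                        &rep_if, REP_ETH);
        }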
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 1143d80119bd..99f583a15cc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -130,7 +130,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
rep = &esw->offloads.vport_reps[vf_vport];
- if (!rep->valid)
+ if (!rep->rep_if[REP_ETH].valid)
continue;
err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@@ -302,7 +302,7 @@ out:
return err;
}
-static struct mlx5_flow_handle *
+struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
struct mlx5_flow_act flow_act = {0};
@@ -339,57 +339,9 @@ out:
return flow_rule;
}
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep)
-{
- struct mlx5_esw_sq *esw_sq, *tmp;
-
- if (esw->mode != SRIOV_OFFLOADS)
- return;
-
- list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
- mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
- list_del(&esw_sq->list);
- kfree(esw_sq);
- }
-}
-
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep,
- u16 *sqns_array, int sqns_num)
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
- struct mlx5_flow_handle *flow_rule;
- struct mlx5_esw_sq *esw_sq;
- int err;
- int i;
-
- if (esw->mode != SRIOV_OFFLOADS)
- return 0;
-
- for (i = 0; i < sqns_num; i++) {
- esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
- if (!esw_sq) {
- err = -ENOMEM;
- goto out_err;
- }
-
- /* Add re-inject rule to the PF/representor sqs */
- flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
- rep->vport,
- sqns_array[i]);
- if (IS_ERR(flow_rule)) {
- err = PTR_ERR(flow_rule);
- kfree(esw_sq);
- goto out_err;
- }
- esw_sq->send_to_vport_rule = flow_rule;
- list_add(&esw_sq->list, &rep->vport_sqs_list);
- }
- return 0;
-
-out_err:
- mlx5_eswitch_sqs2vport_stop(esw, rep);
- return err;
+ mlx5_del_flow_rules(rule);
}
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
@@ -732,12 +684,111 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
return err;
}
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
+{
+ kfree(esw->offloads.vport_reps);
+}
+
+int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+{
+ int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_esw_offload *offloads;
+ struct mlx5_eswitch_rep *rep;
+ u8 hw_id[ETH_ALEN];
+ int vport;
+
+ esw->offloads.vport_reps = kcalloc(total_vfs,
+ sizeof(struct mlx5_eswitch_rep),
+ GFP_KERNEL);
+ if (!esw->offloads.vport_reps)
+ return -ENOMEM;
+
+ offloads = &esw->offloads;
+ mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
+
+ for (vport = 0; vport < total_vfs; vport++) {
+ rep = &offloads->vport_reps[vport];
+
+ rep->vport = vport;
+ ether_addr_copy(rep->hw_id, hw_id);
+ }
+
+ offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;
+
+ return 0;
+}
+
+static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
+ u8 rep_type)
+{
+ struct mlx5_eswitch_rep *rep;
+ int vport;
+
+ for (vport = nvports - 1; vport >= 0; vport--) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->rep_if[rep_type].valid)
+ continue;
+
+ rep->rep_if[rep_type].unload(rep);
+ }
+}
+
+static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
+{
+ u8 rep_type = NUM_REP_TYPES;
+
+ while (rep_type-- > 0)
+ esw_offloads_unload_reps_type(esw, nvports, rep_type);
+}
+
+static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
+ u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
int vport;
int err;
+ for (vport = 0; vport < nvports; vport++) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->rep_if[rep_type].valid)
+ continue;
+
+ err = rep->rep_if[rep_type].load(esw->dev, rep);
+ if (err)
+ goto err_reps;
+ }
+
+ return 0;
+
+err_reps:
+ esw_offloads_unload_reps_type(esw, vport, rep_type);
+ return err;
+}
+
+static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
+{
+ u8 rep_type = 0;
+ int err;
+
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+ err = esw_offloads_load_reps_type(esw, nvports, rep_type);
+ if (err)
+ goto err_reps;
+ }
+
+ return err;
+
+err_reps:
+ while (rep_type-- > 0)
+ esw_offloads_unload_reps_type(esw, nvports, rep_type);
+ return err;
+}
+
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+{
+ int err;
+
/* disable PF RoCE so missed packets don't go through RoCE steering */
mlx5_dev_list_lock();
mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
@@ -755,25 +806,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
if (err)
goto create_fg_err;
- for (vport = 0; vport < nvports; vport++) {
- rep = &esw->offloads.vport_reps[vport];
- if (!rep->valid)
- continue;
-
- err = rep->load(esw, rep);
- if (err)
- goto err_reps;
- }
+ err = esw_offloads_load_reps(esw, nvports);
+ if (err)
+ goto err_reps;
return 0;
err_reps:
- for (vport--; vport >= 0; vport--) {
- rep = &esw->offloads.vport_reps[vport];
- if (!rep->valid)
- continue;
- rep->unload(esw, rep);
- }
esw_destroy_vport_rx_group(esw);
create_fg_err:
@@ -814,16 +853,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
- struct mlx5_eswitch_rep *rep;
- int vport;
-
- for (vport = nvports - 1; vport >= 0; vport--) {
- rep = &esw->offloads.vport_reps[vport];
- if (!rep->valid)
- continue;
- rep->unload(esw, rep);
- }
-
+ esw_offloads_unload_reps(esw, nvports);
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_table(esw);
esw_destroy_offloads_fdb_tables(esw);
@@ -1120,27 +1150,23 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
int vport_index,
- struct mlx5_eswitch_rep *__rep)
+ struct mlx5_eswitch_rep_if *__rep_if,
+ u8 rep_type)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
- struct mlx5_eswitch_rep *rep;
-
- rep = &offloads->vport_reps[vport_index];
+ struct mlx5_eswitch_rep_if *rep_if;
- memset(rep, 0, sizeof(*rep));
+ rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];
- rep->load = __rep->load;
- rep->unload = __rep->unload;
- rep->vport = __rep->vport;
- rep->netdev = __rep->netdev;
- ether_addr_copy(rep->hw_id, __rep->hw_id);
+ rep_if->load = __rep_if->load;
+ rep_if->unload = __rep_if->unload;
+ rep_if->priv = __rep_if->priv;
- INIT_LIST_HEAD(&rep->vport_sqs_list);
- rep->valid = true;
+ rep_if->valid = true;
}
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport_index)
+ int vport_index, u8 rep_type)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
@@ -1148,17 +1174,17 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
rep = &offloads->vport_reps[vport_index];
if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
- rep->unload(esw, rep);
+ rep->rep_if[rep_type].unload(rep);
- rep->valid = false;
+ rep->rep_if[rep_type].valid = false;
}
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
rep = &offloads->vport_reps[UPLINK_REP_INDEX];
- return rep->netdev;
+ return rep->rep_if[rep_type].priv;
}
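The uplink accessor now hands back the consumer's opaque priv instead of a net_device, so each rep type retrieves exactly what it registered. A minimal usage sketch (the priv type is illustrative; REP_ETH comes from eswitch.h above):

        /* Hypothetical caller; whatever was stored in
         * rep_if[REP_ETH].priv at registration time comes back here.
         */
        struct my_rep_priv *uplink_priv;

        uplink_priv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
        if (!uplink_priv)
                return -ENODEV;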
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index dfaad9ecb2b8..c025c98700e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -89,6 +89,9 @@
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
+#define KERNEL_NIC_TC_NUM_PRIOS 1
+#define KERNEL_NIC_TC_NUM_LEVELS 2
+
#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
@@ -134,7 +137,7 @@ static struct init_tree_node {
ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
ETHTOOL_PRIO_NUM_LEVELS))),
ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
+ ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
KERNEL_NIC_PRIO_NUM_LEVELS))),
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
@@ -2026,16 +2029,6 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
return &steering->fdb_root_ns->ns;
else
return NULL;
- case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
- if (steering->esw_egress_root_ns)
- return &steering->esw_egress_root_ns->ns;
- else
- return NULL;
- case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
- if (steering->esw_ingress_root_ns)
- return &steering->esw_ingress_root_ns->ns;
- else
- return NULL;
case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
if (steering->sniffer_rx_root_ns)
return &steering->sniffer_rx_root_ns->ns;
@@ -2066,6 +2059,33 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
+struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type,
+ int vport)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+
+ if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
+ return NULL;
+
+ switch (type) {
+ case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+ if (steering->esw_egress_root_ns &&
+ steering->esw_egress_root_ns[vport])
+ return &steering->esw_egress_root_ns[vport]->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+ if (steering->esw_ingress_root_ns &&
+ steering->esw_ingress_root_ns[vport])
+ return &steering->esw_ingress_root_ns[vport]->ns;
+ else
+ return NULL;
+ default:
+ return NULL;
+ }
+}
+
static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
unsigned int prio, int num_levels)
{
@@ -2343,13 +2363,41 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
clean_tree(&root_ns->ns.node);
}
+static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int i;
+
+ if (!steering->esw_egress_root_ns)
+ return;
+
+ for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+ cleanup_root_ns(steering->esw_egress_root_ns[i]);
+
+ kfree(steering->esw_egress_root_ns);
+}
+
+static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int i;
+
+ if (!steering->esw_ingress_root_ns)
+ return;
+
+ for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+ cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+
+ kfree(steering->esw_ingress_root_ns);
+}
+
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
cleanup_root_ns(steering->root_ns);
- cleanup_root_ns(steering->esw_egress_root_ns);
- cleanup_root_ns(steering->esw_ingress_root_ns);
+ cleanup_egress_acls_root_ns(dev);
+ cleanup_ingress_acls_root_ns(dev);
cleanup_root_ns(steering->fdb_root_ns);
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
@@ -2418,34 +2466,86 @@ out_err:
return PTR_ERR(prio);
}
-static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
struct fs_prio *prio;
- steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
- if (!steering->esw_egress_root_ns)
+ steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
+ if (!steering->esw_egress_root_ns[vport])
return -ENOMEM;
/* create 1 prio */
- prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
- MLX5_TOTAL_VPORTS(steering->dev));
+ prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
return PTR_ERR_OR_ZERO(prio);
}
-static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
struct fs_prio *prio;
- steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
- if (!steering->esw_ingress_root_ns)
+ steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
+ if (!steering->esw_ingress_root_ns[vport])
return -ENOMEM;
/* create 1 prio */
- prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
- MLX5_TOTAL_VPORTS(steering->dev));
+ prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
return PTR_ERR_OR_ZERO(prio);
}
+static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int err;
+ int i;
+
+ steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+ sizeof(*steering->esw_egress_root_ns),
+ GFP_KERNEL);
+ if (!steering->esw_egress_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+ err = init_egress_acl_root_ns(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+
+ return 0;
+
+cleanup_root_ns:
+ for (i--; i >= 0; i--)
+ cleanup_root_ns(steering->esw_egress_root_ns[i]);
+ kfree(steering->esw_egress_root_ns);
+ return err;
+}
+
+static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int err;
+ int i;
+
+ steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+ sizeof(*steering->esw_ingress_root_ns),
+ GFP_KERNEL);
+ if (!steering->esw_ingress_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+ err = init_ingress_acl_root_ns(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+
+ return 0;
+
+cleanup_root_ns:
+ for (i--; i >= 0; i--)
+ cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+ kfree(steering->esw_ingress_root_ns);
+ return err;
+}
+
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering;
@@ -2488,12 +2588,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
- err = init_egress_acl_root_ns(steering);
+ err = init_egress_acls_root_ns(dev);
if (err)
goto err;
}
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
- err = init_ingress_acl_root_ns(steering);
+ err = init_ingress_acls_root_ns(dev);
if (err)
goto err;
}
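Splitting the ACL root namespaces per vport gives each vport its own tree with a single prio, instead of all vports sharing one namespace sized to MLX5_TOTAL_VPORTS prios; lookups therefore carry the vport, as in the eswitch.c hunk above. A hedged sketch of the lookup-and-create pattern (the table size is illustrative):

        struct mlx5_flow_namespace *ns;
        struct mlx5_flow_table *acl;

        /* Assumes MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) was checked */
        ns = mlx5_get_flow_vport_acl_namespace(dev,
                                               MLX5_FLOW_NAMESPACE_ESW_INGRESS,
                                               vport_num);
        if (!ns)
                return -EOPNOTSUPP;

        acl = mlx5_create_vport_flow_table(ns, 0, 4, 0, vport_num);
        if (IS_ERR(acl))
                return PTR_ERR(acl);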
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 397d24a621a4..05262708f14b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -71,8 +71,8 @@ struct mlx5_flow_steering {
struct kmem_cache *ftes_cache;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
- struct mlx5_flow_root_namespace *esw_egress_root_ns;
- struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+ struct mlx5_flow_root_namespace **esw_egress_root_ns;
+ struct mlx5_flow_root_namespace **esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
};
@@ -233,6 +233,8 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
unsigned long delay);
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
unsigned long interval);
+int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes);
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 89d1f8650033..b7ab929d5f8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -312,6 +312,12 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
}
}
+int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes)
+{
+ return mlx5_cmd_fc_query(dev, id, packets, bytes);
+}
+
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 6f338a9219c8..90cb50fe17fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -254,4 +254,5 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
const struct ethtool_ops mlx5i_pkey_ethtool_ops = {
.get_drvinfo = mlx5i_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_ts_info = mlx5i_get_ts_info,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index ee2f378c5030..264504a990ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -41,7 +41,6 @@
static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
-static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_netdev_ops = {
.ndo_open = mlx5i_open,
@@ -242,7 +241,8 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
- int err;
+ struct ttc_params ttc_params = {};
+ int tt, err;
priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
@@ -257,14 +257,23 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- err = mlx5e_create_inner_ttc_table(priv);
+ mlx5e_set_ttc_basic_params(priv, &ttc_params);
+ mlx5e_set_inner_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
+
+ err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
err);
goto err_destroy_arfs_tables;
}
- err = mlx5e_create_ttc_table(priv);
+ mlx5e_set_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
+
+ err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
@@ -274,7 +283,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
return 0;
err_destroy_inner_ttc_table:
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
@@ -283,8 +292,8 @@ err_destroy_arfs_tables:
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
- mlx5e_destroy_ttc_table(priv);
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
mlx5e_arfs_destroy_tables(priv);
}
@@ -398,7 +407,7 @@ int mlx5i_dev_init(struct net_device *dev)
return 0;
}
-static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -486,7 +495,7 @@ static int mlx5i_close(struct net_device *netdev)
mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
mlx5i_uninit_underlay_qp(epriv);
mlx5e_deactivate_priv_channels(epriv);
- mlx5e_close_channels(&epriv->channels);;
+ mlx5e_close_channels(&epriv->channels);
unlock:
mutex_unlock(&epriv->state_lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 49008022c306..6d9053bcbe95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -76,9 +76,10 @@ int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn);
/* Get the net-device corresponding to the given underlay QPN */
struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn);
-/* Shared ndo functionts */
+/* Shared ndo functions */
int mlx5i_dev_init(struct net_device *dev);
void mlx5i_dev_cleanup(struct net_device *dev);
+int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* Parent profile functions */
void mlx5i_init(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 531b02cc979b..b69e9d847a6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -140,6 +140,7 @@ static int mlx5i_pkey_close(struct net_device *netdev);
static int mlx5i_pkey_dev_init(struct net_device *dev);
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev);
static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu);
+static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_open = mlx5i_pkey_open,
@@ -147,6 +148,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_init = mlx5i_pkey_dev_init,
.ndo_uninit = mlx5i_pkey_dev_cleanup,
.ndo_change_mtu = mlx5i_pkey_change_mtu,
+ .ndo_do_ioctl = mlx5i_pkey_ioctl,
};
/* Child NDOs */
@@ -174,6 +176,11 @@ static int mlx5i_pkey_dev_init(struct net_device *dev)
return mlx5i_dev_init(dev);
}
+static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ return mlx5i_ioctl(dev, ifr, cmd);
+}
+
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
{
return mlx5i_dev_cleanup(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index ff4a0b889a6f..b5a46c128b28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -116,6 +116,7 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
+u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq);
void mlx5_cq_tasklet_cb(unsigned long data);
int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 5e128d7a9ffd..9e38343a951f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -398,3 +398,217 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_rqt);
+
+static int mlx5_hairpin_create_rq(struct mlx5_core_dev *mdev,
+ struct mlx5_hairpin_params *params, u32 *rqn)
+{
+ u32 in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
+ void *rqc, *wq;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ MLX5_SET(rqc, rqc, hairpin, 1);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, counter_set_id, params->q_counter);
+
+ MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+ MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets);
+
+ return mlx5_core_create_rq(mdev, in, MLX5_ST_SZ_BYTES(create_rq_in), rqn);
+}
+
+static int mlx5_hairpin_create_sq(struct mlx5_core_dev *mdev,
+ struct mlx5_hairpin_params *params, u32 *sqn)
+{
+ u32 in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
+ void *sqc, *wq;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ MLX5_SET(sqc, sqc, hairpin, 1);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+
+ MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+ MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets);
+
+ return mlx5_core_create_sq(mdev, in, MLX5_ST_SZ_BYTES(create_sq_in), sqn);
+}
+
+static int mlx5_hairpin_create_queues(struct mlx5_hairpin *hp,
+ struct mlx5_hairpin_params *params)
+{
+ int i, j, err;
+
+ for (i = 0; i < hp->num_channels; i++) {
+ err = mlx5_hairpin_create_rq(hp->func_mdev, params, &hp->rqn[i]);
+ if (err)
+ goto out_err_rq;
+ }
+
+ for (i = 0; i < hp->num_channels; i++) {
+ err = mlx5_hairpin_create_sq(hp->peer_mdev, params, &hp->sqn[i]);
+ if (err)
+ goto out_err_sq;
+ }
+
+ return 0;
+
+out_err_sq:
+ for (j = 0; j < i; j++)
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[j]);
+ i = hp->num_channels;
+out_err_rq:
+ for (j = 0; j < i; j++)
+ mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[j]);
+ return err;
+}
+
+static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
+{
+ int i;
+
+ for (i = 0; i < hp->num_channels; i++) {
+ mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+ }
+}
+
+static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn,
+ int curr_state, int next_state,
+ u16 peer_vhca, u32 peer_sq)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
+ void *rqc;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ if (next_state == MLX5_RQC_STATE_RDY) {
+ MLX5_SET(rqc, rqc, hairpin_peer_sq, peer_sq);
+ MLX5_SET(rqc, rqc, hairpin_peer_vhca, peer_vhca);
+ }
+
+ MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+ MLX5_SET(rqc, rqc, state, next_state);
+
+ return mlx5_core_modify_rq(func_mdev, rqn,
+ in, MLX5_ST_SZ_BYTES(modify_rq_in));
+}
+
+static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
+ int curr_state, int next_state,
+ u16 peer_vhca, u32 peer_rq)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
+ void *sqc;
+
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+ if (next_state == MLX5_RQC_STATE_RDY) {
+ MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
+ MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
+ }
+
+ MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+ MLX5_SET(sqc, sqc, state, next_state);
+
+ return mlx5_core_modify_sq(peer_mdev, sqn,
+ in, MLX5_ST_SZ_BYTES(modify_sq_in));
+}
+
+static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp)
+{
+ int i, j, err;
+
+ /* set peer SQs */
+ for (i = 0; i < hp->num_channels; i++) {
+ err = mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i],
+ MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
+ MLX5_CAP_GEN(hp->func_mdev, vhca_id), hp->rqn[i]);
+ if (err)
+ goto err_modify_sq;
+ }
+
+ /* set func RQs */
+ for (i = 0; i < hp->num_channels; i++) {
+ err = mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i],
+ MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY,
+ MLX5_CAP_GEN(hp->peer_mdev, vhca_id), hp->sqn[i]);
+ if (err)
+ goto err_modify_rq;
+ }
+
+ return 0;
+
+err_modify_rq:
+ for (j = 0; j < i; j++)
+ mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[j], MLX5_RQC_STATE_RDY,
+ MLX5_RQC_STATE_RST, 0, 0);
+ i = hp->num_channels;
+err_modify_sq:
+ for (j = 0; j < i; j++)
+ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[j], MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RST, 0, 0);
+ return err;
+}
+
+static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
+{
+ int i;
+
+ /* unset func RQs */
+ for (i = 0; i < hp->num_channels; i++)
+ mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
+ MLX5_RQC_STATE_RST, 0, 0);
+
+ /* unset peer SQs */
+ for (i = 0; i < hp->num_channels; i++)
+ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RST, 0, 0);
+}
+
+struct mlx5_hairpin *
+mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
+ struct mlx5_core_dev *peer_mdev,
+ struct mlx5_hairpin_params *params)
+{
+ struct mlx5_hairpin *hp;
+ int size, err;
+
+ size = sizeof(*hp) + params->num_channels * 2 * sizeof(u32);
+ hp = kzalloc(size, GFP_KERNEL);
+ if (!hp)
+ return ERR_PTR(-ENOMEM);
+
+ hp->func_mdev = func_mdev;
+ hp->peer_mdev = peer_mdev;
+ hp->num_channels = params->num_channels;
+
+ hp->rqn = (void *)hp + sizeof(*hp);
+ hp->sqn = hp->rqn + params->num_channels;
+
+ /* alloc and pair func --> peer hairpin */
+ err = mlx5_hairpin_create_queues(hp, params);
+ if (err)
+ goto err_create_queues;
+
+ err = mlx5_hairpin_pair_queues(hp);
+ if (err)
+ goto err_pair_queues;
+
+ return hp;
+
+err_pair_queues:
+ mlx5_hairpin_destroy_queues(hp);
+err_create_queues:
+ kfree(hp);
+ return ERR_PTR(err);
+}
+
+void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
+{
+ mlx5_hairpin_unpair_queues(hp);
+ mlx5_hairpin_destroy_queues(hp);
+ kfree(hp);
+}
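A hairpin stitches an RQ on the function device to an SQ on the peer device per channel, so packets received on one port can be forwarded out the other without leaving the NIC; creation allocates and pairs the queues, destruction unpairs and frees them in reverse order. A minimal sketch of bringing one up (the parameter values are illustrative):

        struct mlx5_hairpin_params params = {
                .log_data_size   = 16,  /* per-queue hairpin buffer */
                .log_num_packets = 7,
                .q_counter       = 0,
                .num_channels    = 1,
        };
        struct mlx5_hairpin *hp;

        hp = mlx5_core_hairpin_create(func_mdev, peer_mdev, &params);
        if (IS_ERR(hp))
                return PTR_ERR(hp);

        /* steer RX flows to hp->rqn[0]; TX emerges from hp->sqn[0] */

        mlx5_core_hairpin_destroy(hp);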
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f3315bc874ad..3529b545675d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -113,6 +113,7 @@ struct mlxsw_core {
struct mlxsw_thermal *thermal;
struct mlxsw_core_port *ports;
unsigned int max_ports;
+ bool reload_fail;
unsigned long driver_priv[0];
/* driver_priv has to be always the last item */
};
@@ -962,7 +963,28 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
pool_type, p_cur, p_max);
}
+static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ const struct mlxsw_bus *mlxsw_bus = mlxsw_core->bus;
+ int err;
+
+ if (!mlxsw_bus->reset)
+ return -EOPNOTSUPP;
+
+ mlxsw_core_bus_device_unregister(mlxsw_core, true);
+ mlxsw_bus->reset(mlxsw_core->bus_priv);
+ err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
+ mlxsw_core->bus,
+ mlxsw_core->bus_priv, true,
+ devlink);
+ if (err)
+ mlxsw_core->reload_fail = true;
+ return err;
+}
+
static const struct devlink_ops mlxsw_devlink_ops = {
+ .reload = mlxsw_devlink_core_bus_device_reload,
.port_type_set = mlxsw_devlink_port_type_set,
.port_split = mlxsw_devlink_port_split,
.port_unsplit = mlxsw_devlink_port_unsplit,
@@ -980,23 +1002,26 @@ static const struct devlink_ops mlxsw_devlink_ops = {
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
- void *bus_priv)
+ void *bus_priv, bool reload,
+ struct devlink *devlink)
{
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
struct mlxsw_driver *mlxsw_driver;
- struct devlink *devlink;
size_t alloc_size;
int err;
mlxsw_driver = mlxsw_core_driver_get(device_kind);
if (!mlxsw_driver)
return -EINVAL;
- alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
- devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
- if (!devlink) {
- err = -ENOMEM;
- goto err_devlink_alloc;
+
+ if (!reload) {
+ alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
+ devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
+ if (!devlink) {
+ err = -ENOMEM;
+ goto err_devlink_alloc;
+ }
}
mlxsw_core = devlink_priv(devlink);
@@ -1012,6 +1037,12 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_bus_init;
+ if (mlxsw_driver->resources_register && !reload) {
+ err = mlxsw_driver->resources_register(mlxsw_core);
+ if (err)
+ goto err_register_resources;
+ }
+
err = mlxsw_ports_init(mlxsw_core);
if (err)
goto err_ports_init;
@@ -1032,9 +1063,11 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_emad_init;
- err = devlink_register(devlink, mlxsw_bus_info->dev);
- if (err)
- goto err_devlink_register;
+ if (!reload) {
+ err = devlink_register(devlink, mlxsw_bus_info->dev);
+ if (err)
+ goto err_devlink_register;
+ }
err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
if (err)
@@ -1057,7 +1090,8 @@ err_driver_init:
mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
err_hwmon_init:
- devlink_unregister(devlink);
+ if (!reload)
+ devlink_unregister(devlink);
err_devlink_register:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
@@ -1067,26 +1101,40 @@ err_alloc_lag_mapping:
err_ports_init:
mlxsw_bus->fini(bus_priv);
err_bus_init:
- devlink_free(devlink);
+ if (!reload)
+ devlink_resources_unregister(devlink, NULL);
+err_register_resources:
+ if (!reload)
+ devlink_free(devlink);
err_devlink_alloc:
mlxsw_core_driver_put(device_kind);
return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
-void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
+ bool reload)
{
const char *device_kind = mlxsw_core->bus_info->device_kind;
struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ if (mlxsw_core->reload_fail)
+ goto reload_fail;
+
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
mlxsw_thermal_fini(mlxsw_core->thermal);
- devlink_unregister(devlink);
+ if (!reload)
+ devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
kfree(mlxsw_core->lag.mapping);
mlxsw_ports_fini(mlxsw_core);
+ if (!reload)
+ devlink_resources_unregister(devlink, NULL);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+ if (reload)
+ return;
+reload_fail:
devlink_free(devlink);
mlxsw_core_driver_put(device_kind);
}
@@ -1791,6 +1839,22 @@ void mlxsw_core_flush_owq(void)
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);
+int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size)
+{
+ struct mlxsw_driver *driver = mlxsw_core->driver;
+
+ if (!driver->kvd_sizes_get)
+ return -EINVAL;
+
+ return driver->kvd_sizes_get(mlxsw_core, profile,
+ p_single_size, p_double_size,
+ p_linear_size);
+}
+EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
+
static int __init mlxsw_core_module_init(void)
{
int err;
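The reload path unregisters the bus device while keeping the devlink instance alive, asks the bus to reset the ASIC via the new .reset hook, and then registers everything again; resources registered through resources_register deliberately survive the cycle, which is what lets resized KVD partitions take effect on the next init. From user space this is driven by the devlink reload command, e.g. devlink dev reload pci/0000:03:00.0 (the PCI address here is illustrative).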
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 6e966af72fc4..5ddafd74dc00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -66,8 +66,9 @@ void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
- void *bus_priv);
-void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
+ void *bus_priv, bool reload,
+ struct devlink *devlink);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload);
struct mlxsw_tx_info {
u8 local_port;
@@ -308,10 +309,20 @@ struct mlxsw_driver {
u32 *p_cur, u32 *p_max);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
+ int (*resources_register)(struct mlxsw_core *mlxsw_core);
+ int (*kvd_sizes_get)(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size);
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
};
+int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size);
+
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
@@ -332,6 +343,7 @@ struct mlxsw_bus {
const struct mlxsw_config_profile *profile,
struct mlxsw_res *res);
void (*fini)(void *bus_priv);
+ void (*reset)(void *bus_priv);
bool (*skb_transmit_busy)(void *bus_priv,
const struct mlxsw_tx_info *tx_info);
int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 6a979a09ab72..b698fb481b2e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -310,9 +310,33 @@ struct mlxsw_afa_block {
struct mlxsw_afa_set *first_set;
struct mlxsw_afa_set *cur_set;
unsigned int cur_act_index; /* In current set. */
- struct list_head fwd_entry_ref_list;
+ struct list_head resource_list; /* List of resources held by actions
+ * in this block.
+ */
};
+struct mlxsw_afa_resource {
+ struct list_head list;
+ void (*destructor)(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource);
+};
+
+static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ list_add(&resource->list, &block->resource_list);
+}
+
+static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
+{
+ struct mlxsw_afa_resource *resource, *tmp;
+
+ list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
+ list_del(&resource->list);
+ resource->destructor(block, resource);
+ }
+}
+
struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
{
struct mlxsw_afa_block *block;
@@ -320,7 +344,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
block = kzalloc(sizeof(*block), GFP_KERNEL);
if (!block)
return NULL;
- INIT_LIST_HEAD(&block->fwd_entry_ref_list);
+ INIT_LIST_HEAD(&block->resource_list);
block->afa = mlxsw_afa;
/* At least one action set is always present, so just create it here */
@@ -336,8 +360,6 @@ err_first_set_create:
}
EXPORT_SYMBOL(mlxsw_afa_block_create);
-static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block);
-
void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
{
struct mlxsw_afa_set *set = block->first_set;
@@ -348,7 +370,7 @@ void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
mlxsw_afa_set_put(block->afa, set);
set = next_set;
} while (set);
- mlxsw_afa_fwd_entry_refs_destroy(block);
+ mlxsw_afa_resources_destroy(block);
kfree(block);
}
EXPORT_SYMBOL(mlxsw_afa_block_destroy);
@@ -489,10 +511,29 @@ static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
}
struct mlxsw_afa_fwd_entry_ref {
- struct list_head list;
+ struct mlxsw_afa_resource resource;
struct mlxsw_afa_fwd_entry *fwd_entry;
};
+static void
+mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
+{
+ mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
+ kfree(fwd_entry_ref);
+}
+
+static void
+mlxsw_afa_fwd_entry_ref_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
+
+ fwd_entry_ref = container_of(resource, struct mlxsw_afa_fwd_entry_ref,
+ resource);
+ mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
+}
+
static struct mlxsw_afa_fwd_entry_ref *
mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
{
@@ -509,7 +550,8 @@ mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
goto err_fwd_entry_get;
}
fwd_entry_ref->fwd_entry = fwd_entry;
- list_add(&fwd_entry_ref->list, &block->fwd_entry_ref_list);
+ fwd_entry_ref->resource.destructor = mlxsw_afa_fwd_entry_ref_destructor;
+ mlxsw_afa_resource_add(block, &fwd_entry_ref->resource);
return fwd_entry_ref;
err_fwd_entry_get:
@@ -517,23 +559,51 @@ err_fwd_entry_get:
return ERR_PTR(err);
}
+struct mlxsw_afa_counter {
+ struct mlxsw_afa_resource resource;
+ u32 counter_index;
+};
+
static void
-mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
- struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
+mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_counter *counter)
{
- list_del(&fwd_entry_ref->list);
- mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
- kfree(fwd_entry_ref);
+ block->afa->ops->counter_index_put(block->afa->ops_priv,
+ counter->counter_index);
+ kfree(counter);
+}
+
+static void
+mlxsw_afa_counter_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_counter *counter;
+
+ counter = container_of(resource, struct mlxsw_afa_counter, resource);
+ mlxsw_afa_counter_destroy(block, counter);
}
-static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block)
+static struct mlxsw_afa_counter *
+mlxsw_afa_counter_create(struct mlxsw_afa_block *block)
{
- struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
- struct mlxsw_afa_fwd_entry_ref *tmp;
+ struct mlxsw_afa_counter *counter;
+ int err;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+
+ err = block->afa->ops->counter_index_get(block->afa->ops_priv,
+ &counter->counter_index);
+ if (err)
+ goto err_counter_index_get;
+ counter->resource.destructor = mlxsw_afa_counter_destructor;
+ mlxsw_afa_resource_add(block, &counter->resource);
+ return counter;
- list_for_each_entry_safe(fwd_entry_ref, tmp,
- &block->fwd_entry_ref_list, list)
- mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
+err_counter_index_get:
+ kfree(counter);
+ return ERR_PTR(err);
}
#define MLXSW_AFA_ONE_ACTION_LEN 32
@@ -690,6 +760,16 @@ MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4);
*/
MLXSW_ITEM32(afa, trapdisc, trap_id, 0x04, 0, 9);
+/* afa_trapdisc_mirror_agent
+ * Mirror agent.
+ */
+MLXSW_ITEM32(afa, trapdisc, mirror_agent, 0x08, 29, 3);
+
+/* afa_trapdisc_mirror_enable
+ * Mirror enable.
+ */
+MLXSW_ITEM32(afa, trapdisc, mirror_enable, 0x08, 24, 1);
+
static inline void
mlxsw_afa_trapdisc_pack(char *payload,
enum mlxsw_afa_trapdisc_trap_action trap_action,
@@ -701,6 +781,14 @@ mlxsw_afa_trapdisc_pack(char *payload,
mlxsw_afa_trapdisc_trap_id_set(payload, trap_id);
}
+static inline void
+mlxsw_afa_trapdisc_mirror_pack(char *payload, bool mirror_enable,
+ u8 mirror_agent)
+{
+ mlxsw_afa_trapdisc_mirror_enable_set(payload, mirror_enable);
+ mlxsw_afa_trapdisc_mirror_agent_set(payload, mirror_agent);
+}
+
int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
{
char *act = mlxsw_afa_block_append_action(block,
@@ -746,6 +834,104 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
}
EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward);
+struct mlxsw_afa_mirror {
+ struct mlxsw_afa_resource resource;
+ int span_id;
+ u8 local_in_port;
+ u8 local_out_port;
+ bool ingress;
+};
+
+static void
+mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_mirror *mirror)
+{
+ block->afa->ops->mirror_del(block->afa->ops_priv,
+ mirror->local_in_port,
+ mirror->local_out_port,
+ mirror->ingress);
+ kfree(mirror);
+}
+
+static void
+mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_mirror *mirror;
+
+ mirror = container_of(resource, struct mlxsw_afa_mirror, resource);
+ mlxsw_afa_mirror_destroy(block, mirror);
+}
+
+static struct mlxsw_afa_mirror *
+mlxsw_afa_mirror_create(struct mlxsw_afa_block *block,
+ u8 local_in_port, u8 local_out_port,
+ bool ingress)
+{
+ struct mlxsw_afa_mirror *mirror;
+ int err;
+
+ mirror = kzalloc(sizeof(*mirror), GFP_KERNEL);
+ if (!mirror)
+ return ERR_PTR(-ENOMEM);
+
+ err = block->afa->ops->mirror_add(block->afa->ops_priv,
+ local_in_port, local_out_port,
+ ingress, &mirror->span_id);
+ if (err)
+ goto err_mirror_add;
+
+ mirror->ingress = ingress;
+ mirror->local_out_port = local_out_port;
+ mirror->local_in_port = local_in_port;
+ mirror->resource.destructor = mlxsw_afa_mirror_destructor;
+ mlxsw_afa_resource_add(block, &mirror->resource);
+ return mirror;
+
+err_mirror_add:
+ kfree(mirror);
+ return ERR_PTR(err);
+}
+
+static int
+mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
+ u8 mirror_agent)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_TRAPDISC_CODE,
+ MLXSW_AFA_TRAPDISC_SIZE);
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
+ MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0);
+ mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent);
+ return 0;
+}
+
+int
+mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
+ u8 local_in_port, u8 local_out_port, bool ingress)
+{
+ struct mlxsw_afa_mirror *mirror;
+ int err;
+
+ mirror = mlxsw_afa_mirror_create(block, local_in_port, local_out_port,
+ ingress);
+ if (IS_ERR(mirror))
+ return PTR_ERR(mirror);
+
+ err = mlxsw_afa_block_append_allocated_mirror(block, mirror->span_id);
+ if (err)
+ goto err_append_allocated_mirror;
+
+ return 0;
+
+err_append_allocated_mirror:
+ mlxsw_afa_mirror_destroy(block, mirror);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_mirror);
+
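A hedged sketch of how a caller might use the new append API; the example function and port numbers are illustrative assumptions, only the mlxsw_afa_block_append_mirror() signature above is taken from the patch:

	/* Illustrative only: mirror packets seen on ingress of local port 1
	 * to analyzer port 2. The SPAN agent is allocated internally and is
	 * released through the block's resource destructor list on teardown,
	 * or immediately if appending the action fails.
	 */
	static int example_append_mirror(struct mlxsw_afa_block *block)
	{
		return mlxsw_afa_block_append_mirror(block, 1 /* local_in_port */,
						     2 /* local_out_port */,
						     true /* ingress */);
	}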
/* Forwarding Action
* -----------------
* Forwarding Action can be used to implement Policy Based Switching (PBS)
@@ -853,11 +1039,10 @@ mlxsw_afa_polcnt_pack(char *payload,
mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
}
-int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
- u32 counter_index)
+int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
+ u32 counter_index)
{
- char *act = mlxsw_afa_block_append_action(block,
- MLXSW_AFA_POLCNT_CODE,
+ char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE,
MLXSW_AFA_POLCNT_SIZE);
if (!act)
return -ENOBUFS;
@@ -865,6 +1050,32 @@ int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
counter_index);
return 0;
}
+EXPORT_SYMBOL(mlxsw_afa_block_append_allocated_counter);
+
+int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
+ u32 *p_counter_index)
+{
+ struct mlxsw_afa_counter *counter;
+ u32 counter_index;
+ int err;
+
+ counter = mlxsw_afa_counter_create(block);
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
+ counter_index = counter->counter_index;
+
+ err = mlxsw_afa_block_append_allocated_counter(block, counter_index);
+ if (err)
+ goto err_append_allocated_counter;
+
+ if (p_counter_index)
+ *p_counter_index = counter_index;
+ return 0;
+
+err_append_allocated_counter:
+ mlxsw_afa_counter_destroy(block, counter);
+ return err;
+}
EXPORT_SYMBOL(mlxsw_afa_block_append_counter);
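The counter API split can be summarized with a hedged sketch; example_append_counters() and existing_index are illustrative, the two exported signatures are taken from the hunk above:

	/* Illustrative only. append_counter() now allocates the flow counter
	 * itself (via ops->counter_index_get) and hands the index back, while
	 * the _allocated_ variant binds a counter the caller already owns.
	 */
	static int example_append_counters(struct mlxsw_afa_block *block,
					   u32 existing_index)
	{
		u32 new_index;
		int err;

		err = mlxsw_afa_block_append_counter(block, &new_index);
		if (err)
			return err;
		return mlxsw_afa_block_append_allocated_counter(block,
								existing_index);
	}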
/* Virtual Router and Forwarding Domain Action
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index a8d3314c3a24..43132293475c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -46,6 +46,12 @@ struct mlxsw_afa_ops {
void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first);
int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port);
void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
+ int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
+ void (*counter_index_put)(void *priv, unsigned int counter_index);
+ int (*mirror_add)(void *priv, u8 local_in_port, u8 local_out_port,
+ bool ingress, int *p_span_id);
+ void (*mirror_del)(void *priv, u8 local_in_port, u8 local_out_port,
+ bool ingress);
};
struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
@@ -63,12 +69,17 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
u16 trap_id);
+int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
+ u8 local_in_port, u8 local_out_port,
+ bool ingress);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
u8 local_port, bool in_port);
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
u16 vid, u8 pcp, u8 et);
+int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
+ u32 counter_index);
int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
- u32 counter_index);
+ u32 *p_counter_index);
int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid);
int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
u16 expected_irif, u16 min_mtu,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index c0dcfa05b077..25f9915ebd82 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -539,7 +539,8 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
mlxsw_i2c->dev = &client->dev;
err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info,
- &mlxsw_i2c_bus, mlxsw_i2c);
+ &mlxsw_i2c_bus, mlxsw_i2c, false,
+ NULL);
if (err) {
dev_err(&client->dev, "Fail to register core bus\n");
return err;
@@ -557,7 +558,7 @@ static int mlxsw_i2c_remove(struct i2c_client *client)
{
struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
- mlxsw_core_bus_device_unregister(mlxsw_i2c->core);
+ mlxsw_core_bus_device_unregister(mlxsw_i2c->core, false);
mutex_destroy(&mlxsw_i2c->cmd.lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index 28427f0758c7..31c886edc791 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -42,7 +42,7 @@
struct mlxsw_item {
unsigned short offset; /* bytes in container */
- unsigned short step; /* step in bytes for indexed items */
+ short step; /* step in bytes for indexed items */
unsigned short in_step_offset; /* offset within one step */
unsigned char shift; /* shift in bits */
unsigned char element_size; /* size of element in bit array */
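Making step signed is what permits items laid out from the end of a register backwards. In simplified form (ignoring in_step_offset and the element size handling), the byte offset of record i becomes:

	byte_offset(i) = item->offset + item->step * i;	/* step may be negative */

The RDPM register added below is the first user of a negative step.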
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 6ef20e5cc77d..85faa87bf42d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -154,6 +154,7 @@ struct mlxsw_pci {
} comp;
} cmd;
struct mlxsw_bus_info bus_info;
+ const struct pci_device_id *id;
};
static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -1052,38 +1053,18 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
}
static int
-mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
+mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_config_profile *profile,
struct mlxsw_res *res)
{
- u32 single_size, double_size, linear_size;
-
- if (!MLXSW_RES_VALID(res, KVD_SINGLE_MIN_SIZE) ||
- !MLXSW_RES_VALID(res, KVD_DOUBLE_MIN_SIZE) ||
- !profile->used_kvd_split_data)
- return -EIO;
-
- linear_size = profile->kvd_linear_size;
+ u64 single_size, double_size, linear_size;
+ int err;
- /* The hash part is what left of the kvd without the
- * linear part. It is split to the single size and
- * double size by the parts ratio from the profile.
- * Both sizes must be a multiplications of the
- * granularity from the profile.
- */
- double_size = MLXSW_RES_GET(res, KVD_SIZE) - linear_size;
- double_size *= profile->kvd_hash_double_parts;
- double_size /= profile->kvd_hash_double_parts +
- profile->kvd_hash_single_parts;
- double_size /= profile->kvd_hash_granularity;
- double_size *= profile->kvd_hash_granularity;
- single_size = MLXSW_RES_GET(res, KVD_SIZE) - double_size -
- linear_size;
-
- /* Check results are legal. */
- if (single_size < MLXSW_RES_GET(res, KVD_SINGLE_MIN_SIZE) ||
- double_size < MLXSW_RES_GET(res, KVD_DOUBLE_MIN_SIZE) ||
- MLXSW_RES_GET(res, KVD_SIZE) < linear_size)
- return -EIO;
+ err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
+ &single_size, &double_size,
+ &linear_size);
+ if (err)
+ return err;
MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
@@ -1184,7 +1165,7 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mbox, profile->adaptive_routing_group_cap);
}
if (MLXSW_RES_VALID(res, KVD_SIZE)) {
- err = mlxsw_pci_profile_get_kvd_sizes(profile, res);
+ err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
if (err)
return err;
@@ -1622,16 +1603,6 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
return err;
}
-static const struct mlxsw_bus mlxsw_pci_bus = {
- .kind = "pci",
- .init = mlxsw_pci_init,
- .fini = mlxsw_pci_fini,
- .skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
- .skb_transmit = mlxsw_pci_skb_transmit,
- .cmd_exec = mlxsw_pci_cmd_exec,
- .features = MLXSW_BUS_F_TXRX,
-};
-
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
const struct pci_device_id *id)
{
@@ -1660,6 +1631,41 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
return 0;
}
+static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+ pci_free_irq_vectors(mlxsw_pci->pdev);
+}
+
+static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+ int err;
+
+ err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
+ if (err < 0)
+ dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
+ return err;
+}
+
+static void mlxsw_pci_reset(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
+ mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
+ mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
+}
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+ .kind = "pci",
+ .init = mlxsw_pci_init,
+ .fini = mlxsw_pci_fini,
+ .skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
+ .skb_transmit = mlxsw_pci_skb_transmit,
+ .cmd_exec = mlxsw_pci_cmd_exec,
+ .features = MLXSW_BUS_F_TXRX,
+ .reset = mlxsw_pci_reset,
+};
+
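The new .reset bus callback gives the core a way to re-initialize the device in place. Note the ordering, which the probe and remove paths below mirror: the MSI-X vectors must be released before the SW reset and re-allocated afterwards, since the reset invalidates the interrupt setup. (The reload flow that invokes .reset is implied by the new arguments to mlxsw_core_bus_device_register(); it is not shown in this hunk.)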
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const char *driver_name = pdev->driver->name;
@@ -1721,7 +1727,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_sw_reset;
}
- err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
if (err < 0) {
dev_err(&pdev->dev, "MSI-X init failed\n");
goto err_msix_init;
@@ -1730,9 +1736,11 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.device_kind = driver_name;
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
+ mlxsw_pci->id = id;
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
- &mlxsw_pci_bus, mlxsw_pci);
+ &mlxsw_pci_bus, mlxsw_pci, false,
+ NULL);
if (err) {
dev_err(&pdev->dev, "cannot register bus device\n");
goto err_bus_device_register;
@@ -1741,7 +1749,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_bus_device_register:
- pci_free_irq_vectors(mlxsw_pci->pdev);
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_msix_init:
err_sw_reset:
iounmap(mlxsw_pci->hw_addr);
@@ -1760,8 +1768,8 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
{
struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
- mlxsw_core_bus_device_unregister(mlxsw_pci->core);
- pci_free_irq_vectors(mlxsw_pci->pdev);
+ mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
iounmap(mlxsw_pci->hw_addr);
pci_release_regions(mlxsw_pci->pdev);
pci_disable_device(mlxsw_pci->pdev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 6c4e08b8058a..0e08be41c8e0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4827,6 +4827,42 @@ static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index,
mlxsw_reg_ratr_counter_set_type_set(payload, set_type);
}
+/* RDPM - Router DSCP to Priority Mapping
+ * --------------------------------------
+ * Controls the mapping from DSCP field to switch priority on routed packets
+ */
+#define MLXSW_REG_RDPM_ID 0x8009
+#define MLXSW_REG_RDPM_BASE_LEN 0x00
+#define MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN 0x01
+#define MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT 64
+#define MLXSW_REG_RDPM_LEN 0x40
+#define MLXSW_REG_RDPM_LAST_ENTRY (MLXSW_REG_RDPM_BASE_LEN + \
+ MLXSW_REG_RDPM_LEN - \
+ MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN)
+
+MLXSW_REG_DEFINE(rdpm, MLXSW_REG_RDPM_ID, MLXSW_REG_RDPM_LEN);
+
+/* reg_rdpm_dscp_entry_e
+ * Enable update of the specific entry
+ * Access: Index
+ */
+MLXSW_ITEM8_INDEXED(reg, rdpm, dscp_entry_e, MLXSW_REG_RDPM_LAST_ENTRY, 7, 1,
+ -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_rdpm_dscp_entry_prio
+ * Switch Priority
+ * Access: RW
+ */
+MLXSW_ITEM8_INDEXED(reg, rdpm, dscp_entry_prio, MLXSW_REG_RDPM_LAST_ENTRY, 0, 4,
+ -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_rdpm_pack(char *payload, unsigned short index,
+ u8 prio)
+{
+ mlxsw_reg_rdpm_dscp_entry_e_set(payload, index, 1);
+ mlxsw_reg_rdpm_dscp_entry_prio_set(payload, index, prio);
+}
+
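RDPM records are indexed from the end of the register towards its start, which is what the negative step above expresses. A quick check with the constants (derived from the definitions above, no new values):

	/* MLXSW_REG_RDPM_LAST_ENTRY = 0x00 + 0x40 - 0x01 = 0x3f, so:
	 *   dscp index 0  -> byte 0x3f
	 *   dscp index 63 -> byte 0x3f - 63 = 0x00
	 */
	mlxsw_reg_rdpm_pack(payload, 0, prio);	/* touches byte 0x3f */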
/* RICNT - Router Interface Counter Register
* -----------------------------------------
* The RICNT register retrieves per port performance counters
@@ -7640,6 +7676,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rtar),
MLXSW_REG(ratr),
MLXSW_REG(rtdp),
+ MLXSW_REG(rdpm),
MLXSW_REG(ricnt),
MLXSW_REG(rrcr),
MLXSW_REG(ralta),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index c3837ca7a705..3dcc58d61506 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -76,12 +76,7 @@
#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1530
#define MLXSW_FWREV_SUBMINOR 152
-
-static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
- .major = MLXSW_FWREV_MAJOR,
- .minor = MLXSW_FWREV_MINOR,
- .subminor = MLXSW_FWREV_SUBMINOR
-};
+#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
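For example, with the constants above, MLXSW_FWREV_MINOR_TO_BRANCH(1530) == 15, so any 13.15xx.y firmware is considered compatible and left alone; a 13.14xx.y or 13.16xx.y revision takes the flashing path below, while a differing major version is treated as a hard error.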
#define MLXSW_SP_FW_FILENAME \
"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
@@ -339,28 +334,25 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
}
-static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
- const struct mlxsw_fw_rev *b)
-{
- if (a->major != b->major)
- return a->major > b->major;
- if (a->minor != b->minor)
- return a->minor > b->minor;
- return a->subminor >= b->subminor;
-}
-
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
const struct firmware *firmware;
int err;
- if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
+ /* Validate driver & FW are compatible */
+ if (rev->major != MLXSW_FWREV_MAJOR) {
+ WARN(1, "Mismatch in major FW version [%d:%d] is unexpected; please contact support\n",
+ rev->major, MLXSW_FWREV_MAJOR);
+ return -EINVAL;
+ }
+ if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) ==
+ MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR))
return 0;
- dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d out of data\n",
+ dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
rev->major, rev->minor, rev->subminor);
- dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
+ dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
MLXSW_SP_FW_FILENAME);
err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
@@ -576,7 +568,7 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
span_entry->used = false;
}
-static struct mlxsw_sp_span_entry *
+struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
int i;
@@ -677,13 +669,28 @@ mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type)
+ enum mlxsw_sp_span_type type,
+ bool bind)
{
- struct mlxsw_sp_span_inspected_port *inspected_port;
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
char mpar_pl[MLXSW_REG_MPAR_LEN];
- char sbib_pl[MLXSW_REG_SBIB_LEN];
int pa_id = span_entry->id;
+
+ /* bind the port to the SPAN entry */
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+ (enum mlxsw_reg_mpar_i_e) type, bind, pa_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+}
+
+static int
+mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ bool bind)
+{
+ struct mlxsw_sp_span_inspected_port *inspected_port;
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
int err;
/* if it is an egress SPAN, bind a shared buffer to it */
@@ -699,12 +706,12 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
}
}
- /* bind the port to the SPAN entry */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
- (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
- if (err)
- goto err_mpar_reg_write;
+ if (bind) {
+ err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ true);
+ if (err)
+ goto err_port_bind;
+ }
inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
if (!inspected_port) {
@@ -717,8 +724,11 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
return 0;
-err_mpar_reg_write:
err_inspected_port_alloc:
+ if (bind)
+ mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ false);
+err_port_bind:
if (type == MLXSW_SP_SPAN_EGRESS) {
mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
@@ -727,25 +737,22 @@ err_inspected_port_alloc:
}
static void
-mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type)
+mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ bool bind)
{
struct mlxsw_sp_span_inspected_port *inspected_port;
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char mpar_pl[MLXSW_REG_MPAR_LEN];
char sbib_pl[MLXSW_REG_SBIB_LEN];
- int pa_id = span_entry->id;
inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
if (!inspected_port)
return;
- /* remove the inspected port */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
- (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
-
+ if (bind)
+ mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ false);
/* remove the SBIB buffer if it was egress SPAN */
if (type == MLXSW_SP_SPAN_EGRESS) {
mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
@@ -758,9 +765,9 @@ mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
kfree(inspected_port);
}
-static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
- struct mlxsw_sp_port *to,
- enum mlxsw_sp_span_type type)
+int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+ struct mlxsw_sp_port *to,
+ enum mlxsw_sp_span_type type, bool bind)
{
struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
struct mlxsw_sp_span_entry *span_entry;
@@ -773,7 +780,7 @@ static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
span_entry->id);
- err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
+ err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
if (err)
goto err_port_bind;
@@ -784,9 +791,8 @@ err_port_bind:
return err;
}
-static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
- u8 destination_port,
- enum mlxsw_sp_span_type type)
+void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, u8 destination_port,
+ enum mlxsw_sp_span_type type, bool bind)
{
struct mlxsw_sp_span_entry *span_entry;
@@ -799,7 +805,7 @@ static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
span_entry->id);
- mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
+ mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1571,14 +1577,11 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
const struct tc_action *a,
bool ingress)
{
- struct net *net = dev_net(mlxsw_sp_port->dev);
enum mlxsw_sp_span_type span_type;
struct mlxsw_sp_port *to_port;
struct net_device *to_dev;
- int ifindex;
- ifindex = tcf_mirred_ifindex(a);
- to_dev = __dev_get_by_index(net, ifindex);
+ to_dev = tcf_mirred_dev(a);
if (!to_dev) {
netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
return -EINVAL;
@@ -1593,7 +1596,8 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
mirror->to_local_port = to_port->local_port;
mirror->ingress = ingress;
span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
+ return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type,
+ true);
}
static void
@@ -1604,8 +1608,8 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
span_type = mirror->ingress ?
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port,
- span_type);
+ mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->to_local_port,
+ span_type, true);
}
static int
@@ -1734,9 +1738,6 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_cls_matchall_offload *f,
bool ingress)
{
- if (f->common.chain_index)
- return -EOPNOTSUPP;
-
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
@@ -1750,72 +1751,187 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_flower_offload *f,
- bool ingress)
+mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
+ struct tc_cls_flower_offload *f)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
+
switch (f->command) {
case TC_CLSFLOWER_REPLACE:
- return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
+ return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
case TC_CLSFLOWER_DESTROY:
- mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
+ mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
return 0;
case TC_CLSFLOWER_STATS:
- return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
+ return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
default:
return -EOPNOTSUPP;
}
}
-static int mlxsw_sp_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv, bool ingress)
+static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv, bool ingress)
{
struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
- if (!tc_can_offload(mlxsw_sp_port->dev))
- return -EOPNOTSUPP;
-
switch (type) {
case TC_SETUP_CLSMATCHALL:
+ if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
+ type_data))
+ return -EOPNOTSUPP;
+
return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
ingress);
case TC_SETUP_CLSFLOWER:
- return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data,
- ingress);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
+ cb_priv, true);
+}
+
+static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
+ cb_priv, false);
+}
+
+static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct mlxsw_sp_acl_block *acl_block = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return 0;
+ case TC_SETUP_CLSFLOWER:
+ if (mlxsw_sp_acl_block_disabled(acl_block))
+ return -EOPNOTSUPP;
+
+ return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
default:
return -EOPNOTSUPP;
}
}
-static int mlxsw_sp_setup_tc_block_cb_ig(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+static int
+mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tcf_block *block, bool ingress)
{
- return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, true);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_acl_block *acl_block;
+ struct tcf_block_cb *block_cb;
+ int err;
+
+ block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp);
+ if (!block_cb) {
+ acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
+ if (!acl_block)
+ return -ENOMEM;
+ block_cb = __tcf_block_cb_register(block,
+ mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp, acl_block);
+ if (IS_ERR(block_cb)) {
+ err = PTR_ERR(block_cb);
+ goto err_cb_register;
+ }
+ } else {
+ acl_block = tcf_block_cb_priv(block_cb);
+ }
+ tcf_block_cb_incref(block_cb);
+ err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
+ mlxsw_sp_port, ingress);
+ if (err)
+ goto err_block_bind;
+
+ if (ingress)
+ mlxsw_sp_port->ing_acl_block = acl_block;
+ else
+ mlxsw_sp_port->eg_acl_block = acl_block;
+
+ return 0;
+
+err_block_bind:
+ if (!tcf_block_cb_decref(block_cb)) {
+ __tcf_block_cb_unregister(block_cb);
+err_cb_register:
+ mlxsw_sp_acl_block_destroy(acl_block);
+ }
+ return err;
}
-static int mlxsw_sp_setup_tc_block_cb_eg(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+static void
+mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tcf_block *block, bool ingress)
{
- return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, false);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_acl_block *acl_block;
+ struct tcf_block_cb *block_cb;
+ int err;
+
+ block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp);
+ if (!block_cb)
+ return;
+
+ if (ingress)
+ mlxsw_sp_port->ing_acl_block = NULL;
+ else
+ mlxsw_sp_port->eg_acl_block = NULL;
+
+ acl_block = tcf_block_cb_priv(block_cb);
+ err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
+ mlxsw_sp_port, ingress);
+ if (!err && !tcf_block_cb_decref(block_cb)) {
+ __tcf_block_cb_unregister(block_cb);
+ mlxsw_sp_acl_block_destroy(acl_block);
+ }
}
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_block_offload *f)
{
tc_setup_cb_t *cb;
+ bool ingress;
+ int err;
- if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- cb = mlxsw_sp_setup_tc_block_cb_ig;
- else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
- cb = mlxsw_sp_setup_tc_block_cb_eg;
- else
+ if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
+ ingress = true;
+ } else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
+ ingress = false;
+ } else {
return -EOPNOTSUPP;
+ }
switch (f->command) {
case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
- mlxsw_sp_port);
+ err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
+ mlxsw_sp_port);
+ if (err)
+ return err;
+ err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
+ f->block, ingress);
+ if (err) {
+ tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
+ return err;
+ }
+ return 0;
case TC_BLOCK_UNBIND:
+ mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
+ f->block, ingress);
tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
return 0;
default:
@@ -1833,11 +1949,69 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
case TC_SETUP_QDISC_RED:
return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_PRIO:
+ return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
default:
return -EOPNOTSUPP;
}
}
+
+static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ if (!enable) {
+ if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
+ mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
+ !list_empty(&mlxsw_sp_port->mall_tc_list)) {
+ netdev_err(dev, "Active offloaded tc filters; can't turn hw_tc_offload off\n");
+ return -EINVAL;
+ }
+ mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
+ mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
+ } else {
+ mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
+ mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
+ }
+ return 0;
+}
+
+typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
+
+static int mlxsw_sp_handle_feature(struct net_device *dev,
+ netdev_features_t wanted_features,
+ netdev_features_t feature,
+ mlxsw_sp_feature_handler feature_handler)
+{
+ netdev_features_t changes = wanted_features ^ dev->features;
+ bool enable = !!(wanted_features & feature);
+ int err;
+
+ if (!(changes & feature))
+ return 0;
+
+ err = feature_handler(dev, enable);
+ if (err) {
+ netdev_err(dev, "%s feature %pNF failed, err %d\n",
+ enable ? "Enable" : "Disable", &feature, err);
+ return err;
+ }
+
+ if (enable)
+ dev->features |= feature;
+ else
+ dev->features &= ~feature;
+
+ return 0;
+}
+static int mlxsw_sp_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
+ mlxsw_sp_feature_hw_tc);
+}
+
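mlxsw_sp_handle_feature() is deliberately generic so further features can be chained in the ndo_set_features handler. A hedged sketch of such an extension; NETIF_F_LOOPBACK and its handler are purely illustrative:

	static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
	{
		/* Illustrative stub: a real handler would program the device. */
		return 0;
	}

	static int mlxsw_sp_set_features_extended(struct net_device *dev,
						  netdev_features_t features)
	{
		int err;

		err = mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
					      mlxsw_sp_feature_hw_tc);
		if (err)
			return err;
		return mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
					       mlxsw_sp_feature_loopback);
	}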
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_open = mlxsw_sp_port_open,
.ndo_stop = mlxsw_sp_port_stop,
@@ -1852,6 +2026,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
+ .ndo_set_features = mlxsw_sp_set_features,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
@@ -3039,6 +3214,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_fids_init;
}
+ err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_qdiscs_init;
+ }
+
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
if (IS_ERR(mlxsw_sp_port_vlan)) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
@@ -3067,6 +3249,8 @@ err_register_netdev:
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
err_port_vlan_get:
+ mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
+err_port_qdiscs_init:
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
@@ -3102,6 +3286,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+ mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
@@ -3933,6 +4118,253 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
.resource_query_enable = 1,
};
+static bool
+mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack *extack,
+ u64 size)
+{
+ const struct mlxsw_config_profile *profile;
+
+ profile = &mlxsw_sp_config_profile;
+ if (size % profile->kvd_hash_granularity) {
+ NL_SET_ERR_MSG_MOD(extack, "resource set with wrong granularity");
+ return false;
+ }
+ return true;
+}
+
+static int
+mlxsw_sp_resource_kvd_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "kvd size cannot be changed");
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp_resource_kvd_linear_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+ return -EINVAL;
+
+ if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE)) {
+ NL_SET_ERR_MSG_MOD(extack, "hash single size is smaller than minimum");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+ return -EINVAL;
+
+ if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) {
+ NL_SET_ERR_MSG_MOD(extack, "hash double size is smaller than minimum");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
+}
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_size_validate,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_linear_size_validate,
+ .occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
+};
+
+static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
+static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
+static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
+static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
+
+static void
+mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
+{
+ u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
+ KVD_SINGLE_MIN_SIZE);
+ u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
+ KVD_DOUBLE_MIN_SIZE);
+ u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+ u32 linear_size_min = 0;
+
+ /* KVD top resource */
+ mlxsw_sp_kvd_size_params.size_min = kvd_size;
+ mlxsw_sp_kvd_size_params.size_max = kvd_size;
+ mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+ /* Linear part init */
+ mlxsw_sp_linear_size_params.size_min = linear_size_min;
+ mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min -
+ double_size_min;
+ mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+ /* Hash double part init */
+ mlxsw_sp_hash_double_size_params.size_min = double_size_min;
+ mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min -
+ linear_size_min;
+ mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+ /* Hash single part init */
+ mlxsw_sp_hash_single_size_params.size_min = single_size_min;
+ mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min -
+ linear_size_min;
+ mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+}
+
+static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ u32 kvd_size, single_size, double_size, linear_size;
+ const struct mlxsw_config_profile *profile;
+ int err;
+
+ profile = &mlxsw_sp_config_profile;
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
+ return -EIO;
+
+ mlxsw_sp_resource_size_params_prepare(mlxsw_core);
+ kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ true, kvd_size,
+ MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &mlxsw_sp_kvd_size_params,
+ &mlxsw_sp_resource_kvd_ops);
+ if (err)
+ return err;
+
+ linear_size = profile->kvd_linear_size;
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
+ false, linear_size,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_RESOURCE_KVD,
+ &mlxsw_sp_linear_size_params,
+ &mlxsw_sp_resource_kvd_linear_ops);
+ if (err)
+ return err;
+
+ double_size = kvd_size - linear_size;
+ double_size *= profile->kvd_hash_double_parts;
+ double_size /= profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts;
+ double_size = rounddown(double_size, profile->kvd_hash_granularity);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
+ false, double_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &mlxsw_sp_hash_double_size_params,
+ &mlxsw_sp_resource_kvd_hash_double_ops);
+ if (err)
+ return err;
+
+ single_size = kvd_size - double_size - linear_size;
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
+ false, single_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &mlxsw_sp_hash_single_size_params,
+ &mlxsw_sp_resource_kvd_hash_single_ops);
+ if (err)
+ return err;
+
+ return 0;
+}
+
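Once registered, the partition sizes become visible and adjustable from userspace through the devlink resource interface, along the lines of "devlink resource show pci/0000:03:00.0" and "devlink resource set pci/0000:03:00.0 path /kvd/linear size 98304" followed by a "devlink dev reload" (command forms assumed from the devlink resource API this series targets; the PCI address is an example).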
+static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ u32 double_size;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
+ !profile->used_kvd_split_data)
+ return -EIO;
+
+ /* The hash part is what is left of the KVD after the
+ * linear part is taken. It is split into single and
+ * double sizes according to the parts ratio from the
+ * profile, and both sizes must be multiples of the
+ * granularity from the profile. If the user provided
+ * the sizes, they are obtained via devlink.
+ */
+ err = devlink_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ p_linear_size);
+ if (err)
+ *p_linear_size = profile->kvd_linear_size;
+
+ err = devlink_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ p_double_size);
+ if (err) {
+ double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ *p_linear_size;
+ double_size *= profile->kvd_hash_double_parts;
+ double_size /= profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts;
+ *p_double_size = rounddown(double_size,
+ profile->kvd_hash_granularity);
+ }
+
+ err = devlink_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ p_single_size);
+ if (err)
+ *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ *p_double_size - *p_linear_size;
+
+ /* Check results are legal. */
+ if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
+ *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
+ return -EIO;
+
+ return 0;
+}
+
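A worked example of the fallback arithmetic in mlxsw_sp_kvd_sizes_get(), with assumed resource values (all numbers below are illustrative, not read from hardware):

	/* Assume: KVD_SIZE = 245760, kvd_linear_size = 98304,
	 *         kvd_hash_double_parts = 2, kvd_hash_single_parts = 1,
	 *         kvd_hash_granularity = 128.
	 *
	 * double = (245760 - 98304) * 2 / (2 + 1) = 98304
	 *          (already a multiple of 128, so rounddown() is a no-op)
	 * single = 245760 - 98304 - 98304 = 49152
	 *
	 * Provided both results clear their respective minimums, the
	 * function returns 0 with these three sizes.
	 */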
static struct mlxsw_driver mlxsw_sp_driver = {
.kind = mlxsw_sp_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
@@ -3952,6 +4384,8 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp_resources_register,
+ .kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp_config_profile,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 05ce1befd9b3..bdd8f94a452c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -66,6 +66,18 @@
#define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128
+#define MLXSW_SP_RESOURCE_NAME_KVD "kvd"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear"
+#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single"
+#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double"
+
+enum mlxsw_sp_resource_id {
+ MLXSW_SP_RESOURCE_KVD,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+};
+
struct mlxsw_sp_port;
struct mlxsw_sp_rif;
@@ -204,29 +216,6 @@ struct mlxsw_sp_port_vlan {
struct list_head bridge_vlan_node;
};
-enum mlxsw_sp_qdisc_type {
- MLXSW_SP_QDISC_NO_QDISC,
- MLXSW_SP_QDISC_RED,
-};
-
-struct mlxsw_sp_qdisc {
- u32 handle;
- enum mlxsw_sp_qdisc_type type;
- struct red_stats xstats_base;
- union {
- struct {
- u64 tail_drop_base;
- u64 ecn_base;
- u64 wred_drop_base;
- } red;
- } xstats;
-
- u64 tx_bytes;
- u64 tx_packets;
- u64 drops;
- u64 overlimits;
-};
-
/* No need for an internal lock; at worst we miss a single periodic iteration */
struct mlxsw_sp_port_xstats {
u64 ecn;
@@ -269,7 +258,10 @@ struct mlxsw_sp_port {
} periodic_hw_stats;
struct mlxsw_sp_port_sample *sample;
struct list_head vlans_list;
- struct mlxsw_sp_qdisc root_qdisc;
+ struct mlxsw_sp_qdisc *root_qdisc;
+ unsigned int acl_rule_count;
+ struct mlxsw_sp_acl_block *ing_acl_block;
+ struct mlxsw_sp_acl_block *eg_acl_block;
};
static inline bool
@@ -404,6 +396,16 @@ struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev);
+int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+ struct mlxsw_sp_port *to,
+ enum mlxsw_sp_span_type type,
+ bool bind);
+void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from,
+ u8 destination_port,
+ enum mlxsw_sp_span_type type,
+ bool bind);
+struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port);
/* spectrum_dcb.c */
#ifdef CONFIG_MLXSW_SPECTRUM_DCB
@@ -458,13 +460,13 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count,
unsigned int *p_alloc_size);
+u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
struct mlxsw_afa_block *act_block;
unsigned int counter_index;
- bool counter_valid;
};
enum mlxsw_sp_acl_profile {
@@ -477,8 +479,11 @@ struct mlxsw_sp_acl_profile_ops {
void *priv, void *ruleset_priv);
void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
- struct net_device *dev, bool ingress);
- void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+ void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
u16 (*ruleset_group_id)(void *ruleset_priv);
size_t rule_priv_size;
int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
@@ -498,17 +503,34 @@ struct mlxsw_sp_acl_ops {
enum mlxsw_sp_acl_profile profile);
};
+struct mlxsw_sp_acl_block;
struct mlxsw_sp_acl_ruleset;
/* spectrum_acl.c */
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
+struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
+unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
+void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block);
+void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block);
+bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block);
+struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
+ struct net *net);
+void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
+int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile);
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset);
@@ -532,6 +554,10 @@ int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
u16 group_id);
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_acl_block *block,
+ struct net_device *out_dev);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct net_device *out_dev);
@@ -575,16 +601,23 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
/* spectrum_flower.c */
-int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
-void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
-int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
/* spectrum_qdisc.c */
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p);
+int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p);
/* spectrum_fid.c */
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 93dcd315f7d6..0897a5435cc2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -39,6 +39,7 @@
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
+#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>
#include "reg.h"
@@ -70,9 +71,23 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
return acl->afk;
}
-struct mlxsw_sp_acl_ruleset_ht_key {
- struct net_device *dev; /* dev this ruleset is bound to */
+struct mlxsw_sp_acl_block_binding {
+ struct list_head list;
+ struct net_device *dev;
+ struct mlxsw_sp_port *mlxsw_sp_port;
bool ingress;
+};
+
+struct mlxsw_sp_acl_block {
+ struct list_head binding_list;
+ struct mlxsw_sp_acl_ruleset *ruleset_zero;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned int rule_count;
+ unsigned int disable_count;
+};
+
+struct mlxsw_sp_acl_ruleset_ht_key {
+ struct mlxsw_sp_acl_block *block;
u32 chain_index;
const struct mlxsw_sp_acl_profile_ops *ops;
};
@@ -118,8 +133,185 @@ struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
return mlxsw_sp->acl->dummy_fid;
}
+struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
+{
+ return block->mlxsw_sp;
+}
+
+unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block)
+{
+ return block ? block->rule_count : 0;
+}
+
+void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
+{
+ if (block)
+ block->disable_count++;
+}
+
+void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
+{
+ if (block)
+ block->disable_count--;
+}
+
+bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
+{
+ return block->disable_count;
+}
+
+static int
+mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_acl_block_binding *binding)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+ return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
+ binding->mlxsw_sp_port, binding->ingress);
+}
+
+static void
+mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_acl_block_binding *binding)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+ ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
+ binding->mlxsw_sp_port, binding->ingress);
+}
+
+static bool mlxsw_sp_acl_ruleset_block_bound(struct mlxsw_sp_acl_block *block)
+{
+ return block->ruleset_zero;
+}
+
+static int
+mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ struct mlxsw_sp_acl_block *block)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+ int err;
+
+ block->ruleset_zero = ruleset;
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding, &block->binding_list,
+ list)
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+ block->ruleset_zero = NULL;
+
+ return err;
+}
+
+static void
+mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ struct mlxsw_sp_acl_block *block)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+ block->ruleset_zero = NULL;
+}
+
+struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
+ struct net *net)
+{
+ struct mlxsw_sp_acl_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+ INIT_LIST_HEAD(&block->binding_list);
+ block->mlxsw_sp = mlxsw_sp;
+ return block;
+}
+
+void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
+{
+ WARN_ON(!list_empty(&block->binding_list));
+ kfree(block);
+}
+
+static struct mlxsw_sp_acl_block_binding *
+mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ if (binding->mlxsw_sp_port == mlxsw_sp_port &&
+ binding->ingress == ingress)
+ return binding;
+ return NULL;
+}
+
+int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+ int err;
+
+ if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
+ return -EEXIST;
+
+ binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+ if (!binding)
+ return -ENOMEM;
+ binding->mlxsw_sp_port = mlxsw_sp_port;
+ binding->ingress = ingress;
+
+ if (mlxsw_sp_acl_ruleset_block_bound(block)) {
+ err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+ if (err)
+ goto err_ruleset_bind;
+ }
+
+ list_add(&binding->list, &block->binding_list);
+ return 0;
+
+err_ruleset_bind:
+ kfree(binding);
+ return err;
+}
+
+int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
+ if (!binding)
+ return -ENOENT;
+
+ list_del(&binding->list);
+
+ if (mlxsw_sp_acl_ruleset_block_bound(block))
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+
+ kfree(binding);
+ return 0;
+}
+
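Note the lifecycle: a block is created on the first flower TC_BLOCK_BIND (see mlxsw_sp_setup_tc_block_flower_bind() in spectrum.c above), each port/direction pair is tracked on binding_list, and when a chain-0 ruleset exists (ruleset_zero) it is bound to and unbound from hardware for every binding as bindings come and go.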
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops)
{
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
@@ -132,6 +324,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
if (!ruleset)
return ERR_PTR(-ENOMEM);
ruleset->ref_count = 1;
+ ruleset->ht_key.block = block;
+ ruleset->ht_key.chain_index = chain_index;
ruleset->ht_key.ops = ops;
err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
@@ -142,68 +336,50 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_ops_ruleset_add;
- return ruleset;
-
-err_ops_ruleset_add:
- rhashtable_destroy(&ruleset->rule_ht);
-err_rhashtable_init:
- kfree(ruleset);
- return ERR_PTR(err);
-}
-
-static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_ruleset *ruleset)
-{
- const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
-
- ops->ruleset_del(mlxsw_sp, ruleset->priv);
- rhashtable_destroy(&ruleset->rule_ht);
- kfree(ruleset);
-}
-
-static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_ruleset *ruleset,
- struct net_device *dev, bool ingress,
- u32 chain_index)
-{
- const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
- struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
- int err;
-
- ruleset->ht_key.dev = dev;
- ruleset->ht_key.ingress = ingress;
- ruleset->ht_key.chain_index = chain_index;
err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
if (err)
- return err;
- if (!ruleset->ht_key.chain_index) {
+ goto err_ht_insert;
+
+ if (!chain_index) {
/* We only need ruleset with chain index 0, the implicit one,
* to be directly bound to device. The rest of the rulesets
* are bound by "Goto action set".
*/
- err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
+ err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
if (err)
- goto err_ops_ruleset_bind;
+ goto err_ruleset_bind;
}
- return 0;
-err_ops_ruleset_bind:
+ return ruleset;
+
+err_ruleset_bind:
rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
- return err;
+err_ht_insert:
+ ops->ruleset_del(mlxsw_sp, ruleset->priv);
+err_ops_ruleset_add:
+ rhashtable_destroy(&ruleset->rule_ht);
+err_rhashtable_init:
+ kfree(ruleset);
+ return ERR_PTR(err);
}
-static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_ruleset *ruleset)
+static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset)
{
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
+ u32 chain_index = ruleset->ht_key.chain_index;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
- if (!ruleset->ht_key.chain_index)
- ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
+ if (!chain_index)
+ mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
+ ops->ruleset_del(mlxsw_sp, ruleset->priv);
+ rhashtable_destroy(&ruleset->rule_ht);
+ kfree(ruleset);
}
static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
@@ -216,20 +392,18 @@ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
{
if (--ruleset->ref_count)
return;
- mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}
static struct mlxsw_sp_acl_ruleset *
-__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
- bool ingress, u32 chain_index,
+__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops)
{
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
memset(&ht_key, 0, sizeof(ht_key));
- ht_key.dev = dev;
- ht_key.ingress = ingress;
+ ht_key.block = block;
ht_key.chain_index = chain_index;
ht_key.ops = ops;
return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
@@ -237,8 +411,8 @@ __mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
}
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile)
{
const struct mlxsw_sp_acl_profile_ops *ops;
@@ -248,45 +422,31 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
ops = acl->ops->profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
- ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
- chain_index, ops);
+ ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
if (!ruleset)
return ERR_PTR(-ENOENT);
return ruleset;
}
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile)
{
const struct mlxsw_sp_acl_profile_ops *ops;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
- int err;
ops = acl->ops->profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
- ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
- chain_index, ops);
+ ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
if (ruleset) {
mlxsw_sp_acl_ruleset_ref_inc(ruleset);
return ruleset;
}
- ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
- if (IS_ERR(ruleset))
- return ruleset;
- err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev,
- ingress, chain_index);
- if (err)
- goto err_ruleset_bind;
- return ruleset;
-
-err_ruleset_bind:
- mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
- return ERR_PTR(err);
+ return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops);
}
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
@@ -302,27 +462,6 @@ u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
return ops->ruleset_group_id(ruleset->priv);
}
-static int
-mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_rule_info *rulei)
-{
- int err;
-
- err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
- if (err)
- return err;
- rulei->counter_valid = true;
- return 0;
-}
-
-static void
-mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_rule_info *rulei)
-{
- rulei->counter_valid = false;
- mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
-}
-
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
{
@@ -427,6 +566,34 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
local_port, in_port);
}
+int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_acl_block *block,
+ struct net_device *out_dev)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+ struct mlxsw_sp_port *out_port;
+ struct mlxsw_sp_port *in_port;
+
+ if (!list_is_singular(&block->binding_list))
+ return -EOPNOTSUPP;
+
+ binding = list_first_entry(&block->binding_list,
+ struct mlxsw_sp_acl_block_binding, list);
+ in_port = binding->mlxsw_sp_port;
+ if (!mlxsw_sp_port_dev_check(out_dev))
+ return -EINVAL;
+
+ out_port = netdev_priv(out_dev);
+ if (out_port->mlxsw_sp != mlxsw_sp)
+ return -EINVAL;
+
+ return mlxsw_afa_block_append_mirror(rulei->act_block,
+ in_port->local_port,
+ out_port->local_port,
+ binding->ingress);
+}
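Mirroring is only offloaded when the ACL block is bound to exactly one port, since the SPAN agent needs an unambiguous source port; list_is_singular() enforces that before the first binding is dereferenced. A hedged userspace model of the same guard (types hypothetical, not the driver's structures):

#include <stdbool.h>

struct demo_binding { struct demo_binding *next; int port; };

static bool demo_bindings_singular(const struct demo_binding *head)
{
	return head && !head->next;	/* exactly one element */
}

static int demo_mirror_in_port(const struct demo_binding *head)
{
	if (!demo_bindings_singular(head))
		return -1;	/* the driver returns -EOPNOTSUPP here */
	return head->port;	/* unambiguous "from" port */
}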
+
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u32 action, u16 vid, u16 proto, u8 prio)
@@ -459,7 +626,7 @@ int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei)
{
return mlxsw_afa_block_append_counter(rulei->act_block,
- rulei->counter_index);
+ &rulei->counter_index);
}
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
@@ -493,13 +660,8 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
goto err_rulei_create;
}
- err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
- if (err)
- goto err_counter_alloc;
return rule;
-err_counter_alloc:
- mlxsw_sp_acl_rulei_destroy(rule->rulei);
err_rulei_create:
kfree(rule);
err_alloc:
@@ -512,7 +674,6 @@ void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
- mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
mlxsw_sp_acl_rulei_destroy(rule->rulei);
kfree(rule);
mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
@@ -535,6 +696,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
goto err_rhashtable_insert;
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
+ ruleset->ht_key.block->rule_count++;
return 0;
err_rhashtable_insert:
@@ -548,6 +710,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ ruleset->ht_key.block->rule_count--;
list_del(&rule->list);
rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
mlxsw_sp_acl_rule_ht_params);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index 4d3340ed0291..6ca6894125f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -108,11 +108,77 @@ static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}
+static int
+mlxsw_sp_act_counter_index_get(void *priv, unsigned int *p_counter_index)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ return mlxsw_sp_flow_counter_alloc(mlxsw_sp, p_counter_index);
+}
+
+static void
+mlxsw_sp_act_counter_index_put(void *priv, unsigned int counter_index)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
+}
+
+static int
+mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, u8 local_out_port,
+ bool ingress, int *p_span_id)
+{
+ struct mlxsw_sp_port *in_port, *out_port;
+ struct mlxsw_sp_span_entry *span_entry;
+ struct mlxsw_sp *mlxsw_sp = priv;
+ enum mlxsw_sp_span_type type;
+ int err;
+
+ type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+ out_port = mlxsw_sp->ports[local_out_port];
+ in_port = mlxsw_sp->ports[local_in_port];
+
+ err = mlxsw_sp_span_mirror_add(in_port, out_port, type, false);
+ if (err)
+ return err;
+
+ span_entry = mlxsw_sp_span_entry_find(mlxsw_sp, local_out_port);
+ if (!span_entry) {
+ err = -ENOENT;
+ goto err_span_entry_find;
+ }
+
+ *p_span_id = span_entry->id;
+ return 0;
+
+err_span_entry_find:
+ mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false);
+ return err;
+}
+
+static void
+mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, u8 local_out_port,
+ bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *in_port;
+ enum mlxsw_sp_span_type type;
+
+ type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+ in_port = mlxsw_sp->ports[local_in_port];
+
+ mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false);
+}
+
static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
.kvdl_set_add = mlxsw_sp_act_kvdl_set_add,
.kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
.kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
.kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
+ .counter_index_get = mlxsw_sp_act_counter_index_get,
+ .counter_index_put = mlxsw_sp_act_counter_index_put,
+ .mirror_add = mlxsw_sp_act_mirror_add,
+ .mirror_del = mlxsw_sp_act_mirror_del,
};
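This ops table is what lets the generic flex-actions core allocate counters and SPAN entries without knowing anything Spectrum-specific: the core holds only a priv pointer and a set of callbacks. A minimal sketch of that indirection in plain C (all names hypothetical):

#include <stdio.h>

struct demo_afa_ops {
	int (*counter_index_get)(void *priv, unsigned int *p_index);
	void (*counter_index_put)(void *priv, unsigned int index);
};

static int demo_counter_get(void *priv, unsigned int *p_index)
{
	unsigned int *next = priv;	/* trivial allocator state */

	*p_index = (*next)++;
	return 0;
}

static void demo_counter_put(void *priv, unsigned int index)
{
	(void)priv;
	printf("freed counter %u\n", index);
}

int main(void)
{
	unsigned int next = 0, idx;
	const struct demo_afa_ops ops = {
		.counter_index_get = demo_counter_get,
		.counter_index_put = demo_counter_put,
	};

	ops.counter_index_get(&next, &idx);	/* idx == 0 */
	ops.counter_index_put(&next, idx);
	return 0;
}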
int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 7e8284b46968..c6e180c2be1e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -154,10 +154,6 @@ struct mlxsw_sp_acl_tcam_group {
struct list_head region_list;
unsigned int region_count;
struct rhashtable chunk_ht;
- struct {
- u16 local_port;
- bool ingress;
- } bound;
struct mlxsw_sp_acl_tcam_group_ops *ops;
const struct mlxsw_sp_acl_tcam_pattern *patterns;
unsigned int patterns_count;
@@ -262,35 +258,29 @@ static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
- struct net_device *dev, bool ingress)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
- struct mlxsw_sp_port *mlxsw_sp_port;
char ppbt_pl[MLXSW_REG_PPBT_LEN];
- if (!mlxsw_sp_port_dev_check(dev))
- return -EINVAL;
-
- mlxsw_sp_port = netdev_priv(dev);
- group->bound.local_port = mlxsw_sp_port->local_port;
- group->bound.ingress = ingress;
- mlxsw_reg_ppbt_pack(ppbt_pl,
- group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
- MLXSW_REG_PXBT_E_EACL,
- MLXSW_REG_PXBT_OP_BIND, group->bound.local_port,
+ mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+ MLXSW_REG_PXBT_E_EACL,
+ MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
group->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_group *group)
+ struct mlxsw_sp_acl_tcam_group *group,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
char ppbt_pl[MLXSW_REG_PPBT_LEN];
- mlxsw_reg_ppbt_pack(ppbt_pl,
- group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
- MLXSW_REG_PXBT_E_EACL,
- MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port,
+ mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+ MLXSW_REG_PXBT_E_EACL,
+ MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
group->id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
@@ -1056,21 +1046,25 @@ mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
void *ruleset_priv,
- struct net_device *dev, bool ingress)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
- dev, ingress);
+ mlxsw_sp_port, ingress);
}
static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
- void *ruleset_priv)
+ void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
- mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group);
+ mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
+ mlxsw_sp_port, ingress);
}
static u16
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 96fdba78acab..f56fa18d6b26 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -771,14 +771,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = {
.size_get = mlxsw_sp_dpipe_table_host4_size_get,
};
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4 1
+
static int mlxsw_sp_dpipe_host4_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
- &mlxsw_sp_host4_ops,
- mlxsw_sp, false);
+ err = devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ &mlxsw_sp_host4_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devlink_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devlink_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
+ return err;
}
static void mlxsw_sp_dpipe_host4_table_fini(struct mlxsw_sp *mlxsw_sp)
@@ -829,14 +848,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = {
.size_get = mlxsw_sp_dpipe_table_host6_size_get,
};
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6 2
+
static int mlxsw_sp_dpipe_host6_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
- &mlxsw_sp_host6_ops,
- mlxsw_sp, false);
+ err = devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ &mlxsw_sp_host6_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devlink_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devlink_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
+ return err;
}
static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp)
@@ -1213,14 +1251,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
.size_get = mlxsw_sp_dpipe_table_adj_size_get,
};
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ 1
+
static int mlxsw_sp_dpipe_adj_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
- &mlxsw_sp_dpipe_table_adj_ops,
- mlxsw_sp, false);
+ err = devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ &mlxsw_sp_dpipe_table_adj_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devlink_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devlink_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+ return err;
}
static void mlxsw_sp_dpipe_adj_table_fini(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 2f0e57857ea4..6ce00e28d4ea 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
+#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
@@ -45,7 +46,7 @@
#include "core_acl_flex_keys.h"
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
- struct net_device *dev, bool ingress,
+ struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
struct tcf_exts *exts)
{
@@ -80,8 +81,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset;
u16 group_id;
- ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, dev,
- ingress,
+ ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
@@ -92,7 +92,6 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
} else if (is_tcf_mirred_egress_redirect(a)) {
- int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
struct mlxsw_sp_fid *fid;
u16 fid_index;
@@ -104,14 +103,18 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- out_dev = __dev_get_by_index(dev_net(dev), ifindex);
- if (out_dev == dev)
- out_dev = NULL;
-
+ out_dev = tcf_mirred_dev(a);
err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
out_dev);
if (err)
return err;
+ } else if (is_tcf_mirred_egress_mirror(a)) {
+ struct net_device *out_dev = tcf_mirred_dev(a);
+
+ err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
+ block, out_dev);
+ if (err)
+ return err;
} else if (is_tcf_vlan(a)) {
u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
u32 action = tcf_vlan_action(a);
@@ -266,7 +269,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
- struct net_device *dev, bool ingress,
+ struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
struct tc_cls_flower_offload *f)
{
@@ -384,21 +387,19 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, ingress,
- rulei, f->exts);
+ return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts);
}
-int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct net_device *dev = mlxsw_sp_port->dev;
struct mlxsw_sp_acl_rule_info *rulei;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
int err;
- ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
@@ -411,7 +412,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
}
rulei = mlxsw_sp_acl_rule_rulei(rule);
- err = mlxsw_sp_flower_parse(mlxsw_sp, dev, ingress, rulei, f);
+ err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
if (err)
goto err_flower_parse;
@@ -435,15 +436,15 @@ err_rule_create:
return err;
}
-void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
- ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
- ingress, f->common.chain_index,
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
return;
@@ -457,10 +458,10 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
-int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
u64 packets;
@@ -468,8 +469,8 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
u64 bytes;
int err;
- ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
- ingress, f->common.chain_index,
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (WARN_ON(IS_ERR(ruleset)))
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index 310c38247b5c..55f9d2d70f9e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -286,6 +286,32 @@ static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_kvdl_part_fini(mlxsw_sp, i);
}
+static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
+{
+ unsigned int nr_entries;
+ int bit = -1;
+ u64 occ = 0;
+
+ nr_entries = (part->info->end_index -
+ part->info->start_index + 1) /
+ part->info->alloc_size;
+ while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
+ < nr_entries)
+ occ += part->info->alloc_size;
+ return occ;
+}
+
+u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_kvdl_part *part;
+ u64 occ = 0;
+
+ list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list)
+ occ += mlxsw_sp_kvdl_part_occ(part);
+
+ return occ;
+}
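mlxsw_sp_kvdl_part_occ() treats each set bit in the usage bitmap as one allocated chunk of alloc_size entries, so occupancy is set-bit count times chunk size. A standalone sketch of the same accounting, scanning words directly instead of using find_next_bit() (illustrative only):

#include <stdint.h>
#include <stdio.h>

static uint64_t demo_part_occ(const unsigned long *usage,
			      unsigned int nr_entries,
			      unsigned int alloc_size)
{
	uint64_t occ = 0;
	unsigned int bit;

	for (bit = 0; bit < nr_entries; bit++)
		if (usage[bit / (8 * sizeof(unsigned long))] &
		    (1UL << (bit % (8 * sizeof(unsigned long)))))
			occ += alloc_size;	/* one chunk per set bit */
	return occ;
}

int main(void)
{
	unsigned long usage[1] = { 0x5 };	/* chunks 0 and 2 in use */

	/* 2 set bits * 32 entries each -> prints 64 */
	printf("%llu\n", (unsigned long long)demo_part_occ(usage, 16, 32));
	return 0;
}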
+
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
index 34a0b632e5dd..4c7f32d4288d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -243,7 +243,8 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
if (!afa_block)
return ERR_PTR(-ENOMEM);
- err = mlxsw_afa_block_append_counter(afa_block, counter_index);
+ err = mlxsw_afa_block_append_allocated_counter(afa_block,
+ counter_index);
if (err)
goto err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index b5397da94d7f..0b7670459051 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -41,6 +41,150 @@
#include "spectrum.h"
#include "reg.h"
+#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
+
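With IEEE_8021QAZ_MAX_TCS equal to 8, the macro maps prio band 0 (the highest-priority band) to the numerically highest traffic class and band 7 to class 0. A standalone check of the mapping, using a local stand-in macro for illustration:

#include <stdio.h>

#define DEMO_MAX_TCS 8
#define DEMO_BAND_TO_TCLASS(band) (DEMO_MAX_TCS - (band) - 1)

int main(void)
{
	int band;

	for (band = 0; band < DEMO_MAX_TCS; band++)
		printf("band %d -> tclass %d\n", band,
		       DEMO_BAND_TO_TCLASS(band));
	/* band 0 -> tclass 7, ..., band 7 -> tclass 0 */
	return 0;
}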
+enum mlxsw_sp_qdisc_type {
+ MLXSW_SP_QDISC_NO_QDISC,
+ MLXSW_SP_QDISC_RED,
+ MLXSW_SP_QDISC_PRIO,
+};
+
+struct mlxsw_sp_qdisc_ops {
+ enum mlxsw_sp_qdisc_type type;
+ int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params);
+ int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+ int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+ int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr);
+ int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *xstats_ptr);
+ void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+ /* unoffload - to be used for a qdisc that stops being offloaded without
+ * being destroyed.
+ */
+ void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+};
+
+struct mlxsw_sp_qdisc {
+ u32 handle;
+ u8 tclass_num;
+ union {
+ struct red_stats red;
+ } xstats_base;
+ struct mlxsw_sp_qdisc_stats {
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 drops;
+ u64 overlimits;
+ u64 backlog;
+ } stats_base;
+
+ struct mlxsw_sp_qdisc_ops *ops;
+};
+
+static bool
+mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
+ enum mlxsw_sp_qdisc_type type)
+{
+ return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->type == type &&
+ mlxsw_sp_qdisc->handle == handle;
+}
+
+static int
+mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ int err = 0;
+
+ if (!mlxsw_sp_qdisc)
+ return 0;
+
+ if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
+ err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
+ mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
+ mlxsw_sp_qdisc->ops = NULL;
+ return err;
+}
+
+static int
+mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_ops *ops, void *params)
+{
+ int err;
+
+ if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
+		/* If this location already holds a qdisc of the same type,
+		 * the old configuration can simply be overridden. Otherwise,
+		 * the old qdisc must be removed before the new one is set.
+		 */
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+ if (err)
+ goto err_bad_param;
+
+ err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+ if (err)
+ goto err_config;
+
+ if (mlxsw_sp_qdisc->handle != handle) {
+ mlxsw_sp_qdisc->ops = ops;
+ if (ops->clean_stats)
+ ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
+ }
+
+ mlxsw_sp_qdisc->handle = handle;
+ return 0;
+
+err_bad_param:
+err_config:
+ if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
+ ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ return err;
+}
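The replace flow above has three cases worth spelling out: a type change destroys the old qdisc first, a reconfiguration under the same handle keeps its accumulated stats, and a failure on a previously offloaded handle is unoffloaded before teardown. A condensed, hypothetical userspace sketch of that decision flow:

#include <stdio.h>

struct demo_qdisc { unsigned int handle; int type; };	/* type 0 == none */

static void demo_replace(struct demo_qdisc *q, unsigned int handle,
			 int type, int params_ok)
{
	if (q->type && q->type != type) {	/* type changed: start clean */
		printf("destroy old handle %x\n", q->handle);
		q->handle = 0;
		q->type = 0;
	}
	if (!params_ok) {
		if (q->handle == handle)	/* was offloaded under this handle */
			printf("unoffload handle %x\n", handle);
		q->handle = 0;
		q->type = 0;
		return;
	}
	if (q->handle != handle)		/* new handle: reset stat bases */
		printf("clean stats for new handle %x\n", handle);
	q->type = type;
	q->handle = handle;
}

int main(void)
{
	struct demo_qdisc q = { 0, 0 };

	demo_replace(&q, 0x10000, 1, 1);	/* fresh qdisc: clean stats */
	demo_replace(&q, 0x10000, 1, 1);	/* same handle: stats kept */
	demo_replace(&q, 0x10000, 1, 0);	/* bad params: unoffload */
	return 0;
}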
+
+static int
+mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->get_stats)
+ return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
+ mlxsw_sp_qdisc,
+ stats_ptr);
+
+ return -EOPNOTSUPP;
+}
+
+static int
+mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *xstats_ptr)
+{
+ if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->get_xstats)
+ return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
+ mlxsw_sp_qdisc,
+ xstats_ptr);
+
+ return -EOPNOTSUPP;
+}
+
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
int tclass_num, u32 min, u32 max,
@@ -80,80 +224,78 @@ mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
}
static void
-mlxsw_sp_setup_tc_qdisc_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num)
+mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct rtnl_link_stats64 *stats;
+ struct red_stats *red_base;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+ red_base = &mlxsw_sp_qdisc->xstats_base.red;
- mlxsw_sp_qdisc->tx_packets = stats->tx_packets;
- mlxsw_sp_qdisc->tx_bytes = stats->tx_bytes;
+ stats_base->tx_packets = stats->tx_packets;
+ stats_base->tx_bytes = stats->tx_bytes;
- switch (mlxsw_sp_qdisc->type) {
- case MLXSW_SP_QDISC_RED:
- xstats_base->prob_mark = xstats->ecn;
- xstats_base->prob_drop = xstats->wred_drop[tclass_num];
- xstats_base->pdrop = xstats->tail_drop[tclass_num];
+ red_base->prob_mark = xstats->ecn;
+ red_base->prob_drop = xstats->wred_drop[tclass_num];
+ red_base->pdrop = xstats->tail_drop[tclass_num];
- mlxsw_sp_qdisc->overlimits = xstats_base->prob_drop +
- xstats_base->prob_mark;
- mlxsw_sp_qdisc->drops = xstats_base->prob_drop +
- xstats_base->pdrop;
- break;
- default:
- break;
- }
+ stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
+ stats_base->drops = red_base->prob_drop + red_base->pdrop;
+
+ stats_base->backlog = 0;
}
static int
-mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num)
+mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- int err;
-
- if (mlxsw_sp_qdisc->handle != handle)
- return 0;
-
- err = mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
- mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
- mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_NO_QDISC;
-
- return err;
+ return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
+ mlxsw_sp_qdisc->tclass_num);
}
static int
-mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num,
- struct tc_red_qopt_offload_params *p)
+mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u32 min, max;
- u64 prob;
- int err = 0;
+ struct tc_red_qopt_offload_params *p = params;
if (p->min > p->max) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: min %u is bigger then max %u\n", p->min,
p->max);
- goto err_bad_param;
+ return -EINVAL;
}
if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: max value %u is too big\n", p->max);
- goto err_bad_param;
+ return -EINVAL;
}
if (p->min == 0 || p->max == 0) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: 0 value is illegal for min and max\n");
- goto err_bad_param;
+ return -EINVAL;
}
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct tc_red_qopt_offload_params *p = params;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ u32 min, max;
+ u64 prob;
/* calculate probability in percentage */
prob = p->probability;
@@ -162,116 +304,309 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
prob = DIV_ROUND_UP(prob, 1 << 16);
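Assuming the offloaded RED probability is a 16-bit fixed-point fraction (65536 encodes 100%), the rounded-up conversion to an integer percentage works out as below; this is a sketch under that assumption, not the driver's code:

#include <stdio.h>
#include <stdint.h>

#define DEMO_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* percent = ceil(fixed * 100 / 2^16), with 'fixed' a fraction of 65536 */
static unsigned int demo_fixed16_to_percent(uint64_t fixed)
{
	return (unsigned int)DEMO_DIV_ROUND_UP(fixed * 100, 1 << 16);
}

int main(void)
{
	printf("%u\n", demo_fixed16_to_percent(65536));	/* 100 */
	printf("%u\n", demo_fixed16_to_percent(32768));	/* 50 */
	printf("%u\n", demo_fixed16_to_percent(1));	/* rounds up to 1 */
	return 0;
}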
min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
- err = mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
- max, prob, p->is_ecn);
- if (err)
- goto err_config;
-
- mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_RED;
- if (mlxsw_sp_qdisc->handle != handle)
- mlxsw_sp_setup_tc_qdisc_clean_stats(mlxsw_sp_port,
- mlxsw_sp_qdisc,
- tclass_num);
+ return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
+ max, prob, p->is_ecn);
+}
- mlxsw_sp_qdisc->handle = handle;
- return 0;
+static void
+mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_red_qopt_offload_params *p = params;
+ u64 backlog;
-err_bad_param:
- err = -EINVAL;
-err_config:
- mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, mlxsw_sp_qdisc->handle,
- mlxsw_sp_qdisc, tclass_num);
- return err;
+ backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_qdisc->stats_base.backlog);
+ p->qstats->backlog -= backlog;
}
static int
-mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num, struct red_stats *res)
+ void *xstats_ptr)
{
- struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+ struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
-
- if (mlxsw_sp_qdisc->handle != handle ||
- mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
- return -EOPNOTSUPP;
+ struct red_stats *res = xstats_ptr;
+ int early_drops, marks, pdrops;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- res->prob_drop = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
- res->prob_mark = xstats->ecn - xstats_base->prob_mark;
- res->pdrop = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+ early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
+ marks = xstats->ecn - xstats_base->prob_mark;
+ pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+
+ res->pdrop += pdrops;
+ res->prob_drop += early_drops;
+ res->prob_mark += marks;
+
+ xstats_base->pdrop += pdrops;
+ xstats_base->prob_drop += early_drops;
+ xstats_base->prob_mark += marks;
return 0;
}
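The stats callbacks treat the hardware counters as free-running: each read reports the delta since the recorded base and then advances the base, so a qdisc only accounts traffic from its own lifetime. A minimal sketch of that snapshot pattern (names hypothetical):

#include <stdio.h>
#include <stdint.h>

struct demo_snap { uint64_t base; };

static uint64_t demo_read_delta(struct demo_snap *s, uint64_t hw_counter)
{
	uint64_t delta = hw_counter - s->base;

	s->base = hw_counter;	/* the next read starts from here */
	return delta;
}

int main(void)
{
	struct demo_snap s = { .base = 1000 };	/* taken at qdisc create */

	printf("%llu\n", (unsigned long long)demo_read_delta(&s, 1250)); /* 250 */
	printf("%llu\n", (unsigned long long)demo_read_delta(&s, 1300)); /* 50 */
	return 0;
}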
static int
-mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num,
- struct tc_red_qopt_offload_stats *res)
+ struct tc_qopt_offload_stats *stats_ptr)
{
- u64 tx_bytes, tx_packets, overlimits, drops;
+ u64 tx_bytes, tx_packets, overlimits, drops, backlog;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct rtnl_link_stats64 *stats;
- if (mlxsw_sp_qdisc->handle != handle ||
- mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
- return -EOPNOTSUPP;
-
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
- tx_bytes = stats->tx_bytes - mlxsw_sp_qdisc->tx_bytes;
- tx_packets = stats->tx_packets - mlxsw_sp_qdisc->tx_packets;
+ tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
+ tx_packets = stats->tx_packets - stats_base->tx_packets;
overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
- mlxsw_sp_qdisc->overlimits;
+ stats_base->overlimits;
drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
- mlxsw_sp_qdisc->drops;
-
- _bstats_update(res->bstats, tx_bytes, tx_packets);
- res->qstats->overlimits += overlimits;
- res->qstats->drops += drops;
- res->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- xstats->backlog[tclass_num]);
-
- mlxsw_sp_qdisc->drops += drops;
- mlxsw_sp_qdisc->overlimits += overlimits;
- mlxsw_sp_qdisc->tx_bytes += tx_bytes;
- mlxsw_sp_qdisc->tx_packets += tx_packets;
+ stats_base->drops;
+ backlog = xstats->backlog[tclass_num];
+
+ _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+ stats_ptr->qstats->overlimits += overlimits;
+ stats_ptr->qstats->drops += drops;
+ stats_ptr->qstats->backlog +=
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ backlog) -
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ stats_base->backlog);
+
+ stats_base->backlog = backlog;
+ stats_base->drops += drops;
+ stats_base->overlimits += overlimits;
+ stats_base->tx_bytes += tx_bytes;
+ stats_base->tx_packets += tx_packets;
return 0;
}
#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
+ .type = MLXSW_SP_QDISC_RED,
+ .check_params = mlxsw_sp_qdisc_red_check_params,
+ .replace = mlxsw_sp_qdisc_red_replace,
+ .unoffload = mlxsw_sp_qdisc_red_unoffload,
+ .destroy = mlxsw_sp_qdisc_red_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_red_stats,
+ .get_xstats = mlxsw_sp_qdisc_get_red_xstats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
+};
+
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- int tclass_num;
if (p->parent != TC_H_ROOT)
return -EOPNOTSUPP;
- mlxsw_sp_qdisc = &mlxsw_sp_port->root_qdisc;
- tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+ mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
+
+ if (p->command == TC_RED_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_red,
+ &p->set);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_RED))
+ return -EOPNOTSUPP;
switch (p->command) {
- case TC_RED_REPLACE:
- return mlxsw_sp_qdisc_red_replace(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- &p->set);
case TC_RED_DESTROY:
- return mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num);
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
case TC_RED_XSTATS:
- return mlxsw_sp_qdisc_get_red_xstats(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- p->xstats);
+ return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->xstats);
case TC_RED_STATS:
- return mlxsw_sp_qdisc_get_red_stats(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- &p->stats);
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
default:
return -EOPNOTSUPP;
}
}
+
+static int
+mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ int i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
+ MLXSW_SP_PORT_DEFAULT_TCLASS);
+
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+
+ if (p->bands > IEEE_8021QAZ_MAX_TCS)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+ int tclass, i;
+ int err;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->priomap[i]);
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, tclass);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+ u64 backlog;
+
+ backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_qdisc->stats_base.backlog);
+ p->qstats->backlog -= backlog;
+}
+
+static int
+mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
+ struct mlxsw_sp_qdisc_stats *stats_base;
+ struct mlxsw_sp_port_xstats *xstats;
+ struct rtnl_link_stats64 *stats;
+ int i;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+
+ tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
+ tx_packets = stats->tx_packets - stats_base->tx_packets;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ drops += xstats->tail_drop[i];
+ backlog += xstats->backlog[i];
+ }
+ drops = drops - stats_base->drops;
+
+ _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+ stats_ptr->qstats->drops += drops;
+ stats_ptr->qstats->backlog +=
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ backlog) -
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ stats_base->backlog);
+ stats_base->backlog = backlog;
+ stats_base->drops += drops;
+ stats_base->tx_bytes += tx_bytes;
+ stats_base->tx_packets += tx_packets;
+ return 0;
+}
+
+static void
+mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc_stats *stats_base;
+ struct mlxsw_sp_port_xstats *xstats;
+ struct rtnl_link_stats64 *stats;
+ int i;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+
+ stats_base->tx_packets = stats->tx_packets;
+ stats_base->tx_bytes = stats->tx_bytes;
+
+ stats_base->drops = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ stats_base->drops += xstats->tail_drop[i];
+
+ mlxsw_sp_qdisc->stats_base.backlog = 0;
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
+ .type = MLXSW_SP_QDISC_PRIO,
+ .check_params = mlxsw_sp_qdisc_prio_check_params,
+ .replace = mlxsw_sp_qdisc_prio_replace,
+ .unoffload = mlxsw_sp_qdisc_prio_unoffload,
+ .destroy = mlxsw_sp_qdisc_prio_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_prio_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
+};
+
+int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ if (p->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
+ if (p->command == TC_PRIO_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_prio,
+ &p->replace_params);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_PRIO))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_PRIO_DESTROY:
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ case TC_PRIO_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->root_qdisc = kzalloc(sizeof(*mlxsw_sp_port->root_qdisc),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->root_qdisc)
+ return -ENOMEM;
+
+ mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+
+ return 0;
+}
+
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->root_qdisc);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 7042c855a5d6..f0b25baba09a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -71,6 +71,7 @@
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
+struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;
@@ -84,6 +85,8 @@ struct mlxsw_sp_router {
struct rhashtable nexthop_ht;
struct list_head nexthop_list;
struct {
+ /* One tree for each protocol: IPv4 and IPv6 */
+ struct mlxsw_sp_lpm_tree *proto_trees[2];
struct mlxsw_sp_lpm_tree *trees;
unsigned int tree_count;
} lpm;
@@ -162,6 +165,15 @@ struct mlxsw_sp_rif_ops {
struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};
+static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
+static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree);
+static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fib *fib,
+ u8 tree_id);
+static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fib *fib);
+
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir)
@@ -349,14 +361,6 @@ mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}
-static bool
-mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
-{
- struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
-
- return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
-}
-
static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
struct mlxsw_sp_prefix_usage *prefix_usage2)
@@ -398,7 +402,6 @@ enum mlxsw_sp_fib_entry_type {
};
struct mlxsw_sp_nexthop_group;
-struct mlxsw_sp_fib;
struct mlxsw_sp_fib_node {
struct list_head entry_list;
@@ -445,6 +448,7 @@ struct mlxsw_sp_lpm_tree {
u8 id; /* tree ID */
unsigned int ref_count;
enum mlxsw_sp_l3proto proto;
+ unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
};
@@ -453,8 +457,6 @@ struct mlxsw_sp_fib {
struct list_head node_list;
struct mlxsw_sp_vr *vr;
struct mlxsw_sp_lpm_tree *lpm_tree;
- unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
- struct mlxsw_sp_prefix_usage prefix_usage;
enum mlxsw_sp_l3proto proto;
};
@@ -469,12 +471,15 @@ struct mlxsw_sp_vr {
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
-static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
+static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr,
enum mlxsw_sp_l3proto proto)
{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_fib *fib;
int err;
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
fib = kzalloc(sizeof(*fib), GFP_KERNEL);
if (!fib)
return ERR_PTR(-ENOMEM);
@@ -484,17 +489,26 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
INIT_LIST_HEAD(&fib->node_list);
fib->proto = proto;
fib->vr = vr;
+ fib->lpm_tree = lpm_tree;
+ mlxsw_sp_lpm_tree_hold(lpm_tree);
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
+ if (err)
+ goto err_lpm_tree_bind;
return fib;
+err_lpm_tree_bind:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
kfree(fib);
return ERR_PTR(err);
}
-static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
+static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib *fib)
{
+ mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
WARN_ON(!list_empty(&fib->node_list));
- WARN_ON(fib->lpm_tree);
rhashtable_destroy(&fib->ht);
kfree(fib);
}
@@ -581,6 +595,9 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
goto err_left_struct_set;
memcpy(&lpm_tree->prefix_usage, prefix_usage,
sizeof(lpm_tree->prefix_usage));
+ memset(&lpm_tree->prefix_ref_count, 0,
+ sizeof(lpm_tree->prefix_ref_count));
+ lpm_tree->ref_count = 1;
return lpm_tree;
err_left_struct_set:
@@ -607,8 +624,10 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
if (lpm_tree->ref_count != 0 &&
lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
- prefix_usage))
+ prefix_usage)) {
+ mlxsw_sp_lpm_tree_hold(lpm_tree);
return lpm_tree;
+ }
}
return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}
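mlxsw_sp_lpm_tree_get() is now a get-or-create helper: a live tree with matching protocol and prefix usage is reused with an extra reference (the new mlxsw_sp_lpm_tree_hold() call above), otherwise a fresh tree starts life at ref_count 1. A hedged sketch of the pattern in plain C (pool model hypothetical):

#include <stddef.h>

struct demo_tree { unsigned int ref_count; unsigned int usage; };

static struct demo_tree *demo_tree_get(struct demo_tree *pool, size_t n,
				       unsigned int usage)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (pool[i].ref_count && pool[i].usage == usage) {
			pool[i].ref_count++;	/* hold the existing tree */
			return &pool[i];
		}
	}
	for (i = 0; i < n; i++) {
		if (!pool[i].ref_count) {
			pool[i].usage = usage;
			pool[i].ref_count = 1;	/* create */
			return &pool[i];
		}
	}
	return NULL;	/* pool exhausted */
}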
@@ -629,9 +648,10 @@ static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
struct mlxsw_sp_lpm_tree *lpm_tree;
u64 max_trees;
- int i;
+ int err, i;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
return -EIO;
@@ -649,11 +669,42 @@ static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
}
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(lpm_tree)) {
+ err = PTR_ERR(lpm_tree);
+ goto err_ipv4_tree_get;
+ }
+ mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
+
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ MLXSW_SP_L3_PROTO_IPV6);
+ if (IS_ERR(lpm_tree)) {
+ err = PTR_ERR(lpm_tree);
+ goto err_ipv6_tree_get;
+ }
+ mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
+
return 0;
+
+err_ipv6_tree_get:
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+err_ipv4_tree_get:
+ kfree(mlxsw_sp->router->lpm.trees);
+ return err;
}
static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+
kfree(mlxsw_sp->router->lpm.trees);
}
@@ -745,10 +796,10 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
return ERR_PTR(-EBUSY);
}
- vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
+ vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(vr->fib4))
return ERR_CAST(vr->fib4);
- vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
+ vr->fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
if (IS_ERR(vr->fib6)) {
err = PTR_ERR(vr->fib6);
goto err_fib6_create;
@@ -763,21 +814,22 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
return vr;
err_mr_table_create:
- mlxsw_sp_fib_destroy(vr->fib6);
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
vr->fib6 = NULL;
err_fib6_create:
- mlxsw_sp_fib_destroy(vr->fib4);
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
vr->fib4 = NULL;
return ERR_PTR(err);
}
-static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr)
{
mlxsw_sp_mr_table_destroy(vr->mr4_table);
vr->mr4_table = NULL;
- mlxsw_sp_fib_destroy(vr->fib6);
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
vr->fib6 = NULL;
- mlxsw_sp_fib_destroy(vr->fib4);
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
vr->fib4 = NULL;
}
@@ -793,12 +845,12 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
return vr;
}
-static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
list_empty(&vr->fib6->node_list) &&
mlxsw_sp_mr_table_empty(vr->mr4_table))
- mlxsw_sp_vr_destroy(vr);
+ mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}
static bool
@@ -809,7 +861,7 @@ mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
if (!mlxsw_sp_vr_is_used(vr))
return false;
- if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
+ if (fib->lpm_tree->id == tree_id)
return true;
return false;
}
@@ -839,14 +891,13 @@ static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib *fib,
struct mlxsw_sp_lpm_tree *new_tree)
{
- struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
enum mlxsw_sp_l3proto proto = fib->proto;
+ struct mlxsw_sp_lpm_tree *old_tree;
u8 old_id, new_id = new_tree->id;
struct mlxsw_sp_vr *vr;
int i, err;
- if (!old_tree)
- goto no_replace;
+ old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
old_id = old_tree->id;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
@@ -860,6 +911,11 @@ static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
goto err_tree_replace;
}
+ memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
+ sizeof(new_tree->prefix_ref_count));
+ mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
+
return 0;
err_tree_replace:
@@ -871,36 +927,6 @@ err_tree_replace:
old_tree);
}
return err;
-
-no_replace:
- fib->lpm_tree = new_tree;
- mlxsw_sp_lpm_tree_hold(new_tree);
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
- if (err) {
- mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
- fib->lpm_tree = NULL;
- return err;
- }
- return 0;
-}
-
-static void
-mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_sp_l3proto proto,
- struct mlxsw_sp_prefix_usage *req_prefix_usage)
-{
- int i;
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
- struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
- unsigned char prefix;
-
- if (!mlxsw_sp_vr_is_used(vr))
- continue;
- mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
- mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
- }
}
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
@@ -2630,7 +2656,8 @@ struct mlxsw_sp_nexthop_group_cmp_arg {
static bool
mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
- const struct in6_addr *gw, int ifindex)
+ const struct in6_addr *gw, int ifindex,
+ int weight)
{
int i;
@@ -2638,7 +2665,7 @@ mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
const struct mlxsw_sp_nexthop *nh;
nh = &nh_grp->nexthops[i];
- if (nh->ifindex == ifindex &&
+ if (nh->ifindex == ifindex && nh->nh_weight == weight &&
ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
return true;
}
@@ -2657,11 +2684,13 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
struct in6_addr *gw;
- int ifindex;
+ int ifindex, weight;
ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
+ weight = mlxsw_sp_rt6->rt->rt6i_nh_weight;
gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
- if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
+ if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
+ weight))
return false;
}
@@ -4192,68 +4221,66 @@ mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
}
static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib *fib,
struct mlxsw_sp_fib_node *fib_node)
{
- struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
+ struct mlxsw_sp_fib *fib = fib_node->fib;
struct mlxsw_sp_lpm_tree *lpm_tree;
int err;
- /* Since the tree is shared between all virtual routers we must
- * make sure it contains all the required prefix lengths. This
- * can be computed by either adding the new prefix length to the
- * existing prefix usage of a bound tree, or by aggregating the
- * prefix lengths across all virtual routers and adding the new
- * one as well.
- */
- if (fib->lpm_tree)
- mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
- &fib->lpm_tree->prefix_usage);
- else
- mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
- mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
+ if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
+ goto out;
+ mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
+ mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
fib->proto);
if (IS_ERR(lpm_tree))
return PTR_ERR(lpm_tree);
- if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
- return 0;
-
err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
if (err)
- return err;
+ goto err_lpm_tree_replace;
+out:
+ lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
return 0;
-}
-static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib *fib)
-{
- if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
- return;
- mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
- mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
- fib->lpm_tree = NULL;
+err_lpm_tree_replace:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+ return err;
}
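With one shared tree per protocol, the per-prefix-length reference counts gate the expensive rebuild: only the 0-to-1 transition on link and the 1-to-0 transition on unlink can trigger a tree replacement; every other route add or delete is pure bookkeeping. Illustrative sketch, with the array sized on the assumption that IPv6 prefix lengths 0..128 must fit:

#include <stdbool.h>

static unsigned long demo_prefix_ref[129];	/* /0 through /128 */

static bool demo_prefix_link(unsigned char len)
{
	return demo_prefix_ref[len]++ == 0;	/* true: tree must grow */
}

static bool demo_prefix_unlink(unsigned char len)
{
	return --demo_prefix_ref[len] == 0;	/* true: tree may shrink */
}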
-static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
+static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node)
{
- unsigned char prefix_len = fib_node->key.prefix_len;
+ struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
struct mlxsw_sp_fib *fib = fib_node->fib;
+ int err;
- if (fib->prefix_ref_count[prefix_len]++ == 0)
- mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
-}
+ if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
+ return;
+	/* Try to construct a new LPM tree from the current prefix usage
+	 * minus the prefix length that just lost its last user. If that
+	 * fails, keep using the old tree.
+	 */
+ mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
+ mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
+ fib_node->key.prefix_len);
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ fib->proto);
+ if (IS_ERR(lpm_tree))
+ return;
-static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
-{
- unsigned char prefix_len = fib_node->key.prefix_len;
- struct mlxsw_sp_fib *fib = fib_node->fib;
+ err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
+ if (err)
+ goto err_lpm_tree_replace;
- if (--fib->prefix_ref_count[prefix_len] == 0)
- mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
+ return;
+
+err_lpm_tree_replace:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
}
static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
@@ -4267,12 +4294,10 @@ static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
return err;
fib_node->fib = fib;
- err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node);
+ err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
if (err)
goto err_fib_lpm_tree_link;
- mlxsw_sp_fib_node_prefix_inc(fib_node);
-
return 0;
err_fib_lpm_tree_link:
@@ -4286,8 +4311,7 @@ static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_fib *fib = fib_node->fib;
- mlxsw_sp_fib_node_prefix_dec(fib_node);
- mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib);
+ mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
fib_node->fib = NULL;
mlxsw_sp_fib_node_remove(fib, fib_node);
}
@@ -4326,7 +4350,7 @@ mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
err_fib_node_init:
mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
return ERR_PTR(err);
}
@@ -4339,7 +4363,7 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
return;
mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
mlxsw_sp_fib_node_destroy(fib_node);
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static struct mlxsw_sp_fib4_entry *
@@ -4764,7 +4788,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev = rt->dst.dev;
nh->nh_grp = nh_grp;
- nh->nh_weight = 1;
+ nh->nh_weight = rt->rt6i_nh_weight;
memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
@@ -5362,7 +5386,7 @@ static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
return;
mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static int
@@ -5399,7 +5423,7 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
return;
mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
@@ -6048,7 +6072,7 @@ err_fid_get:
err_rif_alloc:
err_rif_index_alloc:
vr->rif_count--;
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
return ERR_PTR(err);
}
@@ -6071,7 +6095,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_put(fid);
kfree(rif);
vr->rif_count--;
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static void
@@ -6861,7 +6885,7 @@ mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
return 0;
err_loopback_op:
- mlxsw_sp_vr_put(ul_vr);
+ mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
return err;
}
@@ -6875,7 +6899,7 @@ static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
--ul_vr->rif_count;
- mlxsw_sp_vr_put(ul_vr);
+ mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
@@ -7010,6 +7034,24 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
}
#endif
+static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char rdpm_pl[MLXSW_REG_RDPM_LEN];
+ unsigned int i;
+
+ MLXSW_REG_ZERO(rdpm, rdpm_pl);
+
+	/* The HW determines switch priority from the DSCP bits, while the
+	 * kernel still derives it from the full ToS byte. Because of this
+	 * bit mismatch, translate the value the ToS lookup would observe
+	 * by skipping the two least-significant ECN bits.
+ */
+ for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
+ mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
+}
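The loop works because DSCP occupies the upper six bits of the ToS byte; shifting the index left by two zeroes the ECN bits and produces the ToS value rt_tos2priority() expects. A standalone illustration of the index-to-ToS step:

#include <stdio.h>

int main(void)
{
	unsigned int dscp;

	for (dscp = 0; dscp < 64; dscp++) {
		unsigned int tos = dscp << 2;	/* ECN bits zeroed */

		printf("dscp %2u -> tos 0x%02x\n", dscp, tos);
	}
	return 0;
}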
+
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
char rgcr_pl[MLXSW_REG_RGCR_LEN];
@@ -7022,6 +7064,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
+ mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
if (err)
return err;
@@ -7097,6 +7140,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_mp_hash_init;
+ err = mlxsw_sp_dscp_init(mlxsw_sp);
+ if (err)
+ goto err_dscp_init;
+
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
mlxsw_sp_router_fib_dump_flush);
@@ -7106,6 +7153,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
return 0;
err_register_fib_notifier:
+err_dscp_init:
err_mp_hash_init:
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier: