Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/channels.c  46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/channels.h  16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h  99
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c  30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c  45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c  12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.h  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c  24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.c  17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.h  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c  329
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c  48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c  170
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h  42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.c  588
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.h  49
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c  690
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h  71
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c  164
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h  35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c  714
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h  41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c  163
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tir.c  200
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tir.h  58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/trap.c  27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c  72
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h  4
36 files changed, 3318 insertions(+), 487 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
new file mode 100644
index 000000000000..e7c14c0de0a7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#include "channels.h"
+#include "en.h"
+#include "en/ptp.h"
+
+unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs)
+{
+ return chs->num;
+}
+
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+{
+ struct mlx5e_channel *c;
+
+ WARN_ON(ix >= mlx5e_channels_get_num(chs));
+ c = chs->c[ix];
+
+ *rqn = c->rq.rqn;
+}
+
+bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+{
+ struct mlx5e_channel *c;
+
+ WARN_ON(ix >= mlx5e_channels_get_num(chs));
+ c = chs->c[ix];
+
+ if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
+ return false;
+
+ *rqn = c->xskrq.rqn;
+ return true;
+}
+
+bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn)
+{
+ struct mlx5e_ptp *c = chs->ptp;
+
+ if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state))
+ return false;
+
+ *rqn = c->rq.rqn;
+ return true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
new file mode 100644
index 000000000000..ca00cbc827cb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_EN_CHANNELS_H__
+#define __MLX5_EN_CHANNELS_H__
+
+#include <linux/kernel.h>
+
+struct mlx5e_channels;
+
+unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs);
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
+bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
+bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn);
+
+#endif /* __MLX5_EN_CHANNELS_H__ */
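The new channels.c/channels.h pair only exposes read-only RQN accessors, so later RX-resource code can be written without reaching into struct mlx5e_channel. A minimal sketch of how a caller might collect the regular RQNs for an indirection table (the helper name is hypothetical; kernel-style allocation assumed):

static int example_collect_regular_rqns(struct mlx5e_channels *chs,
					u32 **out_rqns, unsigned int *out_num)
{
	unsigned int i, num = mlx5e_channels_get_num(chs);
	u32 *rqns;

	rqns = kvmalloc_array(num, sizeof(*rqns), GFP_KERNEL);
	if (!rqns)
		return -ENOMEM;

	/* Regular RQs always exist, so this getter has no failure path. */
	for (i = 0; i < num; i++)
		mlx5e_channels_get_regular_rqn(chs, i, &rqns[i]);

	*out_rqns = rqns;
	*out_num = num;
	return 0;
}

The XSK and PTP getters, by contrast, return false when the corresponding RQ is not active, so a caller has to substitute a fallback RQN (e.g. a drop RQ) in those cases and kvfree() the array once the RQT has been redirected.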
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index bc33eaada3b9..86e079310ac3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -55,19 +55,15 @@ void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
{
struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
- if (dl_port->registered)
- devlink_port_unregister(dl_port);
+ devlink_port_unregister(dl_port);
}
struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct devlink_port *port;
if (!netif_device_present(dev))
return NULL;
- port = mlx5e_devlink_get_dl_port(priv);
- if (port->registered)
- return port;
- return NULL;
+
+ return mlx5e_devlink_get_dl_port(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 1d5ce07b83f4..41684a6c44e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -5,6 +5,9 @@
#define __MLX5E_FLOW_STEER_H__
#include "mod_hdr.h"
+#include "lib/fs_ttc.h"
+
+struct mlx5e_post_act;
enum {
MLX5E_TC_FT_LEVEL = 0,
@@ -18,6 +21,7 @@ struct mlx5e_tc_table {
struct mutex t_lock;
struct mlx5_flow_table *t;
struct mlx5_fs_chains *chains;
+ struct mlx5e_post_act *post_act;
struct rhashtable ht;
@@ -67,27 +71,7 @@ struct mlx5e_l2_table {
bool promisc_enabled;
};
-enum mlx5e_traffic_types {
- MLX5E_TT_IPV4_TCP,
- MLX5E_TT_IPV6_TCP,
- MLX5E_TT_IPV4_UDP,
- MLX5E_TT_IPV6_UDP,
- MLX5E_TT_IPV4_IPSEC_AH,
- MLX5E_TT_IPV6_IPSEC_AH,
- MLX5E_TT_IPV4_IPSEC_ESP,
- MLX5E_TT_IPV6_IPSEC_ESP,
- MLX5E_TT_IPV4,
- MLX5E_TT_IPV6,
- MLX5E_TT_ANY,
- MLX5E_NUM_TT,
- MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
-};
-
-struct mlx5e_tirc_config {
- u8 l3_prot_type;
- u8 l4_prot_type;
- u32 rx_hash_fields;
-};
+#define MLX5E_NUM_INDIR_TIRS (MLX5_NUM_TT - 1)
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP)
@@ -99,30 +83,6 @@ struct mlx5e_tirc_config {
MLX5_HASH_FIELD_SEL_DST_IP |\
MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-enum mlx5e_tunnel_types {
- MLX5E_TT_IPV4_GRE,
- MLX5E_TT_IPV6_GRE,
- MLX5E_TT_IPV4_IPIP,
- MLX5E_TT_IPV6_IPIP,
- MLX5E_TT_IPV4_IPV6,
- MLX5E_TT_IPV6_IPV6,
- MLX5E_NUM_TUNNEL_TT,
-};
-
-bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev);
-
-struct mlx5e_ttc_rule {
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_destination default_dest;
-};
-
-/* L3/L4 traffic type classifier */
-struct mlx5e_ttc_table {
- struct mlx5e_flow_table ft;
- struct mlx5e_ttc_rule rules[MLX5E_NUM_TT];
- struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT];
-};
-
/* NIC prio FTS */
enum {
MLX5E_PROMISC_FT_LEVEL,
@@ -144,21 +104,7 @@ enum {
#endif
};
-#define MLX5E_TTC_NUM_GROUPS 3
-#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT)
-#define MLX5E_TTC_GROUP2_SIZE BIT(1)
-#define MLX5E_TTC_GROUP3_SIZE BIT(0)
-#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\
- MLX5E_TTC_GROUP2_SIZE +\
- MLX5E_TTC_GROUP3_SIZE)
-
-#define MLX5E_INNER_TTC_NUM_GROUPS 3
-#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3)
-#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1)
-#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0)
-#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\
- MLX5E_INNER_TTC_GROUP2_SIZE +\
- MLX5E_INNER_TTC_GROUP3_SIZE)
+struct mlx5e_priv;
#ifdef CONFIG_MLX5_EN_RXNFC
@@ -226,8 +172,8 @@ struct mlx5e_flow_steering {
struct mlx5e_promisc_table promisc;
struct mlx5e_vlan_table *vlan;
struct mlx5e_l2_table l2;
- struct mlx5e_ttc_table ttc;
- struct mlx5e_ttc_table inner_ttc;
+ struct mlx5_ttc_table *ttc;
+ struct mlx5_ttc_table *inner_ttc;
#ifdef CONFIG_MLX5_EN_ARFS
struct mlx5e_arfs_tables *arfs;
#endif
@@ -239,33 +185,13 @@ struct mlx5e_flow_steering {
struct mlx5e_ptp_fs *ptp_fs;
};
-struct ttc_params {
- struct mlx5_flow_table_attr ft_attr;
- u32 any_tt_tirn;
- u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
- struct mlx5e_ttc_table *inner_ttc;
-};
-
-void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
-void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
-void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
-
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
- struct mlx5e_ttc_table *ttc);
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
- struct mlx5e_ttc_table *ttc);
+void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
+ struct ttc_params *ttc_params, bool tunnel);
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
- struct mlx5e_ttc_table *ttc);
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
- struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
-int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type,
- struct mlx5_flow_destination *new_dest);
-struct mlx5_flow_destination
-mlx5e_ttc_get_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type);
-int mlx5e_ttc_fwd_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type);
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
@@ -273,7 +199,6 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
-u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt);
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
index 909faa6c89d7..7aa25a5e29d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
@@ -33,22 +33,22 @@ static char *fs_udp_type2str(enum fs_udp_type i)
}
}
-static enum mlx5e_traffic_types fs_udp2tt(enum fs_udp_type i)
+static enum mlx5_traffic_types fs_udp2tt(enum fs_udp_type i)
{
switch (i) {
case FS_IPV4_UDP:
- return MLX5E_TT_IPV4_UDP;
+ return MLX5_TT_IPV4_UDP;
default: /* FS_IPV6_UDP */
- return MLX5E_TT_IPV6_UDP;
+ return MLX5_TT_IPV6_UDP;
}
}
-static enum fs_udp_type tt2fs_udp(enum mlx5e_traffic_types i)
+static enum fs_udp_type tt2fs_udp(enum mlx5_traffic_types i)
{
switch (i) {
- case MLX5E_TT_IPV4_UDP:
+ case MLX5_TT_IPV4_UDP:
return FS_IPV4_UDP;
- case MLX5E_TT_IPV6_UDP:
+ case MLX5_TT_IPV6_UDP:
return FS_IPV6_UDP;
default:
return FS_UDP_NUM_TYPES;
@@ -75,7 +75,7 @@ static void fs_udp_set_dport_flow(struct mlx5_flow_spec *spec, enum fs_udp_type
struct mlx5_flow_handle *
mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
- enum mlx5e_traffic_types ttc_type,
+ enum mlx5_traffic_types ttc_type,
u32 tir_num, u16 d_port)
{
enum fs_udp_type type = tt2fs_udp(ttc_type);
@@ -124,7 +124,7 @@ static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type typ
fs_udp = priv->fs.udp;
fs_udp_t = &fs_udp->tables[type];
- dest = mlx5e_ttc_get_default_dest(priv, fs_udp2tt(type));
+ dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_udp2tt(type));
rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -259,7 +259,7 @@ static int fs_udp_disable(struct mlx5e_priv *priv)
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5e_ttc_fwd_default_dest(priv, fs_udp2tt(i));
+ err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_udp2tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -281,7 +281,7 @@ static int fs_udp_enable(struct mlx5e_priv *priv)
dest.ft = priv->fs.udp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5e_ttc_fwd_dest(priv, fs_udp2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_udp2tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -401,7 +401,7 @@ static int fs_any_add_default_rule(struct mlx5e_priv *priv)
fs_any = priv->fs.any;
fs_any_t = &fs_any->table;
- dest = mlx5e_ttc_get_default_dest(priv, MLX5E_TT_ANY);
+ dest = mlx5_ttc_get_default_dest(priv->fs.ttc, MLX5_TT_ANY);
rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -514,11 +514,11 @@ static int fs_any_disable(struct mlx5e_priv *priv)
int err;
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5e_ttc_fwd_default_dest(priv, MLX5E_TT_ANY);
+ err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, MLX5_TT_ANY);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
- __func__, MLX5E_TT_ANY, err);
+ __func__, MLX5_TT_ANY, err);
return err;
}
return 0;
@@ -533,11 +533,11 @@ static int fs_any_enable(struct mlx5e_priv *priv)
dest.ft = priv->fs.any->table.t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5e_ttc_fwd_dest(priv, MLX5E_TT_ANY, &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs.ttc, MLX5_TT_ANY, &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
- __func__, MLX5E_TT_ANY, err);
+ __func__, MLX5_TT_ANY, err);
return err;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
index 8385df24eb99..7a70c4f38fda 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
@@ -12,7 +12,7 @@ void mlx5e_fs_tt_redirect_del_rule(struct mlx5_flow_handle *rule);
/* UDP traffic type redirect */
struct mlx5_flow_handle *
mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
- enum mlx5e_traffic_types ttc_type,
+ enum mlx5_traffic_types ttc_type,
u32 tir_num, u16 d_port);
void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv);
int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
index ea321e528749..4e72ca8070e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
@@ -5,11 +5,15 @@
#include <linux/slab.h>
#include <linux/xarray.h>
#include <linux/hashtable.h>
+#include <linux/refcount.h>
#include "mapping.h"
#define MAPPING_GRACE_PERIOD 2000
+static LIST_HEAD(shared_ctx_list);
+static DEFINE_MUTEX(shared_ctx_lock);
+
struct mapping_ctx {
struct xarray xarray;
DECLARE_HASHTABLE(ht, 8);
@@ -20,6 +24,10 @@ struct mapping_ctx {
struct delayed_work dwork;
struct list_head pending_list;
spinlock_t pending_list_lock; /* Guards pending list */
+ u64 id;
+ u8 type;
+ struct list_head list;
+ refcount_t refcount;
};
struct mapping_item {
@@ -205,11 +213,48 @@ mapping_create(size_t data_size, u32 max_id, bool delayed_removal)
mutex_init(&ctx->lock);
xa_init_flags(&ctx->xarray, XA_FLAGS_ALLOC1);
+ refcount_set(&ctx->refcount, 1);
+ INIT_LIST_HEAD(&ctx->list);
+
+ return ctx;
+}
+
+struct mapping_ctx *
+mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delayed_removal)
+{
+ struct mapping_ctx *ctx;
+
+ mutex_lock(&shared_ctx_lock);
+ list_for_each_entry(ctx, &shared_ctx_list, list) {
+ if (ctx->id == id && ctx->type == type) {
+ if (refcount_inc_not_zero(&ctx->refcount))
+ goto unlock;
+ break;
+ }
+ }
+
+ ctx = mapping_create(data_size, max_id, delayed_removal);
+ if (IS_ERR(ctx))
+ goto unlock;
+
+ ctx->id = id;
+ ctx->type = type;
+ list_add(&ctx->list, &shared_ctx_list);
+
+unlock:
+ mutex_unlock(&shared_ctx_lock);
return ctx;
}
void mapping_destroy(struct mapping_ctx *ctx)
{
+ if (!refcount_dec_and_test(&ctx->refcount))
+ return;
+
+ mutex_lock(&shared_ctx_lock);
+ list_del(&ctx->list);
+ mutex_unlock(&shared_ctx_lock);
+
mapping_flush_work(ctx);
xa_destroy(&ctx->xarray);
mutex_destroy(&ctx->lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
index 285525cc5470..4e2119f0f4c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
@@ -24,4 +24,9 @@ struct mapping_ctx *mapping_create(size_t data_size, u32 max_id,
bool delayed_removal);
void mapping_destroy(struct mapping_ctx *ctx);
+/* Adds a mapping context with the given id and type, or gets an existing one with the same key.
+ */
+struct mapping_ctx *
+mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delayed_removal);
+
#endif /* __MLX5_MAPPING_H__ */
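mapping_create_for_id() makes mapping contexts shareable: contexts live on a global list keyed by (id, type), a second caller with the same key just takes a reference, and mapping_destroy() only flushes and frees on the last put. A sketch of obtaining a shared context (the type constant and the data_size/max_id values are illustrative only):

static struct mapping_ctx *example_get_shared_ctx(struct mlx5_core_dev *mdev)
{
	/* Key the context on the NIC system image GUID so that functions on
	 * the same hardware resolve to the same context. EXAMPLE_MAPPING_TYPE
	 * stands in for a caller-defined type constant.
	 */
	u64 id = mlx5_query_nic_system_image_guid(mdev);

	return mapping_create_for_id(id, EXAMPLE_MAPPING_TYPE, sizeof(u32),
				     0xffff, true);
}

As with mapping_create(), the return value may be an ERR_PTR on allocation failure; each user calls mapping_destroy() on its own reference, and only the last call tears the context down.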
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 2cbf18c967f7..3cbb596821e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -167,6 +167,18 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0;
}
+struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params)
+{
+ struct mlx5e_lro_param lro_param;
+
+ lro_param = (struct mlx5e_lro_param) {
+ .enabled = params->lro_en,
+ .timeout = params->lro_timeout,
+ };
+
+ return lro_param;
+}
+
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index e9593f5f0661..879ad46d754e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -11,6 +11,11 @@ struct mlx5e_xsk_param {
u16 chunk_size;
};
+struct mlx5e_lro_param {
+ bool enabled;
+ u32 timeout;
+};
+
struct mlx5e_cq_param {
u32 cqc[MLX5_ST_SZ_DW(cqc)];
struct mlx5_wq_param wq;
@@ -120,6 +125,7 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
+struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params);
/* Build queue parameters */
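mlx5e_get_lro_param() packages the two LRO-related fields of struct mlx5e_params so that TIR creation code no longer needs the whole params structure. The intended call pattern, sketched against the TIR builder API introduced later in this series (params and builder are assumed to be in scope):

	struct mlx5e_lro_param lro_param = mlx5e_get_lro_param(params);

	mlx5e_tir_builder_build_lro(builder, &lro_param);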
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index efef4adce086..ee688dec67a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -326,13 +326,14 @@ static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
struct mlx5e_ptp_params *cparams)
{
struct mlx5e_params *params = &cparams->params;
+ u8 num_tc = mlx5e_get_dcb_num_tc(params);
int ix_base;
int err;
int tc;
- ix_base = params->num_tc * params->num_channels;
+ ix_base = num_tc * params->num_channels;
- for (tc = 0; tc < params->num_tc; tc++) {
+ for (tc = 0; tc < num_tc; tc++) {
int txq_ix = ix_base + tc;
err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
@@ -365,9 +366,12 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
struct mlx5e_create_cq_param ccp = {};
struct dim_cq_moder ptp_moder = {};
struct mlx5e_cq_param *cq_param;
+ u8 num_tc;
int err;
int tc;
+ num_tc = mlx5e_get_dcb_num_tc(params);
+
ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
@@ -375,7 +379,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
cq_param = &cparams->txq_sq_param.cqp;
- for (tc = 0; tc < params->num_tc; tc++) {
+ for (tc = 0; tc < num_tc; tc++) {
struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;
err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
@@ -383,7 +387,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
goto out_err_txqsq_cq;
}
- for (tc = 0; tc < params->num_tc; tc++) {
+ for (tc = 0; tc < num_tc; tc++) {
struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];
@@ -399,7 +403,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
out_err_ts_cq:
for (--tc; tc >= 0; tc--)
mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
- tc = params->num_tc;
+ tc = num_tc;
out_err_txqsq_cq:
for (--tc; tc >= 0; tc--)
mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
@@ -475,7 +479,7 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
params->num_channels = orig->num_channels;
params->hard_mtu = orig->hard_mtu;
params->sw_mtu = orig->sw_mtu;
- params->num_tc = orig->num_tc;
+ params->mqprio = orig->mqprio;
/* SQ */
if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
@@ -605,9 +609,9 @@ static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
+ u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
struct mlx5_flow_handle *rule;
- u32 tirn = priv->ptp_tir.tirn;
int err;
if (ptp_fs->valid)
@@ -617,7 +621,7 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
if (err)
goto out_free;
- rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5E_TT_IPV4_UDP,
+ rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV4_UDP,
tirn, PTP_EV_PORT);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -625,7 +629,7 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
}
ptp_fs->udp_v4_rule = rule;
- rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5E_TT_IPV6_UDP,
+ rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV6_UDP,
tirn, PTP_EV_PORT);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -680,7 +684,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
- c->num_tc = params->num_tc;
+ c->num_tc = mlx5e_get_dcb_num_tc(params);
c->stats = &priv->ptp_stats.ch;
c->lag_port = lag_port;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 5efe3278b0f6..e8a8d78e3e4d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -132,7 +132,7 @@ static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
*/
bool is_ptp = MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS);
- return (chs->params.num_channels + is_ptp) * chs->params.num_tc + qid;
+ return (chs->params.num_channels + is_ptp) * mlx5e_get_dcb_num_tc(&chs->params) + qid;
}
int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid)
@@ -733,8 +733,8 @@ static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
spin_unlock_bh(qdisc_lock(qdisc));
}
-int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
- u16 *new_qid, struct netlink_ext_ack *extack)
+int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_qos_node *node;
struct netdev_queue *txq;
@@ -742,11 +742,9 @@ int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
bool opened;
int err;
- qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", classid);
-
- *old_qid = *new_qid = 0;
+ qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
- node = mlx5e_sw_node_find(priv, classid);
+ node = mlx5e_sw_node_find(priv, *classid);
if (!node)
return -ENOENT;
@@ -764,7 +762,7 @@ int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
if (err) /* Not fatal. */
qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
- node->hw_id, classid, err);
+ node->hw_id, *classid, err);
mlx5e_sw_node_delete(priv, node);
@@ -826,8 +824,7 @@ int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
if (opened)
mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
- *old_qid = mlx5e_qid_from_qos(&priv->channels, moved_qid);
- *new_qid = mlx5e_qid_from_qos(&priv->channels, qid);
+ *classid = node->classid;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
index 5af7991fcd19..757682b7c0e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
@@ -34,8 +34,8 @@ int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
struct netlink_ext_ack *extack);
int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
u64 rate, u64 ceil, struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
- u16 *new_qid, struct netlink_ext_ack *extack);
+int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid,
+ struct netlink_ext_ack *extack);
int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
struct netlink_ext_ack *extack);
int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 3c0032c9647c..0c38c2e319be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -15,9 +15,116 @@ struct mlx5_bridge_switchdev_fdb_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
struct net_device *dev;
+ struct mlx5_esw_bridge_offloads *br_offloads;
bool add;
};
+static bool mlx5_esw_bridge_dev_same_esw(struct net_device *dev, struct mlx5_eswitch *esw)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return esw == priv->mdev->priv.eswitch;
+}
+
+static bool mlx5_esw_bridge_dev_same_hw(struct net_device *dev, struct mlx5_eswitch *esw)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev, *esw_mdev;
+ u64 system_guid, esw_system_guid;
+
+ mdev = priv->mdev;
+ esw_mdev = esw->dev;
+
+ system_guid = mlx5_query_nic_system_image_guid(mdev);
+ esw_system_guid = mlx5_query_nic_system_image_guid(esw_mdev);
+
+ return system_guid == esw_system_guid;
+}
+
+static struct net_device *
+mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw)
+{
+ struct net_device *lower;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_priv *priv;
+
+ if (!mlx5e_eswitch_rep(lower))
+ continue;
+
+ priv = netdev_priv(lower);
+ mdev = priv->mdev;
+ if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw))
+ return lower;
+ }
+
+ return NULL;
+}
+
+static struct net_device *
+mlx5_esw_bridge_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
+ u16 *vport_num, u16 *esw_owner_vhca_id)
+{
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_priv *priv;
+
+ if (netif_is_lag_master(dev))
+ dev = mlx5_esw_bridge_lag_rep_get(dev, esw);
+
+ if (!dev || !mlx5e_eswitch_rep(dev) || !mlx5_esw_bridge_dev_same_hw(dev, esw))
+ return NULL;
+
+ priv = netdev_priv(dev);
+ rpriv = priv->ppriv;
+ *vport_num = rpriv->rep->vport;
+ *esw_owner_vhca_id = MLX5_CAP_GEN(priv->mdev, vhca_id);
+ return dev;
+}
+
+static struct net_device *
+mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
+ u16 *vport_num, u16 *esw_owner_vhca_id)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ if (netif_is_lag_master(dev) || mlx5e_eswitch_rep(dev))
+ return mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, vport_num,
+ esw_owner_vhca_id);
+
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ struct net_device *rep;
+
+ if (netif_is_bridge_master(lower_dev))
+ continue;
+
+ rep = mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(lower_dev, esw, vport_num,
+ esw_owner_vhca_id);
+ if (rep)
+ return rep;
+ }
+
+ return NULL;
+}
+
+static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *rep,
+ struct mlx5_eswitch *esw)
+{
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_priv *priv;
+
+ if (!mlx5_esw_bridge_dev_same_esw(rep, esw))
+ return false;
+
+ priv = netdev_priv(rep);
+ mdev = priv->mdev;
+ if (netif_is_lag_master(dev))
+ return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev);
+ return true;
+}
+
static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr)
{
struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
@@ -25,37 +132,36 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
netdev_nb);
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info = ptr;
+ struct net_device *upper = info->upper_dev, *rep;
+ struct mlx5_eswitch *esw = br_offloads->esw;
+ u16 vport_num, esw_owner_vhca_id;
struct netlink_ext_ack *extack;
- struct mlx5e_rep_priv *rpriv;
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- struct net_device *upper;
- struct mlx5e_priv *priv;
- u16 vport_num;
-
- if (!mlx5e_eswitch_rep(dev))
- return 0;
+ int ifindex = upper->ifindex;
+ int err;
- upper = info->upper_dev;
if (!netif_is_bridge_master(upper))
return 0;
- esw = br_offloads->esw;
- priv = netdev_priv(dev);
- if (esw != priv->mdev->priv.eswitch)
+ rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
+ if (!rep)
return 0;
- rpriv = priv->ppriv;
- vport_num = rpriv->rep->vport;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
-
extack = netdev_notifier_info_to_extack(&info->info);
- return info->linking ?
- mlx5_esw_bridge_vport_link(upper->ifindex, br_offloads, vport, extack) :
- mlx5_esw_bridge_vport_unlink(upper->ifindex, br_offloads, vport, extack);
+ if (mlx5_esw_bridge_is_local(dev, rep, esw))
+ err = info->linking ?
+ mlx5_esw_bridge_vport_link(ifindex, vport_num, esw_owner_vhca_id,
+ br_offloads, extack) :
+ mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id,
+ br_offloads, extack);
+ else if (mlx5_esw_bridge_dev_same_hw(rep, esw))
+ err = info->linking ?
+ mlx5_esw_bridge_vport_peer_link(ifindex, vport_num, esw_owner_vhca_id,
+ br_offloads, extack) :
+ mlx5_esw_bridge_vport_peer_unlink(ifindex, vport_num, esw_owner_vhca_id,
+ br_offloads, extack);
+
+ return err;
}
static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
@@ -75,31 +181,28 @@ static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
return notifier_from_errno(err);
}
-static int mlx5_esw_bridge_port_obj_add(struct net_device *dev,
- const void *ctx,
- const struct switchdev_obj *obj,
- struct netlink_ext_ack *extack)
+static int
+mlx5_esw_bridge_port_obj_add(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ struct mlx5_esw_bridge_offloads *br_offloads)
{
+ struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
+ const struct switchdev_obj *obj = port_obj_info->obj;
const struct switchdev_obj_port_vlan *vlan;
- struct mlx5e_rep_priv *rpriv;
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- struct mlx5e_priv *priv;
- u16 vport_num;
- int err = 0;
+ u16 vport_num, esw_owner_vhca_id;
+ int err;
- priv = netdev_priv(dev);
- rpriv = priv->ppriv;
- vport_num = rpriv->rep->vport;
- esw = priv->mdev->priv.eswitch;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
+ if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
+ &esw_owner_vhca_id))
+ return 0;
+
+ port_obj_info->handled = true;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
- err = mlx5_esw_bridge_port_vlan_add(vlan->vid, vlan->flags, esw, vport, extack);
+ err = mlx5_esw_bridge_port_vlan_add(vport_num, esw_owner_vhca_id, vlan->vid,
+ vlan->flags, br_offloads, extack);
break;
default:
return -EOPNOTSUPP;
@@ -107,29 +210,25 @@ static int mlx5_esw_bridge_port_obj_add(struct net_device *dev,
return err;
}
-static int mlx5_esw_bridge_port_obj_del(struct net_device *dev,
- const void *ctx,
- const struct switchdev_obj *obj)
+static int
+mlx5_esw_bridge_port_obj_del(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ struct mlx5_esw_bridge_offloads *br_offloads)
{
+ const struct switchdev_obj *obj = port_obj_info->obj;
const struct switchdev_obj_port_vlan *vlan;
- struct mlx5e_rep_priv *rpriv;
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- struct mlx5e_priv *priv;
- u16 vport_num;
+ u16 vport_num, esw_owner_vhca_id;
- priv = netdev_priv(dev);
- rpriv = priv->ppriv;
- vport_num = rpriv->rep->vport;
- esw = priv->mdev->priv.eswitch;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
+ if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
+ &esw_owner_vhca_id))
+ return 0;
+
+ port_obj_info->handled = true;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
- mlx5_esw_bridge_port_vlan_del(vlan->vid, esw, vport);
+ mlx5_esw_bridge_port_vlan_del(vport_num, esw_owner_vhca_id, vlan->vid, br_offloads);
break;
default:
return -EOPNOTSUPP;
@@ -137,25 +236,21 @@ static int mlx5_esw_bridge_port_obj_del(struct net_device *dev,
return 0;
}
-static int mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
- const void *ctx,
- const struct switchdev_attr *attr,
- struct netlink_ext_ack *extack)
+static int
+mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
+ struct switchdev_notifier_port_attr_info *port_attr_info,
+ struct mlx5_esw_bridge_offloads *br_offloads)
{
- struct mlx5e_rep_priv *rpriv;
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- struct mlx5e_priv *priv;
- u16 vport_num;
- int err = 0;
+ struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
+ const struct switchdev_attr *attr = port_attr_info->attr;
+ u16 vport_num, esw_owner_vhca_id;
+ int err;
- priv = netdev_priv(dev);
- rpriv = priv->ppriv;
- vport_num = rpriv->rep->vport;
- esw = priv->mdev->priv.eswitch;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
+ if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
+ &esw_owner_vhca_id))
+ return 0;
+
+ port_attr_info->handled = true;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
@@ -167,10 +262,12 @@ static int mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- err = mlx5_esw_bridge_ageing_time_set(attr->u.ageing_time, esw, vport);
+ err = mlx5_esw_bridge_ageing_time_set(vport_num, esw_owner_vhca_id,
+ attr->u.ageing_time, br_offloads);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- err = mlx5_esw_bridge_vlan_filtering_set(attr->u.vlan_filtering, esw, vport);
+ err = mlx5_esw_bridge_vlan_filtering_set(vport_num, esw_owner_vhca_id,
+ attr->u.vlan_filtering, br_offloads);
break;
default:
err = -EOPNOTSUPP;
@@ -179,27 +276,24 @@ static int mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
return err;
}
-static int mlx5_esw_bridge_event_blocking(struct notifier_block *unused,
+static int mlx5_esw_bridge_event_blocking(struct notifier_block *nb,
unsigned long event, void *ptr)
{
+ struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
+ struct mlx5_esw_bridge_offloads,
+ nb_blk);
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
int err;
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
- err = switchdev_handle_port_obj_add(dev, ptr,
- mlx5e_eswitch_rep,
- mlx5_esw_bridge_port_obj_add);
+ err = mlx5_esw_bridge_port_obj_add(dev, ptr, br_offloads);
break;
case SWITCHDEV_PORT_OBJ_DEL:
- err = switchdev_handle_port_obj_del(dev, ptr,
- mlx5e_eswitch_rep,
- mlx5_esw_bridge_port_obj_del);
+ err = mlx5_esw_bridge_port_obj_del(dev, ptr, br_offloads);
break;
case SWITCHDEV_PORT_ATTR_SET:
- err = switchdev_handle_port_attr_set(dev, ptr,
- mlx5e_eswitch_rep,
- mlx5_esw_bridge_port_obj_attr_set);
+ err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);
break;
default:
err = 0;
@@ -222,27 +316,23 @@ static void mlx5_esw_bridge_switchdev_fdb_event_work(struct work_struct *work)
container_of(work, struct mlx5_bridge_switchdev_fdb_work, work);
struct switchdev_notifier_fdb_info *fdb_info =
&fdb_work->fdb_info;
+ struct mlx5_esw_bridge_offloads *br_offloads =
+ fdb_work->br_offloads;
struct net_device *dev = fdb_work->dev;
- struct mlx5e_rep_priv *rpriv;
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- struct mlx5e_priv *priv;
- u16 vport_num;
+ u16 vport_num, esw_owner_vhca_id;
rtnl_lock();
- priv = netdev_priv(dev);
- rpriv = priv->ppriv;
- vport_num = rpriv->rep->vport;
- esw = priv->mdev->priv.eswitch;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
+ if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
+ &esw_owner_vhca_id))
goto out;
if (fdb_work->add)
- mlx5_esw_bridge_fdb_create(dev, esw, vport, fdb_info);
+ mlx5_esw_bridge_fdb_create(dev, vport_num, esw_owner_vhca_id, br_offloads,
+ fdb_info);
else
- mlx5_esw_bridge_fdb_remove(dev, esw, vport, fdb_info);
+ mlx5_esw_bridge_fdb_remove(dev, vport_num, esw_owner_vhca_id, br_offloads,
+ fdb_info);
out:
rtnl_unlock();
@@ -251,7 +341,8 @@ out:
static struct mlx5_bridge_switchdev_fdb_work *
mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add,
- struct switchdev_notifier_fdb_info *fdb_info)
+ struct switchdev_notifier_fdb_info *fdb_info,
+ struct mlx5_esw_bridge_offloads *br_offloads)
{
struct mlx5_bridge_switchdev_fdb_work *work;
u8 *addr;
@@ -273,6 +364,7 @@ mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add,
dev_hold(dev);
work->dev = dev;
+ work->br_offloads = br_offloads;
work->add = add;
return work;
}
@@ -286,20 +378,14 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
struct switchdev_notifier_fdb_info *fdb_info;
struct mlx5_bridge_switchdev_fdb_work *work;
+ struct mlx5_eswitch *esw = br_offloads->esw;
struct switchdev_notifier_info *info = ptr;
- struct net_device *upper;
- struct mlx5e_priv *priv;
-
- if (!mlx5e_eswitch_rep(dev))
- return NOTIFY_DONE;
- priv = netdev_priv(dev);
- if (priv->mdev->priv.eswitch != br_offloads->esw)
- return NOTIFY_DONE;
+ u16 vport_num, esw_owner_vhca_id;
+ struct net_device *upper, *rep;
if (event == SWITCHDEV_PORT_ATTR_SET) {
- int err = switchdev_handle_port_attr_set(dev, ptr,
- mlx5e_eswitch_rep,
- mlx5_esw_bridge_port_obj_attr_set);
+ int err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);
+
return notifier_from_errno(err);
}
@@ -309,7 +395,27 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
if (!netif_is_bridge_master(upper))
return NOTIFY_DONE;
+ rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
+ if (!rep)
+ return NOTIFY_DONE;
+
switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_BRIDGE:
+ /* only handle the event on the native eswitch of the representor */
+ if (!mlx5_esw_bridge_is_local(dev, rep, esw))
+ break;
+
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+ mlx5_esw_bridge_fdb_update_used(dev, vport_num, esw_owner_vhca_id, br_offloads,
+ fdb_info);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_BRIDGE:
+ /* only handle the event on peers */
+ if (mlx5_esw_bridge_is_local(dev, rep, esw))
+ break;
+ fallthrough;
case SWITCHDEV_FDB_ADD_TO_DEVICE:
case SWITCHDEV_FDB_DEL_TO_DEVICE:
fdb_info = container_of(info,
@@ -318,7 +424,8 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
work = mlx5_esw_bridge_init_switchdev_fdb_work(dev,
event == SWITCHDEV_FDB_ADD_TO_DEVICE,
- fdb_info);
+ fdb_info,
+ br_offloads);
if (IS_ERR(work)) {
WARN_ONCE(1, "Failed to init switchdev work, err=%ld",
PTR_ERR(work));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 059799e4f483..51a4d80f7fa3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -17,7 +17,7 @@
#include "en/mapping.h"
#include "en/tc_tun.h"
#include "lib/port_tun.h"
-#include "esw/sample.h"
+#include "en/tc/sample.h"
struct mlx5e_rep_indr_block_priv {
struct net_device *netdev;
@@ -516,7 +516,6 @@ void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
mlx5e_rep_indr_block_unbind);
}
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
struct mlx5e_tc_update_priv *tc_priv,
u32 tunnel_id)
@@ -609,12 +608,13 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
return true;
}
-static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
- struct mlx5e_tc_update_priv *tc_priv)
+static bool mlx5e_restore_skb_chain(struct sk_buff *skb, u32 chain, u32 reg_c1,
+ struct mlx5e_tc_update_priv *tc_priv)
{
struct mlx5e_priv *priv = netdev_priv(skb->dev);
u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
if (chain) {
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
@@ -636,9 +636,25 @@ static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
zone_restore_id))
return false;
}
+#endif /* CONFIG_NET_TC_SKB_EXT */
+
return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
}
-#endif /* CONFIG_NET_TC_SKB_EXT */
+
+static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5_mapped_obj *mapped_obj,
+ struct mlx5e_tc_update_priv *tc_priv)
+{
+ if (!mlx5e_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) {
+ netdev_dbg(priv->netdev,
+ "Failed to restore tunnel info for sampled packet\n");
+ return;
+ }
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ mlx5e_tc_sample_skb(skb, mapped_obj);
+#endif /* CONFIG_MLX5_TC_SAMPLE */
+ mlx5_rep_tc_post_napi_receive(tc_priv);
+}
bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
struct sk_buff *skb,
@@ -647,7 +663,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
struct mlx5_mapped_obj mapped_obj;
struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
- u32 reg_c0, reg_c1;
+ u32 reg_c0;
int err;
reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
@@ -659,8 +675,6 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
*/
skb->mark = 0;
- reg_c1 = be32_to_cpu(cqe->ft_metadata);
-
priv = netdev_priv(skb->dev);
esw = priv->mdev->priv.eswitch;
err = mapping_find(esw->offloads.reg_c0_obj_pool, reg_c0, &mapped_obj);
@@ -671,18 +685,14 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
return false;
}
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN)
- return mlx5e_restore_skb(skb, mapped_obj.chain, reg_c1, tc_priv);
-#endif /* CONFIG_NET_TC_SKB_EXT */
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
- if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) {
- mlx5_esw_sample_skb(skb, &mapped_obj);
+ if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
+ u32 reg_c1 = be32_to_cpu(cqe->ft_metadata);
+
+ return mlx5e_restore_skb_chain(skb, mapped_obj.chain, reg_c1, tc_priv);
+ } else if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) {
+ mlx5e_restore_skb_sample(priv, skb, &mapped_obj, tc_priv);
return false;
- }
-#endif /* CONFIG_MLX5_TC_SAMPLE */
- if (mapped_obj.type != MLX5_MAPPED_OBJ_SAMPLE &&
- mapped_obj.type != MLX5_MAPPED_OBJ_CHAIN) {
+ } else {
netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 9d361efd5ff7..bb682fd751c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -372,7 +372,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
- for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
struct mlx5e_txqsq *sq = &c->sq[tc];
err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc);
@@ -384,7 +384,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
goto close_sqs_nest;
- for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
err = mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg,
&ptp_ch->ptpsq[tc],
tc);
@@ -494,7 +494,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
- for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
struct mlx5e_txqsq *sq = &c->sq[tc];
err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ");
@@ -504,7 +504,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
}
if (ptp_ch && test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) {
- for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq;
err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
new file mode 100644
index 000000000000..b915fb29dd2c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#include "rqt.h"
+#include <linux/mlx5/transobj.h>
+
+void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
+ unsigned int num_channels)
+{
+ unsigned int i;
+
+ for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
+ indir->table[i] = i % num_channels;
+}
+
+static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
+ u16 max_size, u32 *init_rqns, u16 init_size)
+{
+ void *rqtc;
+ int inlen;
+ int err;
+ u32 *in;
+ int i;
+
+ rqt->mdev = mdev;
+ rqt->size = max_size;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * init_size;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, rqt_max_size, rqt->size);
+
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, init_size);
+ for (i = 0; i < init_size; i++)
+ MLX5_SET(rqtc, rqtc, rq_num[i], init_rqns[i]);
+
+ err = mlx5_core_create_rqt(rqt->mdev, in, inlen, &rqt->rqtn);
+
+ kvfree(in);
+ return err;
+}
+
+int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
+ bool indir_enabled, u32 init_rqn)
+{
+ u16 max_size = indir_enabled ? MLX5E_INDIR_RQT_SIZE : 1;
+
+ return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, 1);
+}
+
+static int mlx5e_bits_invert(unsigned long a, int size)
+{
+ int inv = 0;
+ int i;
+
+ for (i = 0; i < size; i++)
+ inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+
+ return inv;
+}
+
+static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns,
+ u8 hfunc, struct mlx5e_rss_params_indir *indir)
+{
+ unsigned int i;
+
+ for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
+ unsigned int ix = i;
+
+ if (hfunc == ETH_RSS_HASH_XOR)
+ ix = mlx5e_bits_invert(ix, ilog2(MLX5E_INDIR_RQT_SIZE));
+
+ ix = indir->table[ix];
+
+ if (WARN_ON(ix >= num_rqns))
+ /* Could be a bug in the driver or in the kernel part of
+ * ethtool: indir table refers to non-existent RQs.
+ */
+ return -EINVAL;
+ rss_rqns[i] = rqns[ix];
+ }
+
+ return 0;
+}
+
+int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
+ u32 *rqns, unsigned int num_rqns,
+ u8 hfunc, struct mlx5e_rss_params_indir *indir)
+{
+ u32 *rss_rqns;
+ int err;
+
+ rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL);
+ if (!rss_rqns)
+ return -ENOMEM;
+
+ err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir);
+ if (err)
+ goto out;
+
+ err = mlx5e_rqt_init(rqt, mdev, MLX5E_INDIR_RQT_SIZE, rss_rqns, MLX5E_INDIR_RQT_SIZE);
+
+out:
+ kvfree(rss_rqns);
+ return err;
+}
+
+void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt)
+{
+ mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn);
+}
+
+static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int size)
+{
+ unsigned int i;
+ void *rqtc;
+ int inlen;
+ u32 *in;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * size;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
+
+ MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, size);
+ for (i = 0; i < size; i++)
+ MLX5_SET(rqtc, rqtc, rq_num[i], rqns[i]);
+
+ err = mlx5_core_modify_rqt(rqt->mdev, rqt->rqtn, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+
+int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn)
+{
+ return mlx5e_rqt_redirect(rqt, &rqn, 1);
+}
+
+int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns,
+ u8 hfunc, struct mlx5e_rss_params_indir *indir)
+{
+ u32 *rss_rqns;
+ int err;
+
+ if (WARN_ON(rqt->size != MLX5E_INDIR_RQT_SIZE))
+ return -EINVAL;
+
+ rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL);
+ if (!rss_rqns)
+ return -ENOMEM;
+
+ err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir);
+ if (err)
+ goto out;
+
+ err = mlx5e_rqt_redirect(rqt, rss_rqns, MLX5E_INDIR_RQT_SIZE);
+
+out:
+ kvfree(rss_rqns);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
new file mode 100644
index 000000000000..60c985a12f24
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_EN_RQT_H__
+#define __MLX5_EN_RQT_H__
+
+#include <linux/kernel.h>
+
+#define MLX5E_INDIR_RQT_SIZE (1 << 8)
+
+struct mlx5_core_dev;
+
+struct mlx5e_rss_params_indir {
+ u32 table[MLX5E_INDIR_RQT_SIZE];
+};
+
+void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
+ unsigned int num_channels);
+
+struct mlx5e_rqt {
+ struct mlx5_core_dev *mdev;
+ u32 rqtn;
+ u16 size;
+};
+
+int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
+ bool indir_enabled, u32 init_rqn);
+int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
+ u32 *rqns, unsigned int num_rqns,
+ u8 hfunc, struct mlx5e_rss_params_indir *indir);
+void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt);
+
+static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt)
+{
+ return rqt->rqtn;
+}
+
+int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn);
+int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns,
+ u8 hfunc, struct mlx5e_rss_params_indir *indir);
+
+#endif /* __MLX5_EN_RQT_H__ */
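The mlx5e_rqt API keeps an RQT alive across channel reconfigurations: it is created once (typically pointing at a drop RQ), re-pointed at the live RQs when channels come up, and parked back on the drop RQ before teardown. A lifecycle sketch under those assumptions (the function name and the ETH_RSS_HASH_TOP choice are illustrative):

static int example_rqt_lifecycle(struct mlx5_core_dev *mdev, u32 drop_rqn,
				 u32 *rqns, unsigned int num_rqns,
				 struct mlx5e_rss_params_indir *indir)
{
	struct mlx5e_rqt rqt;
	int err;

	/* Size the RQT for indirection, but point it at the drop RQ for now. */
	err = mlx5e_rqt_init_direct(&rqt, mdev, true, drop_rqn);
	if (err)
		return err;

	/* Channels are up: spread traffic over the real RQs. */
	err = mlx5e_rqt_redirect_indir(&rqt, rqns, num_rqns,
				       ETH_RSS_HASH_TOP, indir);
	if (err)
		goto out;

	/* Channels going down: park the RQT on the drop RQ again. */
	err = mlx5e_rqt_redirect_direct(&rqt, drop_rqn);
out:
	mlx5e_rqt_destroy(&rqt);
	return err;
}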
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
new file mode 100644
index 000000000000..625cd49ef96c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES.
+
+#include "rss.h"
+
+#define mlx5e_rss_warn(__dev, format, ...) \
+ dev_warn((__dev)->device, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
+
+static const struct mlx5e_rss_params_traffic_type rss_default_config[MLX5E_NUM_INDIR_TIRS] = {
+ [MLX5_TT_IPV4_TCP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5_TT_IPV6_TCP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5_TT_IPV4_UDP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5_TT_IPV6_UDP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5_TT_IPV4_IPSEC_AH] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5_TT_IPV6_IPSEC_AH] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5_TT_IPV4_IPSEC_ESP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5_TT_IPV6_IPSEC_ESP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5_TT_IPV4] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP,
+ },
+ [MLX5_TT_IPV6] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP,
+ },
+};
+
+struct mlx5e_rss_params_traffic_type
+mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt)
+{
+ return rss_default_config[tt];
+}
+
+struct mlx5e_rss {
+ struct mlx5e_rss_params_hash hash;
+ struct mlx5e_rss_params_indir indir;
+ u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_tir *tir[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_tir *inner_tir[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_rqt rqt;
+ struct mlx5_core_dev *mdev;
+ u32 drop_rqn;
+ bool inner_ft_support;
+ bool enabled;
+ refcount_t refcnt;
+};
+
+struct mlx5e_rss *mlx5e_rss_alloc(void)
+{
+ return kvzalloc(sizeof(struct mlx5e_rss), GFP_KERNEL);
+}
+
+void mlx5e_rss_free(struct mlx5e_rss *rss)
+{
+ kvfree(rss);
+}
+
+static void mlx5e_rss_params_init(struct mlx5e_rss *rss)
+{
+ enum mlx5_traffic_types tt;
+
+ rss->hash.hfunc = ETH_RSS_HASH_TOP;
+ netdev_rss_key_fill(rss->hash.toeplitz_hash_key,
+ sizeof(rss->hash.toeplitz_hash_key));
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ rss->rx_hash_fields[tt] =
+ mlx5e_rss_get_default_tt_config(tt).rx_hash_fields;
+}
+
+static struct mlx5e_tir **rss_get_tirp(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ bool inner)
+{
+ return inner ? &rss->inner_tir[tt] : &rss->tir[tt];
+}
+
+static struct mlx5e_tir *rss_get_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ bool inner)
+{
+ return *rss_get_tirp(rss, tt, inner);
+}
+
+static struct mlx5e_rss_params_traffic_type
+mlx5e_rss_get_tt_config(struct mlx5e_rss *rss, enum mlx5_traffic_types tt)
+{
+ struct mlx5e_rss_params_traffic_type rss_tt;
+
+ rss_tt = mlx5e_rss_get_default_tt_config(tt);
+ rss_tt.rx_hash_fields = rss->rx_hash_fields[tt];
+ return rss_tt;
+}
+
+static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
+ enum mlx5_traffic_types tt,
+ const struct mlx5e_lro_param *init_lro_param,
+ bool inner)
+{
+ struct mlx5e_rss_params_traffic_type rss_tt;
+ struct mlx5e_tir_builder *builder;
+ struct mlx5e_tir **tir_p;
+ struct mlx5e_tir *tir;
+ u32 rqtn;
+ int err;
+
+ if (inner && !rss->inner_ft_support) {
+ mlx5e_rss_warn(rss->mdev,
+ "Cannot create inner indirect TIR[%d], RSS inner FT is not supported.\n",
+ tt);
+ return -EINVAL;
+ }
+
+ tir_p = rss_get_tirp(rss, tt, inner);
+ if (*tir_p)
+ return -EINVAL;
+
+ tir = kvzalloc(sizeof(*tir), GFP_KERNEL);
+ if (!tir)
+ return -ENOMEM;
+
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder) {
+ err = -ENOMEM;
+ goto free_tir;
+ }
+
+ rqtn = mlx5e_rqt_get_rqtn(&rss->rqt);
+ mlx5e_tir_builder_build_rqt(builder, rss->mdev->mlx5e_res.hw_objs.td.tdn,
+ rqtn, rss->inner_ft_support);
+ mlx5e_tir_builder_build_lro(builder, init_lro_param);
+ rss_tt = mlx5e_rss_get_tt_config(rss, tt);
+ mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner);
+
+ err = mlx5e_tir_init(tir, builder, rss->mdev, true);
+ mlx5e_tir_builder_free(builder);
+ if (err) {
+ mlx5e_rss_warn(rss->mdev, "Failed to create %sindirect TIR: err = %d, tt = %d\n",
+ inner ? "inner " : "", err, tt);
+ goto free_tir;
+ }
+
+ *tir_p = tir;
+ return 0;
+
+free_tir:
+ kvfree(tir);
+ return err;
+}
+
+static void mlx5e_rss_destroy_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ bool inner)
+{
+ struct mlx5e_tir **tir_p;
+ struct mlx5e_tir *tir;
+
+ tir_p = rss_get_tirp(rss, tt, inner);
+ if (!*tir_p)
+ return;
+
+ tir = *tir_p;
+ mlx5e_tir_destroy(tir);
+ kvfree(tir);
+ *tir_p = NULL;
+}
+
+static int mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
+ const struct mlx5e_lro_param *init_lro_param,
+ bool inner)
+{
+ enum mlx5_traffic_types tt, max_tt;
+ int err;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner);
+ if (err)
+ goto err_destroy_tirs;
+ }
+
+ return 0;
+
+err_destroy_tirs:
+ max_tt = tt;
+ for (tt = 0; tt < max_tt; tt++)
+ mlx5e_rss_destroy_tir(rss, tt, inner);
+ return err;
+}
+
+static void mlx5e_rss_destroy_tirs(struct mlx5e_rss *rss, bool inner)
+{
+ enum mlx5_traffic_types tt;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ mlx5e_rss_destroy_tir(rss, tt, inner);
+}
+
+static int mlx5e_rss_update_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ bool inner)
+{
+ struct mlx5e_rss_params_traffic_type rss_tt;
+ struct mlx5e_tir_builder *builder;
+ struct mlx5e_tir *tir;
+ int err;
+
+ tir = rss_get_tir(rss, tt, inner);
+ if (!tir)
+ return 0;
+
+ builder = mlx5e_tir_builder_alloc(true);
+ if (!builder)
+ return -ENOMEM;
+
+ rss_tt = mlx5e_rss_get_tt_config(rss, tt);
+
+ mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner);
+ err = mlx5e_tir_modify(tir, builder);
+
+ mlx5e_tir_builder_free(builder);
+ return err;
+}
+
+static int mlx5e_rss_update_tirs(struct mlx5e_rss *rss)
+{
+ enum mlx5_traffic_types tt;
+ int err, retval;
+
+ retval = 0;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ err = mlx5e_rss_update_tir(rss, tt, false);
+ if (err) {
+ retval = retval ? : err;
+ mlx5e_rss_warn(rss->mdev,
+ "Failed to update RSS hash of indirect TIR for traffic type %d: err = %d\n",
+ tt, err);
+ }
+
+ if (!rss->inner_ft_support)
+ continue;
+
+ err = mlx5e_rss_update_tir(rss, tt, true);
+ if (err) {
+ retval = retval ? : err;
+ mlx5e_rss_warn(rss->mdev,
+ "Failed to update RSS hash of inner indirect TIR for traffic type %d: err = %d\n",
+ tt, err);
+ }
+ }
+ return retval;
+}
+
+int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
+ bool inner_ft_support, u32 drop_rqn)
+{
+ rss->mdev = mdev;
+ rss->inner_ft_support = inner_ft_support;
+ rss->drop_rqn = drop_rqn;
+
+ mlx5e_rss_params_init(rss);
+ refcount_set(&rss->refcnt, 1);
+
+ return mlx5e_rqt_init_direct(&rss->rqt, mdev, true, drop_rqn);
+}
+
+int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
+ bool inner_ft_support, u32 drop_rqn,
+ const struct mlx5e_lro_param *init_lro_param)
+{
+ int err;
+
+ err = mlx5e_rss_init_no_tirs(rss, mdev, inner_ft_support, drop_rqn);
+ if (err)
+ goto err_out;
+
+ err = mlx5e_rss_create_tirs(rss, init_lro_param, false);
+ if (err)
+ goto err_destroy_rqt;
+
+ if (inner_ft_support) {
+ err = mlx5e_rss_create_tirs(rss, init_lro_param, true);
+ if (err)
+ goto err_destroy_tirs;
+ }
+
+ return 0;
+
+err_destroy_tirs:
+ mlx5e_rss_destroy_tirs(rss, false);
+err_destroy_rqt:
+ mlx5e_rqt_destroy(&rss->rqt);
+err_out:
+ return err;
+}
+
+int mlx5e_rss_cleanup(struct mlx5e_rss *rss)
+{
+ if (!refcount_dec_if_one(&rss->refcnt))
+ return -EBUSY;
+
+ mlx5e_rss_destroy_tirs(rss, false);
+
+ if (rss->inner_ft_support)
+ mlx5e_rss_destroy_tirs(rss, true);
+
+ mlx5e_rqt_destroy(&rss->rqt);
+
+ return 0;
+}
+
+void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss)
+{
+ refcount_inc(&rss->refcnt);
+}
+
+void mlx5e_rss_refcnt_dec(struct mlx5e_rss *rss)
+{
+ refcount_dec(&rss->refcnt);
+}
+
+unsigned int mlx5e_rss_refcnt_read(struct mlx5e_rss *rss)
+{
+ return refcount_read(&rss->refcnt);
+}
+
+u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ bool inner)
+{
+ struct mlx5e_tir *tir;
+
+ WARN_ON(inner && !rss->inner_ft_support);
+ tir = rss_get_tir(rss, tt, inner);
+ WARN_ON(!tir);
+
+ return mlx5e_tir_get_tirn(tir);
+}
+
+/* Fill the "tirn" output parameter.
+ * Create the requested TIR on its first use.
+ */
+int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
+ enum mlx5_traffic_types tt,
+ const struct mlx5e_lro_param *init_lro_param,
+ bool inner, u32 *tirn)
+{
+ struct mlx5e_tir *tir;
+
+ tir = rss_get_tir(rss, tt, inner);
+ if (!tir) { /* TIR doesn't exist, create one */
+ int err;
+
+ err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner);
+ if (err)
+ return err;
+ tir = rss_get_tir(rss, tt, inner);
+ }
+
+ *tirn = mlx5e_tir_get_tirn(tir);
+ return 0;
+}
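
mlx5e_rss_obtain_tirn() above implements lazy TIR creation: flow-steering code asks for a TIRN and the indirect TIR is built only on first use. A minimal caller-side sketch of that pattern, assuming the rss object and LRO parameters come from the caller's context (the wrapper name is illustrative only):

	static int example_get_ipv4_tirn(struct mlx5e_rss *rss,
					 const struct mlx5e_lro_param *lro_param,
					 u32 *tirn)
	{
		/* Creates the IPv4 indirect TIR only if it does not exist yet. */
		return mlx5e_rss_obtain_tirn(rss, MLX5_TT_IPV4, lro_param, false, tirn);
	}
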
+
+static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
+{
+ int err;
+
+ err = mlx5e_rqt_redirect_indir(&rss->rqt, rqns, num_rqns, rss->hash.hfunc, &rss->indir);
+ if (err)
+ mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n",
+ mlx5e_rqt_get_rqtn(&rss->rqt), err);
+}
+
+void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
+{
+ rss->enabled = true;
+ mlx5e_rss_apply(rss, rqns, num_rqns);
+}
+
+void mlx5e_rss_disable(struct mlx5e_rss *rss)
+{
+ int err;
+
+ rss->enabled = false;
+ err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn);
+ if (err)
+ mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to drop RQ %#x: err = %d\n",
+ mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err);
+}
+
+int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param)
+{
+ struct mlx5e_tir_builder *builder;
+ enum mlx5_traffic_types tt;
+ int err, final_err;
+
+ builder = mlx5e_tir_builder_alloc(true);
+ if (!builder)
+ return -ENOMEM;
+
+ mlx5e_tir_builder_build_lro(builder, lro_param);
+
+ final_err = 0;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ struct mlx5e_tir *tir;
+
+ tir = rss_get_tir(rss, tt, false);
+ if (!tir)
+ goto inner_tir;
+ err = mlx5e_tir_modify(tir, builder);
+ if (err) {
+ mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of indirect TIR %#x for traffic type %d: err = %d\n",
+ mlx5e_tir_get_tirn(tir), tt, err);
+ if (!final_err)
+ final_err = err;
+ }
+
+inner_tir:
+ if (!rss->inner_ft_support)
+ continue;
+
+ tir = rss_get_tir(rss, tt, true);
+ if (!tir)
+ continue;
+ err = mlx5e_tir_modify(tir, builder);
+ if (err) {
+ mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of inner indirect TIR %#x for traffic type %d: err = %d\n",
+ mlx5e_tir_get_tirn(tir), tt, err);
+ if (!final_err)
+ final_err = err;
+ }
+ }
+
+ mlx5e_tir_builder_free(builder);
+ return final_err;
+}
+
+int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
+{
+ unsigned int i;
+
+ if (indir)
+ for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
+ indir[i] = rss->indir.table[i];
+
+ if (key)
+ memcpy(key, rss->hash.toeplitz_hash_key,
+ sizeof(rss->hash.toeplitz_hash_key));
+
+ if (hfunc)
+ *hfunc = rss->hash.hfunc;
+
+ return 0;
+}
+
+int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
+ const u8 *key, const u8 *hfunc,
+ u32 *rqns, unsigned int num_rqns)
+{
+ bool changed_indir = false;
+ bool changed_hash = false;
+
+ if (hfunc && *hfunc != rss->hash.hfunc) {
+ switch (*hfunc) {
+ case ETH_RSS_HASH_XOR:
+ case ETH_RSS_HASH_TOP:
+ break;
+ default:
+ return -EINVAL;
+ }
+ changed_hash = true;
+ changed_indir = true;
+ rss->hash.hfunc = *hfunc;
+ }
+
+ if (key) {
+ if (rss->hash.hfunc == ETH_RSS_HASH_TOP)
+ changed_hash = true;
+ memcpy(rss->hash.toeplitz_hash_key, key,
+ sizeof(rss->hash.toeplitz_hash_key));
+ }
+
+ if (indir) {
+ unsigned int i;
+
+ changed_indir = true;
+
+ for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
+ rss->indir.table[i] = indir[i];
+ }
+
+ if (changed_indir && rss->enabled)
+ mlx5e_rss_apply(rss, rqns, num_rqns);
+
+ if (changed_hash)
+ mlx5e_rss_update_tirs(rss);
+
+ return 0;
+}
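
In mlx5e_rss_set_rxfh() a caller passes only the fields it wants to change; NULL leaves the others untouched, and the RQN list is needed so the RQT can be re-applied when the indirection table changes. A minimal sketch under the assumption that the caller already holds the current channel RQNs:

	static int example_switch_to_xor_hash(struct mlx5e_rss *rss,
					      u32 *rqns, unsigned int num_rqns)
	{
		u8 hfunc = ETH_RSS_HASH_XOR;

		/* Change only the hash function; keep the key and indirection table. */
		return mlx5e_rss_set_rxfh(rss, NULL, NULL, &hfunc, rqns, num_rqns);
	}
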
+
+struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss)
+{
+ return rss->hash;
+}
+
+u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt)
+{
+ return rss->rx_hash_fields[tt];
+}
+
+int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ u8 rx_hash_fields)
+{
+ u8 old_rx_hash_fields;
+ int err;
+
+ old_rx_hash_fields = rss->rx_hash_fields[tt];
+
+ if (old_rx_hash_fields == rx_hash_fields)
+ return 0;
+
+ rss->rx_hash_fields[tt] = rx_hash_fields;
+
+ err = mlx5e_rss_update_tir(rss, tt, false);
+ if (err) {
+ rss->rx_hash_fields[tt] = old_rx_hash_fields;
+ mlx5e_rss_warn(rss->mdev,
+ "Failed to update RSS hash fields of indirect TIR for traffic type %d: err = %d\n",
+ tt, err);
+ return err;
+ }
+
+ if (!(rss->inner_ft_support))
+ return 0;
+
+ err = mlx5e_rss_update_tir(rss, tt, true);
+ if (err) {
+ /* Partial update happened. Try to revert - it may fail too, but
+ * there is nothing more we can do.
+ */
+ rss->rx_hash_fields[tt] = old_rx_hash_fields;
+ mlx5e_rss_warn(rss->mdev,
+ "Failed to update RSS hash fields of inner indirect TIR for traffic type %d: err = %d\n",
+ tt, err);
+ if (mlx5e_rss_update_tir(rss, tt, false))
+ mlx5e_rss_warn(rss->mdev,
+ "Partial update of RSS hash fields happened: failed to revert indirect TIR for traffic type %d to the old values\n",
+ tt);
+ }
+
+ return err;
+}
+
+void mlx5e_rss_set_indir_uniform(struct mlx5e_rss *rss, unsigned int nch)
+{
+ mlx5e_rss_params_indir_init_uniform(&rss->indir, nch);
+}
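
Taken together, the functions in this file define the lifecycle of an RSS context: allocate, init (which creates the RQT and, optionally, the TIRs), point the indirection table at the channels, enable, and later disable and clean up. A condensed sketch of that sequence, assuming the caller owns the device, drop RQN, LRO parameters and RQN array:

	static int example_rss_lifecycle(struct mlx5_core_dev *mdev, u32 drop_rqn,
					 const struct mlx5e_lro_param *lro_param,
					 u32 *rqns, unsigned int num_rqns)
	{
		struct mlx5e_rss *rss;
		int err;

		rss = mlx5e_rss_alloc();
		if (!rss)
			return -ENOMEM;

		err = mlx5e_rss_init(rss, mdev, false, drop_rqn, lro_param);
		if (err) {
			mlx5e_rss_free(rss);
			return err;
		}

		mlx5e_rss_set_indir_uniform(rss, num_rqns);
		mlx5e_rss_enable(rss, rqns, num_rqns);

		/* ... traffic flows through the indirect TIRs ... */

		mlx5e_rss_disable(rss);
		err = mlx5e_rss_cleanup(rss); /* -EBUSY if a reference is still held */
		if (err)
			return err;

		mlx5e_rss_free(rss);
		return 0;
	}
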
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
new file mode 100644
index 000000000000..d522a10dadf3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_EN_RSS_H__
+#define __MLX5_EN_RSS_H__
+
+#include "rqt.h"
+#include "tir.h"
+#include "fs.h"
+
+struct mlx5e_rss_params_traffic_type
+mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt);
+
+struct mlx5e_rss;
+
+struct mlx5e_rss *mlx5e_rss_alloc(void);
+void mlx5e_rss_free(struct mlx5e_rss *rss);
+int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
+ bool inner_ft_support, u32 drop_rqn,
+ const struct mlx5e_lro_param *init_lro_param);
+int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
+ bool inner_ft_support, u32 drop_rqn);
+int mlx5e_rss_cleanup(struct mlx5e_rss *rss);
+
+void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss);
+void mlx5e_rss_refcnt_dec(struct mlx5e_rss *rss);
+unsigned int mlx5e_rss_refcnt_read(struct mlx5e_rss *rss);
+
+u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ bool inner);
+int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
+ enum mlx5_traffic_types tt,
+ const struct mlx5e_lro_param *init_lro_param,
+ bool inner, u32 *tirn);
+
+void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns);
+void mlx5e_rss_disable(struct mlx5e_rss *rss);
+
+int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param);
+int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc);
+int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
+ const u8 *key, const u8 *hfunc,
+ u32 *rqns, unsigned int num_rqns);
+struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss);
+u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt);
+int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ u8 rx_hash_fields);
+void mlx5e_rss_set_indir_uniform(struct mlx5e_rss *rss, unsigned int nch);
+#endif /* __MLX5_EN_RSS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
new file mode 100644
index 000000000000..bf0313e2682b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -0,0 +1,690 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#include "rx_res.h"
+#include "channels.h"
+#include "params.h"
+
+#define MLX5E_MAX_NUM_RSS 16
+
+struct mlx5e_rx_res {
+ struct mlx5_core_dev *mdev;
+ enum mlx5e_rx_res_features features;
+ unsigned int max_nch;
+ u32 drop_rqn;
+
+ struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
+ bool rss_active;
+ u32 rss_rqns[MLX5E_INDIR_RQT_SIZE];
+ unsigned int rss_nch;
+
+ struct {
+ struct mlx5e_rqt direct_rqt;
+ struct mlx5e_tir direct_tir;
+ struct mlx5e_rqt xsk_rqt;
+ struct mlx5e_tir xsk_tir;
+ } *channels;
+
+ struct {
+ struct mlx5e_rqt rqt;
+ struct mlx5e_tir tir;
+ } ptp;
+};
+
+/* API for rx_res_rss_* */
+
+static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
+ const struct mlx5e_lro_param *init_lro_param,
+ unsigned int init_nch)
+{
+ bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_rss *rss;
+ int err;
+
+ if (WARN_ON(res->rss[0]))
+ return -EINVAL;
+
+ rss = mlx5e_rss_alloc();
+ if (!rss)
+ return -ENOMEM;
+
+ err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn,
+ init_lro_param);
+ if (err)
+ goto err_rss_free;
+
+ mlx5e_rss_set_indir_uniform(rss, init_nch);
+
+ res->rss[0] = rss;
+
+ return 0;
+
+err_rss_free:
+ mlx5e_rss_free(rss);
+ return err;
+}
+
+int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch)
+{
+ bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_rss *rss;
+ int err, i;
+
+ for (i = 1; i < MLX5E_MAX_NUM_RSS; i++)
+ if (!res->rss[i])
+ break;
+
+ if (i == MLX5E_MAX_NUM_RSS)
+ return -ENOSPC;
+
+ rss = mlx5e_rss_alloc();
+ if (!rss)
+ return -ENOMEM;
+
+ err = mlx5e_rss_init_no_tirs(rss, res->mdev, inner_ft_support, res->drop_rqn);
+ if (err)
+ goto err_rss_free;
+
+ mlx5e_rss_set_indir_uniform(rss, init_nch);
+ if (res->rss_active)
+ mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch);
+
+ res->rss[i] = rss;
+ *rss_idx = i;
+
+ return 0;
+
+err_rss_free:
+ mlx5e_rss_free(rss);
+ return err;
+}
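
Slot 0 is the default RSS context; mlx5e_rx_res_rss_init() above allocates slots 1..15 for additional contexts, without per-context TIRs, and immediately attaches them to the active channels when RSS is enabled. A hedged sketch of creating one and giving it a caller-supplied indirection table of MLX5E_INDIR_RQT_SIZE entries (the wrapper name is illustrative):

	static int example_new_rss_ctx(struct mlx5e_rx_res *res, unsigned int nch,
				       const u32 *indir)
	{
		u32 rss_idx;
		int err;

		err = mlx5e_rx_res_rss_init(res, &rss_idx, nch);
		if (err)
			return err;

		/* Apply the caller's indirection table; keep the key and hash function. */
		err = mlx5e_rx_res_rss_set_rxfh(res, rss_idx, indir, NULL, NULL);
		if (err)
			mlx5e_rx_res_rss_destroy(res, rss_idx);

		return err;
	}
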
+
+static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
+{
+ struct mlx5e_rss *rss = res->rss[rss_idx];
+ int err;
+
+ err = mlx5e_rss_cleanup(rss);
+ if (err)
+ return err;
+
+ mlx5e_rss_free(rss);
+ res->rss[rss_idx] = NULL;
+
+ return 0;
+}
+
+int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
+{
+ struct mlx5e_rss *rss;
+
+ if (rss_idx >= MLX5E_MAX_NUM_RSS)
+ return -EINVAL;
+
+ rss = res->rss[rss_idx];
+ if (!rss)
+ return -EINVAL;
+
+ return __mlx5e_rx_res_rss_destroy(res, rss_idx);
+}
+
+static void mlx5e_rx_res_rss_destroy_all(struct mlx5e_rx_res *res)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
+ struct mlx5e_rss *rss = res->rss[i];
+ int err;
+
+ if (!rss)
+ continue;
+
+ err = __mlx5e_rx_res_rss_destroy(res, i);
+ if (err) {
+ unsigned int refcount;
+
+ refcount = mlx5e_rss_refcnt_read(rss);
+ mlx5_core_warn(res->mdev,
+ "Failed to destroy RSS context %d, refcount = %u, err = %d\n",
+ i, refcount, err);
+ }
+ }
+}
+
+static void mlx5e_rx_res_rss_enable(struct mlx5e_rx_res *res)
+{
+ int i;
+
+ res->rss_active = true;
+
+ for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
+ struct mlx5e_rss *rss = res->rss[i];
+
+ if (!rss)
+ continue;
+ mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch);
+ }
+}
+
+static void mlx5e_rx_res_rss_disable(struct mlx5e_rx_res *res)
+{
+ int i;
+
+ res->rss_active = false;
+
+ for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
+ struct mlx5e_rss *rss = res->rss[i];
+
+ if (!rss)
+ continue;
+ mlx5e_rss_disable(rss);
+ }
+}
+
+/* Updates the indirection table SW shadow, does not update the HW resources yet */
+void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch)
+{
+ WARN_ON_ONCE(res->rss_active);
+ mlx5e_rss_set_indir_uniform(res->rss[0], nch);
+}
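
Because the function above only touches the SW shadow, the intended ordering is to resize the table while the channels are deactivated and let activation push the new table to HW. A small sketch under that assumption:

	static void example_resize_default_rss(struct mlx5e_rx_res *res,
					       struct mlx5e_channels *new_chs,
					       unsigned int new_nch)
	{
		/* Channels are assumed to be deactivated at this point. */
		mlx5e_rx_res_rss_set_indir_uniform(res, new_nch); /* SW shadow only */
		mlx5e_rx_res_channels_activate(res, new_chs);     /* applies it to HW */
	}
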
+
+int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
+ u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct mlx5e_rss *rss;
+
+ if (rss_idx >= MLX5E_MAX_NUM_RSS)
+ return -EINVAL;
+
+ rss = res->rss[rss_idx];
+ if (!rss)
+ return -ENOENT;
+
+ return mlx5e_rss_get_rxfh(rss, indir, key, hfunc);
+}
+
+int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
+ const u32 *indir, const u8 *key, const u8 *hfunc)
+{
+ struct mlx5e_rss *rss;
+
+ if (rss_idx >= MLX5E_MAX_NUM_RSS)
+ return -EINVAL;
+
+ rss = res->rss[rss_idx];
+ if (!rss)
+ return -ENOENT;
+
+ return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, res->rss_nch);
+}
+
+u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
+{
+ struct mlx5e_rss *rss = res->rss[0];
+
+ return mlx5e_rss_get_hash_fields(rss, tt);
+}
+
+int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt,
+ u8 rx_hash_fields)
+{
+ struct mlx5e_rss *rss = res->rss[0];
+
+ return mlx5e_rss_set_hash_fields(rss, tt, rx_hash_fields);
+}
+
+int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res)
+{
+ int i, cnt;
+
+ cnt = 0;
+ for (i = 0; i < MLX5E_MAX_NUM_RSS; i++)
+ if (res->rss[i])
+ cnt++;
+
+ return cnt;
+}
+
+int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss)
+{
+ int i;
+
+ if (!rss)
+ return -EINVAL;
+
+ for (i = 0; i < MLX5E_MAX_NUM_RSS; i++)
+ if (rss == res->rss[i])
+ return i;
+
+ return -ENOENT;
+}
+
+struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx)
+{
+ if (rss_idx >= MLX5E_MAX_NUM_RSS)
+ return NULL;
+
+ return res->rss[rss_idx];
+}
+
+/* End of API rx_res_rss_* */
+
+struct mlx5e_rx_res *mlx5e_rx_res_alloc(void)
+{
+ return kvzalloc(sizeof(struct mlx5e_rx_res), GFP_KERNEL);
+}
+
+static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
+ const struct mlx5e_lro_param *init_lro_param)
+{
+ bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_tir_builder *builder;
+ int err = 0;
+ int ix;
+
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
+ return -ENOMEM;
+
+ res->channels = kvcalloc(res->max_nch, sizeof(*res->channels), GFP_KERNEL);
+ if (!res->channels) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (ix = 0; ix < res->max_nch; ix++) {
+ err = mlx5e_rqt_init_direct(&res->channels[ix].direct_rqt,
+ res->mdev, false, res->drop_rqn);
+ if (err) {
+ mlx5_core_warn(res->mdev, "Failed to create a direct RQT: err = %d, ix = %u\n",
+ err, ix);
+ goto err_destroy_direct_rqts;
+ }
+ }
+
+ for (ix = 0; ix < res->max_nch; ix++) {
+ mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
+ mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
+ inner_ft_support);
+ mlx5e_tir_builder_build_lro(builder, init_lro_param);
+ mlx5e_tir_builder_build_direct(builder);
+
+ err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
+ if (err) {
+ mlx5_core_warn(res->mdev, "Failed to create a direct TIR: err = %d, ix = %u\n",
+ err, ix);
+ goto err_destroy_direct_tirs;
+ }
+
+ mlx5e_tir_builder_clear(builder);
+ }
+
+ if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
+ goto out;
+
+ for (ix = 0; ix < res->max_nch; ix++) {
+ err = mlx5e_rqt_init_direct(&res->channels[ix].xsk_rqt,
+ res->mdev, false, res->drop_rqn);
+ if (err) {
+ mlx5_core_warn(res->mdev, "Failed to create an XSK RQT: err = %d, ix = %u\n",
+ err, ix);
+ goto err_destroy_xsk_rqts;
+ }
+ }
+
+ for (ix = 0; ix < res->max_nch; ix++) {
+ mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
+ mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
+ inner_ft_support);
+ mlx5e_tir_builder_build_lro(builder, init_lro_param);
+ mlx5e_tir_builder_build_direct(builder);
+
+ err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true);
+ if (err) {
+ mlx5_core_warn(res->mdev, "Failed to create an XSK TIR: err = %d, ix = %u\n",
+ err, ix);
+ goto err_destroy_xsk_tirs;
+ }
+
+ mlx5e_tir_builder_clear(builder);
+ }
+
+ goto out;
+
+err_destroy_xsk_tirs:
+ while (--ix >= 0)
+ mlx5e_tir_destroy(&res->channels[ix].xsk_tir);
+
+ ix = res->max_nch;
+err_destroy_xsk_rqts:
+ while (--ix >= 0)
+ mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt);
+
+ ix = res->max_nch;
+err_destroy_direct_tirs:
+ while (--ix >= 0)
+ mlx5e_tir_destroy(&res->channels[ix].direct_tir);
+
+ ix = res->max_nch;
+err_destroy_direct_rqts:
+ while (--ix >= 0)
+ mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);
+
+ kvfree(res->channels);
+
+out:
+ mlx5e_tir_builder_free(builder);
+
+ return err;
+}
+
+static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
+{
+ bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_tir_builder *builder;
+ int err;
+
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
+ return -ENOMEM;
+
+ err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn);
+ if (err)
+ goto out;
+
+ mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
+ mlx5e_rqt_get_rqtn(&res->ptp.rqt),
+ inner_ft_support);
+ mlx5e_tir_builder_build_direct(builder);
+
+ err = mlx5e_tir_init(&res->ptp.tir, builder, res->mdev, true);
+ if (err)
+ goto err_destroy_ptp_rqt;
+
+ goto out;
+
+err_destroy_ptp_rqt:
+ mlx5e_rqt_destroy(&res->ptp.rqt);
+
+out:
+ mlx5e_tir_builder_free(builder);
+ return err;
+}
+
+static void mlx5e_rx_res_channels_destroy(struct mlx5e_rx_res *res)
+{
+ unsigned int ix;
+
+ for (ix = 0; ix < res->max_nch; ix++) {
+ mlx5e_tir_destroy(&res->channels[ix].direct_tir);
+ mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);
+
+ if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
+ continue;
+
+ mlx5e_tir_destroy(&res->channels[ix].xsk_tir);
+ mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt);
+ }
+
+ kvfree(res->channels);
+}
+
+static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
+{
+ mlx5e_tir_destroy(&res->ptp.tir);
+ mlx5e_rqt_destroy(&res->ptp.rqt);
+}
+
+int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
+ enum mlx5e_rx_res_features features, unsigned int max_nch,
+ u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param,
+ unsigned int init_nch)
+{
+ int err;
+
+ res->mdev = mdev;
+ res->features = features;
+ res->max_nch = max_nch;
+ res->drop_rqn = drop_rqn;
+
+ err = mlx5e_rx_res_rss_init_def(res, init_lro_param, init_nch);
+ if (err)
+ goto err_out;
+
+ err = mlx5e_rx_res_channels_init(res, init_lro_param);
+ if (err)
+ goto err_rss_destroy;
+
+ err = mlx5e_rx_res_ptp_init(res);
+ if (err)
+ goto err_channels_destroy;
+
+ return 0;
+
+err_channels_destroy:
+ mlx5e_rx_res_channels_destroy(res);
+err_rss_destroy:
+ __mlx5e_rx_res_rss_destroy(res, 0);
+err_out:
+ return err;
+}
+
+void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
+{
+ mlx5e_rx_res_ptp_destroy(res);
+ mlx5e_rx_res_channels_destroy(res);
+ mlx5e_rx_res_rss_destroy_all(res);
+}
+
+void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
+{
+ kvfree(res);
+}
+
+u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
+{
+ return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
+}
+
+u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix)
+{
+ WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_XSK));
+
+ return mlx5e_tir_get_tirn(&res->channels[ix].xsk_tir);
+}
+
+u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
+{
+ struct mlx5e_rss *rss = res->rss[0];
+
+ return mlx5e_rss_get_tirn(rss, tt, false);
+}
+
+u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
+{
+ struct mlx5e_rss *rss = res->rss[0];
+
+ return mlx5e_rss_get_tirn(rss, tt, true);
+}
+
+u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
+{
+ WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_PTP));
+ return mlx5e_tir_get_tirn(&res->ptp.tir);
+}
+
+u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
+{
+ return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
+}
+
+void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs)
+{
+ unsigned int nch, ix;
+ int err;
+
+ nch = mlx5e_channels_get_num(chs);
+
+ for (ix = 0; ix < chs->num; ix++)
+ mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
+ res->rss_nch = chs->num;
+
+ mlx5e_rx_res_rss_enable(res);
+
+ for (ix = 0; ix < nch; ix++) {
+ u32 rqn;
+
+ mlx5e_channels_get_regular_rqn(chs, ix, &rqn);
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
+ rqn, ix, err);
+
+ if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
+ continue;
+
+ if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
+ rqn = res->drop_rqn;
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
+ rqn, ix, err);
+ }
+ for (ix = nch; ix < res->max_nch; ix++) {
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
+ res->drop_rqn, ix, err);
+
+ if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
+ continue;
+
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
+ res->drop_rqn, ix, err);
+ }
+
+ if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
+ u32 rqn;
+
+ if (mlx5e_channels_get_ptp_rqn(chs, &rqn))
+ rqn = res->drop_rqn;
+
+ err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (PTP): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->ptp.rqt),
+ rqn, err);
+ }
+}
+
+void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
+{
+ unsigned int ix;
+ int err;
+
+ mlx5e_rx_res_rss_disable(res);
+
+ for (ix = 0; ix < res->max_nch; ix++) {
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
+ res->drop_rqn, ix, err);
+
+ if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
+ continue;
+
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
+ res->drop_rqn, ix, err);
+ }
+
+ if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
+ err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (PTP): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->ptp.rqt),
+ res->drop_rqn, err);
+ }
+}
+
+int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
+ unsigned int ix)
+{
+ u32 rqn;
+ int err;
+
+ if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
+ return -EINVAL;
+
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to XSK RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
+ rqn, ix, err);
+ return err;
+}
+
+int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix)
+{
+ int err;
+
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
+ res->drop_rqn, ix, err);
+ return err;
+}
+
+int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param)
+{
+ struct mlx5e_tir_builder *builder;
+ int err, final_err;
+ unsigned int ix;
+
+ builder = mlx5e_tir_builder_alloc(true);
+ if (!builder)
+ return -ENOMEM;
+
+ mlx5e_tir_builder_build_lro(builder, lro_param);
+
+ final_err = 0;
+
+ for (ix = 0; ix < MLX5E_MAX_NUM_RSS; ix++) {
+ struct mlx5e_rss *rss = res->rss[ix];
+
+ if (!rss)
+ continue;
+
+ err = mlx5e_rss_lro_set_param(rss, lro_param);
+ if (err)
+ final_err = final_err ? : err;
+ }
+
+ for (ix = 0; ix < res->max_nch; ix++) {
+ err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder);
+ if (err) {
+ mlx5_core_warn(res->mdev, "Failed to update LRO state of direct TIR %#x for channel %u: err = %d\n",
+ mlx5e_tir_get_tirn(&res->channels[ix].direct_tir), ix, err);
+ if (!final_err)
+ final_err = err;
+ }
+ }
+
+ mlx5e_tir_builder_free(builder);
+ return final_err;
+}
+
+struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res)
+{
+ return mlx5e_rss_get_hash(res->rss[0]);
+}
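
This file as a whole is meant to be driven in a fixed order: allocate and init the container once, activate it against a channel set on ifup, deactivate on ifdown, then destroy and free on teardown. A sketch of the creation half, with an illustrative feature set (XSK and PTP here are only examples):

	static struct mlx5e_rx_res *
	example_rx_res_create(struct mlx5_core_dev *mdev, unsigned int max_nch,
			      u32 drop_rqn, const struct mlx5e_lro_param *lro_param,
			      unsigned int init_nch)
	{
		struct mlx5e_rx_res *res;
		int err;

		res = mlx5e_rx_res_alloc();
		if (!res)
			return ERR_PTR(-ENOMEM);

		err = mlx5e_rx_res_init(res, mdev,
					MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP,
					max_nch, drop_rqn, lro_param, init_nch);
		if (err) {
			mlx5e_rx_res_free(res);
			return ERR_PTR(err);
		}

		return res;
	}
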
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
new file mode 100644
index 000000000000..4a15942d79f7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_EN_RX_RES_H__
+#define __MLX5_EN_RX_RES_H__
+
+#include <linux/kernel.h>
+#include "rqt.h"
+#include "tir.h"
+#include "fs.h"
+#include "rss.h"
+
+struct mlx5e_rx_res;
+
+struct mlx5e_channels;
+struct mlx5e_rss_params_hash;
+
+enum mlx5e_rx_res_features {
+ MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0),
+ MLX5E_RX_RES_FEATURE_XSK = BIT(1),
+ MLX5E_RX_RES_FEATURE_PTP = BIT(2),
+};
+
+/* Setup */
+struct mlx5e_rx_res *mlx5e_rx_res_alloc(void);
+int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
+ enum mlx5e_rx_res_features features, unsigned int max_nch,
+ u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param,
+ unsigned int init_nch);
+void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res);
+void mlx5e_rx_res_free(struct mlx5e_rx_res *res);
+
+/* TIRN getters for flow steering */
+u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix);
+u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix);
+u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
+u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
+u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
+
+/* RQTN getters for modules that create their own TIRs */
+u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix);
+
+/* Activate/deactivate API */
+void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
+void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res);
+int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
+ unsigned int ix);
+int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix);
+
+/* Configuration API */
+void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch);
+int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
+ u32 *indir, u8 *key, u8 *hfunc);
+int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
+ const u32 *indir, const u8 *key, const u8 *hfunc);
+
+u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
+int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt,
+ u8 rx_hash_fields);
+int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param);
+
+int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch);
+int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx);
+int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res);
+int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss);
+struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx);
+
+/* Workaround for hairpin */
+struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res);
+
+#endif /* __MLX5_EN_RX_RES_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
new file mode 100644
index 000000000000..a3e43e898a56
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "en_tc.h"
+#include "post_act.h"
+#include "mlx5_core.h"
+
+struct mlx5e_post_act {
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_fs_chains *chains;
+ struct mlx5_flow_table *ft;
+ struct mlx5e_priv *priv;
+ struct xarray ids;
+};
+
+struct mlx5e_post_act_handle {
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_flow_attr *attr;
+ struct mlx5_flow_handle *rule;
+ u32 id;
+};
+
+#define MLX5_POST_ACTION_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen)
+#define MLX5_POST_ACTION_MAX GENMASK(MLX5_POST_ACTION_BITS - 1, 0)
+#define MLX5_POST_ACTION_MASK MLX5_POST_ACTION_MAX
+
+struct mlx5e_post_act *
+mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct mlx5e_post_act *post_act;
+ int err;
+
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB &&
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, ignore_flow_level)) {
+ mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
+ err = -EOPNOTSUPP;
+ goto err_check;
+ } else if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
+ mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
+ err = -EOPNOTSUPP;
+ goto err_check;
+ }
+
+ post_act = kzalloc(sizeof(*post_act), GFP_KERNEL);
+ if (!post_act) {
+ err = -ENOMEM;
+ goto err_check;
+ }
+ post_act->ft = mlx5_chains_create_global_table(chains);
+ if (IS_ERR(post_act->ft)) {
+ err = PTR_ERR(post_act->ft);
+ mlx5_core_warn(priv->mdev, "failed to create post action table, err: %d\n", err);
+ goto err_ft;
+ }
+ post_act->chains = chains;
+ post_act->ns_type = ns_type;
+ post_act->priv = priv;
+ xa_init_flags(&post_act->ids, XA_FLAGS_ALLOC1);
+ return post_act;
+
+err_ft:
+ kfree(post_act);
+err_check:
+ return ERR_PTR(err);
+}
+
+void
+mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act)
+{
+ if (IS_ERR_OR_NULL(post_act))
+ return;
+
+ xa_destroy(&post_act->ids);
+ mlx5_chains_destroy_global_table(post_act->chains, post_act->ft);
+ kfree(post_act);
+}
+
+struct mlx5e_post_act_handle *
+mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr)
+{
+ u32 attr_sz = ns_to_attr_sz(post_act->ns_type);
+ struct mlx5e_post_act_handle *handle = NULL;
+ struct mlx5_flow_attr *post_attr = NULL;
+ struct mlx5_flow_spec *spec = NULL;
+ int err;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ post_attr = mlx5_alloc_flow_attr(post_act->ns_type);
+ if (!handle || !spec || !post_attr) {
+ kfree(post_attr);
+ kvfree(spec);
+ kfree(handle);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memcpy(post_attr, attr, attr_sz);
+ post_attr->chain = 0;
+ post_attr->prio = 0;
+ post_attr->ft = post_act->ft;
+ post_attr->inner_match_level = MLX5_MATCH_NONE;
+ post_attr->outer_match_level = MLX5_MATCH_NONE;
+ post_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
+
+ handle->ns_type = post_act->ns_type;
+ /* Splits were handled before post action */
+ if (handle->ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ post_attr->esw_attr->split_count = 0;
+
+ err = xa_alloc(&post_act->ids, &handle->id, post_attr,
+ XA_LIMIT(1, MLX5_POST_ACTION_MAX), GFP_KERNEL);
+ if (err)
+ goto err_xarray;
+
+ /* The post action rule matches on fte_id and executes the original
+ * rule's TC action.
+ */
+ mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG,
+ handle->id, MLX5_POST_ACTION_MASK);
+
+ handle->rule = mlx5_tc_rule_insert(post_act->priv, spec, post_attr);
+ if (IS_ERR(handle->rule)) {
+ err = PTR_ERR(handle->rule);
+ netdev_warn(post_act->priv->netdev, "Failed to add post action rule\n");
+ goto err_rule;
+ }
+ handle->attr = post_attr;
+
+ kvfree(spec);
+ return handle;
+
+err_rule:
+ xa_erase(&post_act->ids, handle->id);
+err_xarray:
+ kfree(post_attr);
+ kvfree(spec);
+ kfree(handle);
+ return ERR_PTR(err);
+}
+
+void
+mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle)
+{
+ mlx5_tc_rule_delete(post_act->priv, handle->rule, handle->attr);
+ xa_erase(&post_act->ids, handle->id);
+ kfree(handle->attr);
+ kfree(handle);
+}
+
+struct mlx5_flow_table *
+mlx5e_tc_post_act_get_ft(struct mlx5e_post_act *post_act)
+{
+ return post_act->ft;
+}
+
+/* Allocate a header modify action to write the post action handle fte id to a register. */
+int
+mlx5e_tc_post_act_set_handle(struct mlx5_core_dev *dev,
+ struct mlx5e_post_act_handle *handle,
+ struct mlx5e_tc_mod_hdr_acts *acts)
+{
+ return mlx5e_tc_match_to_reg_set(dev, acts, handle->ns_type, FTEID_TO_REG, handle->id);
+}
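
The expected call sequence for a user of this API is: add a post-action handle for the deferred part of the flow attr, write the handle's fte_id into the FTEID register via mlx5e_tc_post_act_set_handle(), and make the first-stage rule forward to the table returned by mlx5e_tc_post_act_get_ft(). A hedged sketch of that sequence; mod_acts is the caller's pending header-rewrite list and error handling is kept minimal:

	static int example_defer_to_post_act(struct mlx5_core_dev *dev,
					     struct mlx5e_post_act *post_act,
					     struct mlx5_flow_attr *attr,
					     struct mlx5e_tc_mod_hdr_acts *mod_acts,
					     struct mlx5e_post_act_handle **phandle,
					     struct mlx5_flow_table **dest_ft)
	{
		struct mlx5e_post_act_handle *handle;
		int err;

		handle = mlx5e_tc_post_act_add(post_act, attr);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		/* Rewrite the FTEID register so the post rule can match this flow. */
		err = mlx5e_tc_post_act_set_handle(dev, handle, mod_acts);
		if (err) {
			mlx5e_tc_post_act_del(post_act, handle);
			return err;
		}

		/* The caller points its first-stage rule at this table. */
		*dest_ft = mlx5e_tc_post_act_get_ft(post_act);
		*phandle = handle;
		return 0;
	}
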
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
new file mode 100644
index 000000000000..b530ec1981a5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_POST_ACTION_H__
+#define __MLX5_POST_ACTION_H__
+
+#include "en.h"
+#include "lib/fs_chains.h"
+
+struct mlx5_flow_attr;
+struct mlx5e_priv;
+struct mlx5e_tc_mod_hdr_acts;
+
+struct mlx5e_post_act *
+mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
+ enum mlx5_flow_namespace_type ns_type);
+
+void
+mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act);
+
+struct mlx5e_post_act_handle *
+mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr);
+
+void
+mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle);
+
+struct mlx5_flow_table *
+mlx5e_tc_post_act_get_ft(struct mlx5e_post_act *post_act);
+
+int
+mlx5e_tc_post_act_set_handle(struct mlx5_core_dev *dev,
+ struct mlx5e_post_act_handle *handle,
+ struct mlx5e_tc_mod_hdr_acts *acts);
+
+#endif /* __MLX5_POST_ACTION_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
new file mode 100644
index 000000000000..6552ecee3f9b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
@@ -0,0 +1,714 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#include <linux/skbuff.h>
+#include <net/psample.h>
+#include "en/mapping.h"
+#include "en/tc/post_act.h"
+#include "sample.h"
+#include "eswitch.h"
+#include "en_tc.h"
+#include "fs_core.h"
+
+#define MLX5_ESW_VPORT_TBL_SIZE_SAMPLE (64 * 1024)
+
+static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_sample_ns = {
+ .max_fte = MLX5_ESW_VPORT_TBL_SIZE_SAMPLE,
+ .max_num_groups = 0, /* default num of groups */
+ .flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | MLX5_FLOW_TABLE_TUNNEL_EN_DECAP,
+};
+
+struct mlx5e_tc_psample {
+ struct mlx5_eswitch *esw;
+ struct mlx5_flow_table *termtbl;
+ struct mlx5_flow_handle *termtbl_rule;
+ DECLARE_HASHTABLE(hashtbl, 8);
+ struct mutex ht_lock; /* protect hashtbl */
+ DECLARE_HASHTABLE(restore_hashtbl, 8);
+ struct mutex restore_lock; /* protect restore_hashtbl */
+ struct mlx5e_post_act *post_act;
+};
+
+struct mlx5e_sampler {
+ struct hlist_node hlist;
+ u32 sampler_id;
+ u32 sample_ratio;
+ u32 sample_table_id;
+ u32 default_table_id;
+ int count;
+};
+
+struct mlx5e_sample_flow {
+ struct mlx5e_sampler *sampler;
+ struct mlx5e_sample_restore *restore;
+ struct mlx5_flow_attr *pre_attr;
+ struct mlx5_flow_handle *pre_rule;
+ struct mlx5_flow_attr *post_attr;
+ struct mlx5_flow_handle *post_rule;
+ struct mlx5e_post_act_handle *post_act_handle;
+};
+
+struct mlx5e_sample_restore {
+ struct hlist_node hlist;
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5e_post_act_handle *post_act_handle;
+ u32 obj_id;
+ int count;
+};
+
+static int
+sampler_termtbl_create(struct mlx5e_tc_psample *tc_psample)
+{
+ struct mlx5_eswitch *esw = tc_psample->esw;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_act act = {};
+ int err;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, termination_table)) {
+ mlx5_core_warn(dev, "termination table is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns) {
+ mlx5_core_warn(dev, "failed to get FDB flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION | MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.max_fte = 1;
+ ft_attr.level = 1;
+ tc_psample->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(tc_psample->termtbl)) {
+ err = PTR_ERR(tc_psample->termtbl);
+ mlx5_core_warn(dev, "failed to create termtbl, err: %d\n", err);
+ return err;
+ }
+
+ act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.vport.num = esw->manager_vport;
+ tc_psample->termtbl_rule = mlx5_add_flow_rules(tc_psample->termtbl, NULL, &act, &dest, 1);
+ if (IS_ERR(tc_psample->termtbl_rule)) {
+ err = PTR_ERR(tc_psample->termtbl_rule);
+ mlx5_core_warn(dev, "failed to create termtbl rule, err: %d\n", err);
+ mlx5_destroy_flow_table(tc_psample->termtbl);
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+sampler_termtbl_destroy(struct mlx5e_tc_psample *tc_psample)
+{
+ mlx5_del_flow_rules(tc_psample->termtbl_rule);
+ mlx5_destroy_flow_table(tc_psample->termtbl);
+}
+
+static int
+sampler_obj_create(struct mlx5_core_dev *mdev, struct mlx5e_sampler *sampler)
+{
+ u32 in[MLX5_ST_SZ_DW(create_sampler_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ u64 general_obj_types;
+ void *obj;
+ int err;
+
+ general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
+ if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER))
+ return -EOPNOTSUPP;
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
+ return -EOPNOTSUPP;
+
+ obj = MLX5_ADDR_OF(create_sampler_obj_in, in, sampler_object);
+ MLX5_SET(sampler_obj, obj, table_type, FS_FT_FDB);
+ MLX5_SET(sampler_obj, obj, ignore_flow_level, 1);
+ MLX5_SET(sampler_obj, obj, level, 1);
+ MLX5_SET(sampler_obj, obj, sample_ratio, sampler->sample_ratio);
+ MLX5_SET(sampler_obj, obj, sample_table_id, sampler->sample_table_id);
+ MLX5_SET(sampler_obj, obj, default_table_id, sampler->default_table_id);
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ sampler->sampler_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return err;
+}
+
+static void
+sampler_obj_destroy(struct mlx5_core_dev *mdev, u32 sampler_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static u32
+sampler_hash(u32 sample_ratio, u32 default_table_id)
+{
+ return jhash_2words(sample_ratio, default_table_id, 0);
+}
+
+static int
+sampler_cmp(u32 sample_ratio1, u32 default_table_id1, u32 sample_ratio2, u32 default_table_id2)
+{
+ return sample_ratio1 != sample_ratio2 || default_table_id1 != default_table_id2;
+}
+
+static struct mlx5e_sampler *
+sampler_get(struct mlx5e_tc_psample *tc_psample, u32 sample_ratio, u32 default_table_id)
+{
+ struct mlx5e_sampler *sampler;
+ u32 hash_key;
+ int err;
+
+ mutex_lock(&tc_psample->ht_lock);
+ hash_key = sampler_hash(sample_ratio, default_table_id);
+ hash_for_each_possible(tc_psample->hashtbl, sampler, hlist, hash_key)
+ if (!sampler_cmp(sampler->sample_ratio, sampler->default_table_id,
+ sample_ratio, default_table_id))
+ goto add_ref;
+
+ sampler = kzalloc(sizeof(*sampler), GFP_KERNEL);
+ if (!sampler) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ sampler->sample_table_id = tc_psample->termtbl->id;
+ sampler->default_table_id = default_table_id;
+ sampler->sample_ratio = sample_ratio;
+
+ err = sampler_obj_create(tc_psample->esw->dev, sampler);
+ if (err)
+ goto err_create;
+
+ hash_add(tc_psample->hashtbl, &sampler->hlist, hash_key);
+
+add_ref:
+ sampler->count++;
+ mutex_unlock(&tc_psample->ht_lock);
+ return sampler;
+
+err_create:
+ kfree(sampler);
+err_alloc:
+ mutex_unlock(&tc_psample->ht_lock);
+ return ERR_PTR(err);
+}
+
+static void
+sampler_put(struct mlx5e_tc_psample *tc_psample, struct mlx5e_sampler *sampler)
+{
+ mutex_lock(&tc_psample->ht_lock);
+ if (--sampler->count == 0) {
+ hash_del(&sampler->hlist);
+ sampler_obj_destroy(tc_psample->esw->dev, sampler->sampler_id);
+ kfree(sampler);
+ }
+ mutex_unlock(&tc_psample->ht_lock);
+}
+
+/* obj_id is used to restore the sample parameters.
+ * Set fte_id in original flow table, then match it in the default table.
+ * Only set it for NICs that can preserve reg_c, or when a decap action
+ * is used. For other cases, use the same match in the default table.
+ * Use one header rewrite for both obj_id and fte_id.
+ */
+static struct mlx5_modify_hdr *
+sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id,
+ struct mlx5e_post_act_handle *handle)
+{
+ struct mlx5e_tc_mod_hdr_acts mod_acts = {};
+ struct mlx5_modify_hdr *modify_hdr;
+ int err;
+
+ err = mlx5e_tc_match_to_reg_set(mdev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
+ CHAIN_TO_REG, obj_id);
+ if (err)
+ goto err_set_regc0;
+
+ if (handle) {
+ err = mlx5e_tc_post_act_set_handle(mdev, handle, &mod_acts);
+ if (err)
+ goto err_post_act;
+ }
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
+ mod_acts.num_actions,
+ mod_acts.actions);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ goto err_modify_hdr;
+ }
+
+ dealloc_mod_hdr_actions(&mod_acts);
+ return modify_hdr;
+
+err_modify_hdr:
+err_post_act:
+ dealloc_mod_hdr_actions(&mod_acts);
+err_set_regc0:
+ return ERR_PTR(err);
+}
+
+static u32
+restore_hash(u32 obj_id, struct mlx5e_post_act_handle *post_act_handle)
+{
+ return jhash_2words(obj_id, hash32_ptr(post_act_handle), 0);
+}
+
+static bool
+restore_equal(struct mlx5e_sample_restore *restore, u32 obj_id,
+ struct mlx5e_post_act_handle *post_act_handle)
+{
+ return restore->obj_id == obj_id && restore->post_act_handle == post_act_handle;
+}
+
+static struct mlx5e_sample_restore *
+sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id,
+ struct mlx5e_post_act_handle *post_act_handle)
+{
+ struct mlx5_eswitch *esw = tc_psample->esw;
+ struct mlx5_core_dev *mdev = esw->dev;
+ struct mlx5e_sample_restore *restore;
+ struct mlx5_modify_hdr *modify_hdr;
+ u32 hash_key;
+ int err;
+
+ mutex_lock(&tc_psample->restore_lock);
+ hash_key = restore_hash(obj_id, post_act_handle);
+ hash_for_each_possible(tc_psample->restore_hashtbl, restore, hlist, hash_key)
+ if (restore_equal(restore, obj_id, post_act_handle))
+ goto add_ref;
+
+ restore = kzalloc(sizeof(*restore), GFP_KERNEL);
+ if (!restore) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ restore->obj_id = obj_id;
+ restore->post_act_handle = post_act_handle;
+
+ modify_hdr = sample_modify_hdr_get(mdev, obj_id, post_act_handle);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ goto err_modify_hdr;
+ }
+ restore->modify_hdr = modify_hdr;
+
+ restore->rule = esw_add_restore_rule(esw, obj_id);
+ if (IS_ERR(restore->rule)) {
+ err = PTR_ERR(restore->rule);
+ goto err_restore;
+ }
+
+ hash_add(tc_psample->restore_hashtbl, &restore->hlist, hash_key);
+add_ref:
+ restore->count++;
+ mutex_unlock(&tc_psample->restore_lock);
+ return restore;
+
+err_restore:
+ mlx5_modify_header_dealloc(mdev, restore->modify_hdr);
+err_modify_hdr:
+ kfree(restore);
+err_alloc:
+ mutex_unlock(&tc_psample->restore_lock);
+ return ERR_PTR(err);
+}
+
+static void
+sample_restore_put(struct mlx5e_tc_psample *tc_psample, struct mlx5e_sample_restore *restore)
+{
+ mutex_lock(&tc_psample->restore_lock);
+ if (--restore->count == 0)
+ hash_del(&restore->hlist);
+ mutex_unlock(&tc_psample->restore_lock);
+
+ if (!restore->count) {
+ mlx5_del_flow_rules(restore->rule);
+ mlx5_modify_header_dealloc(tc_psample->esw->dev, restore->modify_hdr);
+ kfree(restore);
+ }
+}
+
+void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj)
+{
+ u32 trunc_size = mapped_obj->sample.trunc_size;
+ struct psample_group psample_group = {};
+ struct psample_metadata md = {};
+
+ md.trunc_size = trunc_size ? min(trunc_size, skb->len) : skb->len;
+ md.in_ifindex = skb->dev->ifindex;
+ psample_group.group_num = mapped_obj->sample.group_id;
+ psample_group.net = &init_net;
+ skb_push(skb, skb->mac_len);
+
+ psample_sample_packet(&psample_group, skb, mapped_obj->sample.rate, &md);
+}
+
+static int
+add_post_rule(struct mlx5_eswitch *esw, struct mlx5e_sample_flow *sample_flow,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr,
+ u32 *default_tbl_id)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ u32 attr_sz = ns_to_attr_sz(MLX5_FLOW_NAMESPACE_FDB);
+ struct mlx5_vport_tbl_attr per_vport_tbl_attr;
+ struct mlx5_flow_table *default_tbl;
+ struct mlx5_flow_attr *post_attr;
+ int err;
+
+ /* Allocate a default table per vport, chain and prio. Otherwise, there is
+ * only one default table for the same sampler object, and rules with
+ * different prio and chain may overlap. For the CT sample action, a per
+ * vport default table is needed to restore the metadata.
+ */
+ per_vport_tbl_attr.chain = attr->chain;
+ per_vport_tbl_attr.prio = attr->prio;
+ per_vport_tbl_attr.vport = esw_attr->in_rep->vport;
+ per_vport_tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
+ default_tbl = mlx5_esw_vporttbl_get(esw, &per_vport_tbl_attr);
+ if (IS_ERR(default_tbl)) {
+ err = PTR_ERR(default_tbl);
+ goto err_default_tbl;
+ }
+ *default_tbl_id = default_tbl->id;
+
+ post_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
+ if (!post_attr) {
+ err = -ENOMEM;
+ goto err_attr;
+ }
+ sample_flow->post_attr = post_attr;
+ memcpy(post_attr, attr, attr_sz);
+ /* Perform the original matches on the default table.
+ * Offload all actions except the sample action.
+ */
+ post_attr->chain = 0;
+ post_attr->prio = 0;
+ post_attr->ft = default_tbl;
+ post_attr->flags = MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
+
+ /* When offloading sample and encap actions, if there is no valid
+ * neigh data struct, a slow path rule is offloaded first. The source
+ * port metadata match is set at that time, and a per vport table is
+ * already allocated, so there is no need to match it again. Clear the
+ * source port metadata match.
+ */
+ mlx5_eswitch_clear_rule_source_port(esw, spec);
+ sample_flow->post_rule = mlx5_eswitch_add_offloaded_rule(esw, spec, post_attr);
+ if (IS_ERR(sample_flow->post_rule)) {
+ err = PTR_ERR(sample_flow->post_rule);
+ goto err_rule;
+ }
+ return 0;
+
+err_rule:
+ kfree(post_attr);
+err_attr:
+ mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr);
+err_default_tbl:
+ return err;
+}
+
+static void
+del_post_rule(struct mlx5_eswitch *esw, struct mlx5e_sample_flow *sample_flow,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_vport_tbl_attr tbl_attr;
+
+ mlx5_eswitch_del_offloaded_rule(esw, sample_flow->post_rule, sample_flow->post_attr);
+ kfree(sample_flow->post_attr);
+ tbl_attr.chain = attr->chain;
+ tbl_attr.prio = attr->prio;
+ tbl_attr.vport = esw_attr->in_rep->vport;
+ tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
+ mlx5_esw_vporttbl_put(esw, &tbl_attr);
+}
+
+/* For the following typical flow table:
+ *
+ * +-------------------------------+
+ * + original flow table +
+ * +-------------------------------+
+ * + original match +
+ * +-------------------------------+
+ * + sample action + other actions +
+ * +-------------------------------+
+ *
+ * We translate the tc filter with sample action to the following HW model:
+ *
+ * +---------------------+
+ * + original flow table +
+ * +---------------------+
+ * + original match +
+ * +---------------------+
+ * | set fte_id (if reg_c preserve cap)
+ * | do decap (if required)
+ * v
+ * +------------------------------------------------+
+ * + Flow Sampler Object +
+ * +------------------------------------------------+
+ * + sample ratio +
+ * +------------------------------------------------+
+ * + sample table id | default table id +
+ * +------------------------------------------------+
+ * | |
+ * v v
+ * +-----------------------------+ +-------------------+
+ * + sample table + + default table +
+ * +-----------------------------+ +-------------------+
+ * + forward to management vport + |
+ * +-----------------------------+ |
+ * +-------+------+
+ * | |reg_c preserve cap
+ * | |or decap action
+ * v v
+ * +-----------------+ +-------------+
+ * + per vport table + + post action +
+ * +-----------------+ +-------------+
+ * + original match +
+ * +-----------------+
+ * + other actions +
+ * +-----------------+
+ */
+struct mlx5_flow_handle *
+mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr,
+ u32 tunnel_id)
+{
+ struct mlx5e_post_act_handle *post_act_handle = NULL;
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_esw_flow_attr *pre_esw_attr;
+ struct mlx5_mapped_obj restore_obj = {};
+ struct mlx5e_sample_flow *sample_flow;
+ struct mlx5e_sample_attr *sample_attr;
+ struct mlx5_flow_attr *pre_attr;
+ struct mlx5_eswitch *esw;
+ u32 default_tbl_id;
+ u32 obj_id;
+ int err;
+
+ if (IS_ERR_OR_NULL(tc_psample))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /* If the slow path flag is set, e.g. when the neigh is invalid for
+ * encap, don't offload the sample action.
+ */
+ esw = tc_psample->esw;
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+
+ sample_flow = kzalloc(sizeof(*sample_flow), GFP_KERNEL);
+ if (!sample_flow)
+ return ERR_PTR(-ENOMEM);
+ sample_attr = attr->sample_attr;
+ sample_attr->sample_flow = sample_flow;
+
+ /* For NICs with reg_c_preserve support, or when a decap action is
+ * used, use the post action table instead of the per vport, chain
+ * and prio table. The post action rule then matches only on the
+ * fte_id instead of repeating the original match.
+ */
+ if (MLX5_CAP_GEN(esw->dev, reg_c_preserve) ||
+ attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ struct mlx5_flow_table *ft;
+
+ ft = mlx5e_tc_post_act_get_ft(tc_psample->post_act);
+ default_tbl_id = ft->id;
+ post_act_handle = mlx5e_tc_post_act_add(tc_psample->post_act, attr);
+ if (IS_ERR(post_act_handle)) {
+ err = PTR_ERR(post_act_handle);
+ goto err_post_act;
+ }
+ sample_flow->post_act_handle = post_act_handle;
+ } else {
+ err = add_post_rule(esw, sample_flow, spec, attr, &default_tbl_id);
+ if (err)
+ goto err_post_rule;
+ }
+
+ /* Create sampler object. */
+ sample_flow->sampler = sampler_get(tc_psample, sample_attr->rate, default_tbl_id);
+ if (IS_ERR(sample_flow->sampler)) {
+ err = PTR_ERR(sample_flow->sampler);
+ goto err_sampler;
+ }
+
+ /* Create an id mapping reg_c0 value to sample object. */
+ restore_obj.type = MLX5_MAPPED_OBJ_SAMPLE;
+ restore_obj.sample.group_id = sample_attr->group_num;
+ restore_obj.sample.rate = sample_attr->rate;
+ restore_obj.sample.trunc_size = sample_attr->trunc_size;
+ restore_obj.sample.tunnel_id = tunnel_id;
+ err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id);
+ if (err)
+ goto err_obj_id;
+ sample_attr->restore_obj_id = obj_id;
+
+ /* Create sample restore context. */
+ sample_flow->restore = sample_restore_get(tc_psample, obj_id, post_act_handle);
+ if (IS_ERR(sample_flow->restore)) {
+ err = PTR_ERR(sample_flow->restore);
+ goto err_sample_restore;
+ }
+
+ /* Perform the original matches on the original table. Offload the
+ * sample action. The destination is the sampler object.
+ */
+ pre_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
+ if (!pre_attr) {
+ err = -ENOMEM;
+ goto err_alloc_pre_flow_attr;
+ }
+ pre_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ /* For decap action, do decap in the original flow table instead of the
+ * default flow table.
+ */
+ if (tunnel_id)
+ pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ pre_attr->modify_hdr = sample_flow->restore->modify_hdr;
+ pre_attr->flags = MLX5_ESW_ATTR_FLAG_SAMPLE;
+ pre_attr->inner_match_level = attr->inner_match_level;
+ pre_attr->outer_match_level = attr->outer_match_level;
+ pre_attr->chain = attr->chain;
+ pre_attr->prio = attr->prio;
+ pre_attr->sample_attr = attr->sample_attr;
+ sample_attr->sampler_id = sample_flow->sampler->sampler_id;
+ pre_esw_attr = pre_attr->esw_attr;
+ pre_esw_attr->in_mdev = esw_attr->in_mdev;
+ pre_esw_attr->in_rep = esw_attr->in_rep;
+ sample_flow->pre_rule = mlx5_eswitch_add_offloaded_rule(esw, spec, pre_attr);
+ if (IS_ERR(sample_flow->pre_rule)) {
+ err = PTR_ERR(sample_flow->pre_rule);
+ goto err_pre_offload_rule;
+ }
+ sample_flow->pre_attr = pre_attr;
+
+ return sample_flow->post_rule;
+
+err_pre_offload_rule:
+ kfree(pre_attr);
+err_alloc_pre_flow_attr:
+ sample_restore_put(tc_psample, sample_flow->restore);
+err_sample_restore:
+ mapping_remove(esw->offloads.reg_c0_obj_pool, obj_id);
+err_obj_id:
+ sampler_put(tc_psample, sample_flow->sampler);
+err_sampler:
+ if (!post_act_handle)
+ del_post_rule(esw, sample_flow, attr);
+err_post_rule:
+ if (post_act_handle)
+ mlx5e_tc_post_act_del(tc_psample->post_act, post_act_handle);
+err_post_act:
+ kfree(sample_flow);
+ return ERR_PTR(err);
+}
+
+void
+mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5e_sample_flow *sample_flow;
+ struct mlx5_vport_tbl_attr tbl_attr;
+ struct mlx5_eswitch *esw;
+
+ if (IS_ERR_OR_NULL(tc_psample))
+ return;
+
+	/* If the slow path flag is set, the sample action was not offloaded,
+	 * so there is no sample rule to delete.
+	 */
+ esw = tc_psample->esw;
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+ mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+ return;
+ }
+
+	/* The following delete order can't be changed; otherwise, firmware
+	 * syndromes will be hit.
+	 */
+ sample_flow = attr->sample_attr->sample_flow;
+ mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, sample_flow->pre_attr);
+ if (!sample_flow->post_act_handle)
+ mlx5_eswitch_del_offloaded_rule(esw, sample_flow->post_rule,
+ sample_flow->post_attr);
+
+ sample_restore_put(tc_psample, sample_flow->restore);
+ mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr->restore_obj_id);
+ sampler_put(tc_psample, sample_flow->sampler);
+ if (sample_flow->post_act_handle) {
+ mlx5e_tc_post_act_del(tc_psample->post_act, sample_flow->post_act_handle);
+ } else {
+ tbl_attr.chain = attr->chain;
+ tbl_attr.prio = attr->prio;
+ tbl_attr.vport = esw_attr->in_rep->vport;
+ tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
+ mlx5_esw_vporttbl_put(esw, &tbl_attr);
+ kfree(sample_flow->post_attr);
+ }
+
+ kfree(sample_flow->pre_attr);
+ kfree(sample_flow);
+}
+
+struct mlx5e_tc_psample *
+mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act)
+{
+ struct mlx5e_tc_psample *tc_psample;
+ int err;
+
+ tc_psample = kzalloc(sizeof(*tc_psample), GFP_KERNEL);
+ if (!tc_psample)
+ return ERR_PTR(-ENOMEM);
+ if (IS_ERR_OR_NULL(post_act)) {
+		err = post_act ? PTR_ERR(post_act) : -EOPNOTSUPP;
+ goto err_post_act;
+ }
+ tc_psample->post_act = post_act;
+ tc_psample->esw = esw;
+ err = sampler_termtbl_create(tc_psample);
+ if (err)
+ goto err_post_act;
+
+ mutex_init(&tc_psample->ht_lock);
+ mutex_init(&tc_psample->restore_lock);
+
+ return tc_psample;
+
+err_post_act:
+ kfree(tc_psample);
+ return ERR_PTR(err);
+}
+
+void
+mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample)
+{
+ if (IS_ERR_OR_NULL(tc_psample))
+ return;
+
+ mutex_destroy(&tc_psample->restore_lock);
+ mutex_destroy(&tc_psample->ht_lock);
+ sampler_termtbl_destroy(tc_psample);
+ kfree(tc_psample);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
new file mode 100644
index 000000000000..db0146df9b30
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_TC_SAMPLE_H__
+#define __MLX5_EN_TC_SAMPLE_H__
+
+#include "eswitch.h"
+
+struct mlx5_flow_attr;
+struct mlx5e_tc_psample;
+struct mlx5e_post_act;
+
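+/* Parsed parameters of a TC sample action, together with the ids allocated
+ * for it at offload time (restore object id, sampler id) and the sample flow
+ * bookkeeping.
+ */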
+struct mlx5e_sample_attr {
+ u32 group_num;
+ u32 rate;
+ u32 trunc_size;
+ u32 restore_obj_id;
+ u32 sampler_id;
+ struct mlx5e_sample_flow *sample_flow;
+};
+
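+/* Called on receive for packets whose reg_c0 mapped object is a sample
+ * object: restores the sample metadata and delivers the skb to the psample
+ * module.
+ */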
+void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj);
+
+struct mlx5_flow_handle *
+mlx5e_tc_sample_offload(struct mlx5e_tc_psample *sample_priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr,
+ u32 tunnel_id);
+
+void
+mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *sample_priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr);
+
+struct mlx5e_tc_psample *
+mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act);
+
+void
+mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample);
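+
+/* Rough lifecycle: mlx5e_tc_sample_init() once per eswitch together with the
+ * post action instance, mlx5e_tc_sample_offload()/unoffload() per flow that
+ * carries a sample action, mlx5e_tc_sample_cleanup() on teardown.
+ */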
+
+#endif /* __MLX5_EN_TC_SAMPLE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 91e7a01e32be..6c949abcd2e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -19,6 +19,7 @@
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
#include "en/mapping.h"
+#include "en/tc/post_act.h"
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"
@@ -32,10 +33,6 @@
#define MLX5_CT_STATE_RELATED_BIT BIT(5)
#define MLX5_CT_STATE_INVALID_BIT BIT(6)
-#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen)
-#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
-#define MLX5_FTE_ID_MASK MLX5_FTE_ID_MAX
-
#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen)
#define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0)
@@ -46,14 +43,13 @@ struct mlx5_tc_ct_priv {
struct mlx5_core_dev *dev;
const struct net_device *netdev;
struct mod_hdr_tbl *mod_hdr_tbl;
- struct idr fte_ids;
struct xarray tuple_ids;
struct rhashtable zone_ht;
struct rhashtable ct_tuples_ht;
struct rhashtable ct_tuples_nat_ht;
struct mlx5_flow_table *ct;
struct mlx5_flow_table *ct_nat;
- struct mlx5_flow_table *post_ct;
+ struct mlx5e_post_act *post_act;
struct mutex control_lock; /* guards parallel adds/dels */
struct mapping_ctx *zone_mapping;
struct mapping_ctx *labels_mapping;
@@ -64,11 +60,9 @@ struct mlx5_tc_ct_priv {
struct mlx5_ct_flow {
struct mlx5_flow_attr *pre_ct_attr;
- struct mlx5_flow_attr *post_ct_attr;
struct mlx5_flow_handle *pre_ct_rule;
- struct mlx5_flow_handle *post_ct_rule;
+ struct mlx5e_post_act_handle *post_act_handle;
struct mlx5_ct_ft *ft;
- u32 fte_id;
u32 chain_mapping;
};
@@ -768,7 +762,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = 0;
- attr->dest_ft = ct_priv->post_ct;
+ attr->dest_ft = mlx5e_tc_post_act_get_ft(ct_priv->post_act);
attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
attr->outer_match_level = MLX5_MATCH_L4;
attr->counter = entry->counter->counter;
@@ -1432,7 +1426,7 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
ctstate |= MLX5_CT_STATE_NAT_BIT;
mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG, ctstate, ctstate);
- dest.ft = ct_priv->post_ct;
+ dest.ft = mlx5e_tc_post_act_get_ft(ct_priv->post_act);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1716,9 +1710,9 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* | do decap
* v
* +---------------------+
- * + pre_ct/pre_ct_nat + if matches +---------------------+
- * + zone+nat match +---------------->+ post_ct (see below) +
- * +---------------------+ set zone +---------------------+
+ * + pre_ct/pre_ct_nat + if matches +-------------------------+
+ * + zone+nat match +---------------->+ post_act (see below) +
+ * +---------------------+ set zone +-------------------------+
* | set zone
* v
* +--------------------+
@@ -1732,7 +1726,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* | do nat (if needed)
* v
* +--------------+
- * + post_ct + original filter actions
+ * + post_act + original filter actions
* + fte_id match +------------------------>
* +--------------+
*/
@@ -1746,19 +1740,15 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
- struct mlx5_flow_spec *post_ct_spec = NULL;
+ struct mlx5e_post_act_handle *handle;
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_modify_hdr *mod_hdr;
- struct mlx5_flow_handle *rule;
struct mlx5_ct_flow *ct_flow;
int chain_mapping = 0, err;
struct mlx5_ct_ft *ft;
- u32 fte_id = 1;
- post_ct_spec = kvzalloc(sizeof(*post_ct_spec), GFP_KERNEL);
ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
- if (!post_ct_spec || !ct_flow) {
- kvfree(post_ct_spec);
+ if (!ct_flow) {
kfree(ct_flow);
return ERR_PTR(-ENOMEM);
}
@@ -1773,14 +1763,13 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
ct_flow->ft = ft;
- err = idr_alloc_u32(&ct_priv->fte_ids, ct_flow, &fte_id,
- MLX5_FTE_ID_MAX, GFP_KERNEL);
- if (err) {
- netdev_warn(priv->netdev,
- "Failed to allocate fte id, err: %d\n", err);
- goto err_idr;
+ handle = mlx5e_tc_post_act_add(ct_priv->post_act, attr);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ ct_dbg("Failed to allocate post action handle");
+ goto err_post_act_handle;
}
- ct_flow->fte_id = fte_id;
+ ct_flow->post_act_handle = handle;
/* Base flow attributes of both rules on original rule attribute */
ct_flow->pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
@@ -1789,15 +1778,8 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
goto err_alloc_pre;
}
- ct_flow->post_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
- if (!ct_flow->post_ct_attr) {
- err = -ENOMEM;
- goto err_alloc_post;
- }
-
pre_ct_attr = ct_flow->pre_ct_attr;
memcpy(pre_ct_attr, attr, attr_sz);
- memcpy(ct_flow->post_ct_attr, attr, attr_sz);
/* Modify the original rule's action to fwd and modify, leave decap */
pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP;
@@ -1823,10 +1805,9 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
goto err_mapping;
}
- err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, ct_priv->ns_type,
- FTEID_TO_REG, fte_id);
+ err = mlx5e_tc_post_act_set_handle(priv->mdev, handle, &pre_mod_acts);
if (err) {
- ct_dbg("Failed to set fte_id register mapping");
+ ct_dbg("Failed to set post action handle");
goto err_mapping;
}
@@ -1857,33 +1838,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
pre_ct_attr->modify_hdr = mod_hdr;
- /* Post ct rule matches on fte_id and executes original rule's
- * tc rule action
- */
- mlx5e_tc_match_to_reg_match(post_ct_spec, FTEID_TO_REG,
- fte_id, MLX5_FTE_ID_MASK);
-
- /* Put post_ct rule on post_ct flow table */
- ct_flow->post_ct_attr->chain = 0;
- ct_flow->post_ct_attr->prio = 0;
- ct_flow->post_ct_attr->ft = ct_priv->post_ct;
-
- /* Splits were handled before CT */
- if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB)
- ct_flow->post_ct_attr->esw_attr->split_count = 0;
-
- ct_flow->post_ct_attr->inner_match_level = MLX5_MATCH_NONE;
- ct_flow->post_ct_attr->outer_match_level = MLX5_MATCH_NONE;
- ct_flow->post_ct_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
- rule = mlx5_tc_rule_insert(priv, post_ct_spec,
- ct_flow->post_ct_attr);
- ct_flow->post_ct_rule = rule;
- if (IS_ERR(ct_flow->post_ct_rule)) {
- err = PTR_ERR(ct_flow->post_ct_rule);
- ct_dbg("Failed to add post ct rule");
- goto err_insert_post_ct;
- }
-
/* Change original rule point to ct table */
pre_ct_attr->dest_chain = 0;
pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
@@ -1897,28 +1851,21 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
attr->ct_attr.ct_flow = ct_flow;
dealloc_mod_hdr_actions(&pre_mod_acts);
- kvfree(post_ct_spec);
- return rule;
+ return ct_flow->pre_ct_rule;
err_insert_orig:
- mlx5_tc_rule_delete(priv, ct_flow->post_ct_rule,
- ct_flow->post_ct_attr);
-err_insert_post_ct:
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
dealloc_mod_hdr_actions(&pre_mod_acts);
mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
err_get_chain:
- kfree(ct_flow->post_ct_attr);
-err_alloc_post:
kfree(ct_flow->pre_ct_attr);
err_alloc_pre:
- idr_remove(&ct_priv->fte_ids, fte_id);
-err_idr:
+ mlx5e_tc_post_act_del(ct_priv->post_act, handle);
+err_post_act_handle:
mlx5_tc_ct_del_ft_cb(ct_priv, ft);
err_ft:
- kvfree(post_ct_spec);
kfree(ct_flow);
netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err);
return ERR_PTR(err);
@@ -2029,16 +1976,13 @@ __mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
pre_ct_attr);
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
- if (ct_flow->post_ct_rule) {
- mlx5_tc_rule_delete(priv, ct_flow->post_ct_rule,
- ct_flow->post_ct_attr);
+ if (ct_flow->post_act_handle) {
mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
- idr_remove(&ct_priv->fte_ids, ct_flow->fte_id);
+ mlx5e_tc_post_act_del(ct_priv->post_act, ct_flow->post_act_handle);
mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
}
kfree(ct_flow->pre_ct_attr);
- kfree(ct_flow->post_ct_attr);
kfree(ct_flow);
}
@@ -2064,11 +2008,6 @@ static int
mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw,
const char **err_msg)
{
- if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level)) {
- *err_msg = "firmware level support is missing";
- return -EOPNOTSUPP;
- }
-
if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) {
/* vlan workaround should be avoided for multi chain rules.
* This is just a sanity check as pop vlan action should
@@ -2098,20 +2037,9 @@ mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw,
}
static int
-mlx5_tc_ct_init_check_nic_support(struct mlx5e_priv *priv,
- const char **err_msg)
-{
- if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
- *err_msg = "firmware level support is missing";
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int
mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act,
const char **err_msg)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -2122,10 +2050,14 @@ mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
*err_msg = "tc skb extension missing";
return -EOPNOTSUPP;
#endif
+ if (IS_ERR_OR_NULL(post_act)) {
+ *err_msg = "tc ct offload not supported, post action is missing";
+ return -EOPNOTSUPP;
+ }
+
if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
return mlx5_tc_ct_init_check_esw_support(esw, err_msg);
- else
- return mlx5_tc_ct_init_check_nic_support(priv, err_msg);
+ return 0;
}
#define INIT_ERR_PREFIX "tc ct offload init failed"
@@ -2133,19 +2065,19 @@ mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
struct mlx5_tc_ct_priv *
mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
struct mod_hdr_tbl *mod_hdr,
- enum mlx5_flow_namespace_type ns_type)
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act)
{
struct mlx5_tc_ct_priv *ct_priv;
struct mlx5_core_dev *dev;
const char *msg;
+ u64 mapping_id;
int err;
dev = priv->mdev;
- err = mlx5_tc_ct_init_check_support(priv, ns_type, &msg);
+ err = mlx5_tc_ct_init_check_support(priv, ns_type, post_act, &msg);
if (err) {
- mlx5_core_warn(dev,
- "tc ct offload not supported, %s\n",
- msg);
+ mlx5_core_warn(dev, "tc ct offload not supported, %s\n", msg);
goto err_support;
}
@@ -2153,13 +2085,17 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
if (!ct_priv)
goto err_alloc;
- ct_priv->zone_mapping = mapping_create(sizeof(u16), 0, true);
+ mapping_id = mlx5_query_nic_system_image_guid(dev);
+
+ ct_priv->zone_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_ZONE,
+ sizeof(u16), 0, true);
if (IS_ERR(ct_priv->zone_mapping)) {
err = PTR_ERR(ct_priv->zone_mapping);
goto err_mapping_zone;
}
- ct_priv->labels_mapping = mapping_create(sizeof(u32) * 4, 0, true);
+ ct_priv->labels_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_LABELS,
+ sizeof(u32) * 4, 0, true);
if (IS_ERR(ct_priv->labels_mapping)) {
err = PTR_ERR(ct_priv->labels_mapping);
goto err_mapping_labels;
@@ -2189,16 +2125,7 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
goto err_ct_nat_tbl;
}
- ct_priv->post_ct = mlx5_chains_create_global_table(chains);
- if (IS_ERR(ct_priv->post_ct)) {
- err = PTR_ERR(ct_priv->post_ct);
- mlx5_core_warn(dev,
- "%s, failed to create post ct table err: %d\n",
- INIT_ERR_PREFIX, err);
- goto err_post_ct_tbl;
- }
-
- idr_init(&ct_priv->fte_ids);
+ ct_priv->post_act = post_act;
mutex_init(&ct_priv->control_lock);
rhashtable_init(&ct_priv->zone_ht, &zone_params);
rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params);
@@ -2206,8 +2133,6 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
return ct_priv;
-err_post_ct_tbl:
- mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
err_ct_nat_tbl:
mlx5_chains_destroy_global_table(chains, ct_priv->ct);
err_ct_tbl:
@@ -2232,7 +2157,6 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
chains = ct_priv->chains;
- mlx5_chains_destroy_global_table(chains, ct_priv->post_ct);
mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
mlx5_chains_destroy_global_table(chains, ct_priv->ct);
mapping_destroy(ct_priv->zone_mapping);
@@ -2242,7 +2166,6 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
rhashtable_destroy(&ct_priv->zone_ht);
mutex_destroy(&ct_priv->control_lock);
- idr_destroy(&ct_priv->fte_ids);
kfree(ct_priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 644cf1641cde..363329f4aac6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -92,7 +92,8 @@ struct mlx5_ct_attr {
struct mlx5_tc_ct_priv *
mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
struct mod_hdr_tbl *mod_hdr,
- enum mlx5_flow_namespace_type ns_type);
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act);
void
mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv);
@@ -132,7 +133,8 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
static inline struct mlx5_tc_ct_priv *
mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
struct mod_hdr_tbl *mod_hdr,
- enum mlx5_flow_namespace_type ns_type)
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act)
{
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 1e2d117082d4..b4e986818794 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -525,7 +525,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
e->out_dev = attr.out_dev;
e->route_dev_ifindex = attr.route_dev->ifindex;
- /* It's importent to add the neigh to the hash table before checking
+ /* It's important to add the neigh to the hash table before checking
* the neigh validity state. So if we'll get a notification, in case the
* neigh changes it's validity state, we would find the relevant neigh
* in the hash.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 2e846b741280..1c44c6c345f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -147,7 +147,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
mlx5e_rep_queue_neigh_stats_work(priv);
list_for_each_entry(flow, flow_list, tmp_list) {
- if (!mlx5e_is_offloaded_flow(flow))
+ if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW))
continue;
attr = flow->attr;
esw_attr = attr->esw_attr;
@@ -188,7 +188,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
int err;
list_for_each_entry(flow, flow_list, tmp_list) {
- if (!mlx5e_is_offloaded_flow(flow))
+ if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
continue;
attr = flow->attr;
esw_attr = attr->esw_attr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
new file mode 100644
index 000000000000..de936dc4bc48
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#include "tir.h"
+#include "params.h"
+#include <linux/mlx5/transobj.h>
+
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
+
+/* max() doesn't work inside square brackets. */
+#define MLX5E_TIR_CMD_IN_SZ_DW ( \
+ MLX5_ST_SZ_DW(create_tir_in) > MLX5_ST_SZ_DW(modify_tir_in) ? \
+ MLX5_ST_SZ_DW(create_tir_in) : MLX5_ST_SZ_DW(modify_tir_in) \
+)
+
+struct mlx5e_tir_builder {
+ u32 in[MLX5E_TIR_CMD_IN_SZ_DW];
+ bool modify;
+};
+
+struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify)
+{
+ struct mlx5e_tir_builder *builder;
+
+	builder = kvzalloc(sizeof(*builder), GFP_KERNEL);
+	if (!builder)
+		return NULL;
+
+	builder->modify = modify;
+
+ return builder;
+}
+
+void mlx5e_tir_builder_free(struct mlx5e_tir_builder *builder)
+{
+ kvfree(builder);
+}
+
+void mlx5e_tir_builder_clear(struct mlx5e_tir_builder *builder)
+{
+ memset(builder->in, 0, sizeof(builder->in));
+}
+
+static void *mlx5e_tir_builder_get_tirc(struct mlx5e_tir_builder *builder)
+{
+ if (builder->modify)
+ return MLX5_ADDR_OF(modify_tir_in, builder->in, ctx);
+ return MLX5_ADDR_OF(create_tir_in, builder->in, ctx);
+}
+
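+/* Build a direct TIR that points at a single RQ (inline_rqn), with no RSS. */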
+void mlx5e_tir_builder_build_inline(struct mlx5e_tir_builder *builder, u32 tdn, u32 rqn)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+
+ WARN_ON(builder->modify);
+
+ MLX5_SET(tirc, tirc, transport_domain, tdn);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE);
+ MLX5_SET(tirc, tirc, inline_rqn, rqn);
+}
+
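+/* Build an indirect TIR that spreads traffic over the RQs of an RQT. */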
+void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn,
+ u32 rqtn, bool inner_ft_support)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+
+ WARN_ON(builder->modify);
+
+ MLX5_SET(tirc, tirc, transport_domain, tdn);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table, rqtn);
+ MLX5_SET(tirc, tirc, tunneled_offload_en, inner_ft_support);
+}
+
+void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder,
+ const struct mlx5e_lro_param *lro_param)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+ const unsigned int rough_max_l2_l3_hdr_sz = 256;
+
+ if (builder->modify)
+ MLX5_SET(modify_tir_in, builder->in, bitmask.lro, 1);
+
+ if (!lro_param->enabled)
+ return;
+
+ MLX5_SET(tirc, tirc, lro_enable_mask,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
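+	/* lro_max_ip_payload_size is expressed in units of 256 bytes, hence
+	 * the shift by 8.
+	 */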
+ MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+ (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs, lro_param->timeout);
+}
+
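+/* Translate the ethtool RSS hash function (ETH_RSS_HASH_*) to the device
+ * rx_hash_fn value used in the TIR context.
+ */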
+static int mlx5e_hfunc_to_hw(u8 hfunc)
+{
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ return MLX5_RX_HASH_FN_TOEPLITZ;
+ case ETH_RSS_HASH_XOR:
+ return MLX5_RX_HASH_FN_INVERTED_XOR8;
+ default:
+ return MLX5_RX_HASH_FN_NONE;
+ }
+}
+
+void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
+ const struct mlx5e_rss_params_hash *rss_hash,
+ const struct mlx5e_rss_params_traffic_type *rss_tt,
+ bool inner)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+ void *hfso;
+
+ if (builder->modify)
+ MLX5_SET(modify_tir_in, builder->in, bitmask.hash, 1);
+
+ MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_hfunc_to_hw(rss_hash->hfunc));
+ if (rss_hash->hfunc == ETH_RSS_HASH_TOP) {
+ const size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
+ void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
+
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ memcpy(rss_key, rss_hash->toeplitz_hash_key, len);
+ }
+
+ if (inner)
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
+ else
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, rss_tt->l3_prot_type);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, rss_tt->l4_prot_type);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields, rss_tt->rx_hash_fields);
+}
+
+void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+
+ WARN_ON(builder->modify);
+
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
+}
+
+void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+
+ WARN_ON(builder->modify);
+
+ MLX5_SET(tirc, tirc, tls_en, 1);
+ MLX5_SET(tirc, tirc, self_lb_block,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST |
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST);
+}
+
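+/* Create the TIR from the builder state. When reg is true, the TIR is also
+ * added to the device tirs_list, so that operations iterating all registered
+ * TIRs (e.g. refreshing the self-loopback block bits) include it.
+ */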
+int mlx5e_tir_init(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder,
+ struct mlx5_core_dev *mdev, bool reg)
+{
+ int err;
+
+ tir->mdev = mdev;
+
+ err = mlx5_core_create_tir(tir->mdev, builder->in, &tir->tirn);
+ if (err)
+ return err;
+
+ if (reg) {
+ struct mlx5e_hw_objs *res = &tir->mdev->mlx5e_res.hw_objs;
+
+ mutex_lock(&res->td.list_lock);
+ list_add(&tir->list, &res->td.tirs_list);
+ mutex_unlock(&res->td.list_lock);
+ } else {
+ INIT_LIST_HEAD(&tir->list);
+ }
+
+ return 0;
+}
+
+void mlx5e_tir_destroy(struct mlx5e_tir *tir)
+{
+ struct mlx5e_hw_objs *res = &tir->mdev->mlx5e_res.hw_objs;
+
+ /* Skip mutex if list_del is no-op (the TIR wasn't registered in the
+ * list). list_empty will never return true for an item of tirs_list,
+ * and READ_ONCE/WRITE_ONCE in list_empty/list_del guarantee consistency
+ * of the list->next value.
+ */
+ if (!list_empty(&tir->list)) {
+ mutex_lock(&res->td.list_lock);
+ list_del(&tir->list);
+ mutex_unlock(&res->td.list_lock);
+ }
+
+ mlx5_core_destroy_tir(tir->mdev, tir->tirn);
+}
+
+int mlx5e_tir_modify(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder)
+{
+ return mlx5_core_modify_tir(tir->mdev, tir->tirn, builder->in);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
new file mode 100644
index 000000000000..e45149a78ed9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_EN_TIR_H__
+#define __MLX5_EN_TIR_H__
+
+#include <linux/kernel.h>
+
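+/* RSS hash configuration in ethtool terms: hfunc is one of the ETH_RSS_HASH_*
+ * values, and the Toeplitz key is 40 bytes, matching the TIR context key size.
+ */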
+struct mlx5e_rss_params_hash {
+ u8 hfunc;
+ u8 toeplitz_hash_key[40];
+};
+
+struct mlx5e_rss_params_traffic_type {
+ u8 l3_prot_type;
+ u8 l4_prot_type;
+ u32 rx_hash_fields;
+};
+
+struct mlx5e_tir_builder;
+struct mlx5e_lro_param;
+
+struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify);
+void mlx5e_tir_builder_free(struct mlx5e_tir_builder *builder);
+void mlx5e_tir_builder_clear(struct mlx5e_tir_builder *builder);
+
+void mlx5e_tir_builder_build_inline(struct mlx5e_tir_builder *builder, u32 tdn, u32 rqn);
+void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn,
+ u32 rqtn, bool inner_ft_support);
+void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder,
+ const struct mlx5e_lro_param *lro_param);
+void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
+ const struct mlx5e_rss_params_hash *rss_hash,
+ const struct mlx5e_rss_params_traffic_type *rss_tt,
+ bool inner);
+void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder);
+void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder);
+
+struct mlx5_core_dev;
+
+struct mlx5e_tir {
+ struct mlx5_core_dev *mdev;
+ u32 tirn;
+ struct list_head list;
+};
+
+int mlx5e_tir_init(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder,
+ struct mlx5_core_dev *mdev, bool reg);
+void mlx5e_tir_destroy(struct mlx5e_tir *tir);
+
+static inline u32 mlx5e_tir_get_tirn(struct mlx5e_tir *tir)
+{
+ return tir->tirn;
+}
+
+int mlx5e_tir_modify(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder);
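+
+/* Typical create flow (an illustrative sketch only; tdn, rqtn, hash, tt and
+ * mdev stand for values the caller already has):
+ *
+ *	struct mlx5e_tir_builder *builder = mlx5e_tir_builder_alloc(false);
+ *
+ *	if (!builder)
+ *		return -ENOMEM;
+ *	mlx5e_tir_builder_build_rqt(builder, tdn, rqtn, false);
+ *	mlx5e_tir_builder_build_rss(builder, &hash, &tt, false);
+ *	err = mlx5e_tir_init(&tir, builder, mdev, true);
+ *	mlx5e_tir_builder_free(builder);
+ */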
+
+#endif /* __MLX5_EN_TIR_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 7f94508594fb..d54607a42740 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -92,30 +92,19 @@ static void mlx5e_close_trap_rq(struct mlx5e_rq *rq)
static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
u32 rqn)
{
- void *tirc;
- int inlen;
- u32 *in;
+ struct mlx5e_tir_builder *builder;
int err;
- inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
return -ENOMEM;
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
- MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE);
- MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
- MLX5_SET(tirc, tirc, inline_rqn, rqn);
- err = mlx5e_create_tir(mdev, tir, in);
- kvfree(in);
+ mlx5e_tir_builder_build_inline(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqn);
+ err = mlx5e_tir_init(tir, builder, mdev, true);
- return err;
-}
+ mlx5e_tir_builder_free(builder);
-static void mlx5e_destroy_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir)
-{
- mlx5e_destroy_tir(mdev, tir);
+ return err;
}
static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev,
@@ -173,7 +162,7 @@ err_napi_del:
void mlx5e_close_trap(struct mlx5e_trap *trap)
{
- mlx5e_destroy_trap_direct_rq_tir(trap->mdev, &trap->tir);
+ mlx5e_tir_destroy(&trap->tir);
mlx5e_close_trap_rq(&trap->rq);
netif_napi_del(&trap->napi);
kvfree(trap);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
index 71e8d66fa150..7b562d2c8a19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
@@ -122,7 +122,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
* any Fill Ring entries at the setup stage.
*/
- err = mlx5e_xsk_redirect_rqt_to_channel(priv, priv->channels.c[ix]);
+ err = mlx5e_rx_res_xsk_activate(priv->rx_res, &priv->channels, ix);
if (unlikely(err))
goto err_deactivate;
@@ -169,7 +169,7 @@ static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix)
goto remove_pool;
c = priv->channels.c[ix];
- mlx5e_xsk_redirect_rqt_to_drop(priv, ix);
+ mlx5e_rx_res_xsk_deactivate(priv->rx_res, ix);
mlx5e_deactivate_xsk(c);
mlx5e_close_xsk(c);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index a8315f166696..538bc2419bd8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -126,7 +126,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
/* Create a separate SQ, so that when the buff pool is disabled, we could
* close this SQ safely and stop receiving CQEs. In other case, e.g., if
* the XDPSQ was used instead, we might run into trouble when the buff pool
- * is disabled and then reenabled, but the SQ continues receiving CQEs
+ * is disabled and then re-enabled, but the SQ continues receiving CQEs
* from the old buff pool.
*/
err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, pool, &c->xsksq, true);
@@ -183,73 +183,3 @@ void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
mlx5e_deactivate_rq(&c->xskrq);
/* TX queue is disabled on close. */
}
-
-static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn)
-{
- struct mlx5e_redirect_rqt_param direct_rrp = {
- .is_rss = false,
- {
- .rqn = rqn,
- },
- };
-
- u32 rqtn = priv->xsk_tir[ix].rqt.rqtn;
-
- return mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
-}
-
-int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c)
-{
- return mlx5e_redirect_xsk_rqt(priv, c->ix, c->xskrq.rqn);
-}
-
-int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix)
-{
- return mlx5e_redirect_xsk_rqt(priv, ix, priv->drop_rq.rqn);
-}
-
-int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
-{
- int err, i;
-
- if (!priv->xsk.refcnt)
- return 0;
-
- for (i = 0; i < chs->num; i++) {
- struct mlx5e_channel *c = chs->c[i];
-
- if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
- continue;
-
- err = mlx5e_xsk_redirect_rqt_to_channel(priv, c);
- if (unlikely(err))
- goto err_stop;
- }
-
- return 0;
-
-err_stop:
- for (i--; i >= 0; i--) {
- if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
- continue;
-
- mlx5e_xsk_redirect_rqt_to_drop(priv, i);
- }
-
- return err;
-}
-
-void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
-{
- int i;
-
- if (!priv->xsk.refcnt)
- return;
-
- for (i = 0; i < chs->num; i++) {
- if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
- continue;
-
- mlx5e_xsk_redirect_rqt_to_drop(priv, i);
- }
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
index ca20f1ff5e39..50e111b85efd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
@@ -17,9 +17,5 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
void mlx5e_close_xsk(struct mlx5e_channel *c);
void mlx5e_activate_xsk(struct mlx5e_channel *c);
void mlx5e_deactivate_xsk(struct mlx5e_channel *c);
-int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c);
-int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix);
-int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
-void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
#endif /* __MLX5_EN_XSK_SETUP_H__ */