Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 678
1 file changed, 604 insertions(+), 74 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 2f6a0ae20650..94cb0217b4f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -38,7 +38,8 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
+#include "esw/indir_table.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
@@ -257,7 +259,9 @@ mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr,
+ struct mlx5_eswitch *src_esw,
+ u16 vport)
{
void *misc2;
void *misc;
@@ -266,10 +270,12 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
* VHCA in dual-port RoCE mode, and matching on source vport may fail.
*/
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ if (mlx5_esw_indir_table_decap_vport(attr))
+ vport = mlx5_esw_indir_table_decap_vport(attr);
misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
- mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
- attr->in_rep->vport));
+ mlx5_eswitch_get_vport_metadata_for_match(src_esw,
+ vport));
misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
@@ -278,12 +284,12 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
- MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
+ MLX5_SET(fte_match_set_misc, misc, source_port, vport);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
MLX5_SET(fte_match_set_misc, misc,
source_eswitch_owner_vhca_id,
- MLX5_CAP_GEN(attr->in_mdev, vhca_id));
+ MLX5_CAP_GEN(src_esw->dev, vhca_id));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
@@ -295,6 +301,299 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
}
}
+static int
+esw_setup_decap_indir(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec)
+{
+ struct mlx5_flow_table *ft;
+
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ return -EOPNOTSUPP;
+
+ ft = mlx5_esw_indir_table_get(esw, attr, spec,
+ mlx5_esw_indir_table_decap_vport(attr), true);
+ return PTR_ERR_OR_ZERO(ft);
+}
+
+static void
+esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr)
+{
+ if (mlx5_esw_indir_table_decap_vport(attr))
+ mlx5_esw_indir_table_put(esw, attr,
+ mlx5_esw_indir_table_decap_vport(attr),
+ true);
+}
+
+static int
+esw_setup_ft_dest(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ int i)
+{
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = attr->dest_ft;
+
+ if (mlx5_esw_indir_table_decap_vport(attr))
+ return esw_setup_decap_indir(esw, attr, spec);
+ return 0;
+}
+
+static void
+esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_fs_chains *chains,
+ int i)
+{
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
+}
+
+static int
+esw_setup_chain_dest(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_fs_chains *chains,
+ u32 chain, u32 prio, u32 level,
+ int i)
+{
+ struct mlx5_flow_table *ft;
+
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ ft = mlx5_chains_get_table(chains, chain, prio, level);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = ft;
+ return 0;
+}
+
+static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
+ int from, int to)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ int i;
+
+ for (i = from; i < to; i++)
+ if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ mlx5_chains_put_table(chains, 0, 1, 0);
+ else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
+ esw_attr->dests[i].mdev))
+ mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
+ false);
+}
+
+static bool
+esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
+{
+ int i;
+
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+ if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ return true;
+ return false;
+}
+
+static int
+esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw,
+ struct mlx5_fs_chains *chains,
+ struct mlx5_flow_attr *attr,
+ int *i)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ int j, err;
+
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ return -EOPNOTSUPP;
+
+ for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
+ err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
+ if (err)
+ goto err_setup_chain;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat;
+ }
+ return 0;
+
+err_setup_chain:
+ esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
+ return err;
+}
+
+static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+
+ esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
+}
+
+static bool
+esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ int i;
+
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+ if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
+ esw_attr->dests[i].mdev))
+ return true;
+ return false;
+}
+
+static int
+esw_setup_indir_table(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ bool ignore_flow_lvl,
+ int *i)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ int j, err;
+
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ return -EOPNOTSUPP;
+
+ for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
+ if (ignore_flow_lvl)
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+
+ dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
+ esw_attr->dests[j].rep->vport, false);
+ if (IS_ERR(dest[*i].ft)) {
+ err = PTR_ERR(dest[*i].ft);
+ goto err_indir_tbl_get;
+ }
+ }
+
+ if (mlx5_esw_indir_table_decap_vport(attr)) {
+ err = esw_setup_decap_indir(esw, attr, spec);
+ if (err)
+ goto err_indir_tbl_get;
+ }
+
+ return 0;
+
+err_indir_tbl_get:
+ esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
+ return err;
+}
+
+static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+
+ esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
+ esw_cleanup_decap_indir(esw, attr);
+}
+
+static void
+esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
+{
+ mlx5_chains_put_table(chains, chain, prio, level);
+}
+
+static void
+esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+ int attr_idx, int dest_idx, bool pkt_reformat)
+{
+ dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
+ dest[dest_idx].vport.vhca_id =
+ MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
+ if (pkt_reformat) {
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
+ }
+ dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+ dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
+ }
+}
+
+static int
+esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+ int i)
+{
+ int j;
+
+ for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
+ esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
+ return i;
+}
+
+static int
+esw_setup_dests(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ int *i)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ int err = 0;
+
+ if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
+ MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) &&
+ mlx5_eswitch_vport_match_metadata_enabled(esw))
+ attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
+
+ if (attr->dest_ft) {
+ esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
+ (*i)++;
+ } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+ esw_setup_slow_path_dest(dest, flow_act, chains, *i);
+ (*i)++;
+ } else if (attr->dest_chain) {
+ err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
+ 1, 0, *i);
+ (*i)++;
+ } else if (esw_is_indir_table(esw, attr)) {
+ err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
+ } else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
+ err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
+ } else {
+ *i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
+ }
+
+ return err;
+}
+
+static void
+esw_cleanup_dests(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+
+ if (attr->dest_ft) {
+ esw_cleanup_decap_indir(esw, attr);
+ } else if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
+ if (attr->dest_chain)
+ esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
+ else if (esw_is_indir_table(esw, attr))
+ esw_cleanup_indir_table(esw, attr);
+ else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
+ esw_cleanup_chain_src_port_rewrite(esw, attr);
+ }
+}
+
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
@@ -308,7 +607,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_vport_tbl_attr fwd_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
- int j, i = 0;
+ int i = 0;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
@@ -329,50 +628,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
}
}
+ mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);
+
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- struct mlx5_flow_table *ft;
-
- if (attr->dest_ft) {
- flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = attr->dest_ft;
- i++;
- } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
- flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
- i++;
- } else if (attr->dest_chain) {
- flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- ft = mlx5_chains_get_table(chains, attr->dest_chain,
- 1, 0);
- if (IS_ERR(ft)) {
- rule = ERR_CAST(ft);
- goto err_create_goto_table;
- }
-
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = ft;
- i++;
- } else {
- for (j = esw_attr->split_count; j < esw_attr->out_count; j++) {
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = esw_attr->dests[j].rep->vport;
- dest[i].vport.vhca_id =
- MLX5_CAP_GEN(esw_attr->dests[j].mdev, vhca_id);
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
- dest[i].vport.flags |=
- MLX5_FLOW_DEST_VPORT_VHCA_ID;
- if (esw_attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- flow_act.pkt_reformat =
- esw_attr->dests[j].pkt_reformat;
- dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
- dest[i].vport.pkt_reformat =
- esw_attr->dests[j].pkt_reformat;
- }
- i++;
- }
+ int err;
+
+ err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_create_goto_table;
}
}
@@ -407,15 +671,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
fdb = attr->ft;
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
- mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr);
+ mlx5_eswitch_set_rule_source_port(esw, spec, attr,
+ esw_attr->in_mdev->priv.eswitch,
+ esw_attr->in_rep->vport);
}
if (IS_ERR(fdb)) {
rule = ERR_CAST(fdb);
goto err_esw_get;
}
- mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);
-
if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
&flow_act, dest, i);
@@ -434,8 +698,7 @@ err_add_rule:
else if (attr->chain || attr->prio)
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
- mlx5_chains_put_table(chains, attr->dest_chain, 1, 0);
+ esw_cleanup_dests(esw, attr);
err_create_goto_table:
return rule;
}
@@ -453,7 +716,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_table *fast_fdb;
struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_handle *rule;
- int i;
+ int i, err = 0;
fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
if (IS_ERR(fast_fdb)) {
@@ -472,22 +735,26 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
for (i = 0; i < esw_attr->split_count; i++) {
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = esw_attr->dests[i].rep->vport;
- dest[i].vport.vhca_id =
- MLX5_CAP_GEN(esw_attr->dests[i].mdev, vhca_id);
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
- dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
- dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
- dest[i].vport.pkt_reformat = esw_attr->dests[i].pkt_reformat;
+ if (esw_is_indir_table(esw, attr))
+ err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
+ else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
+ err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
+ &i);
+ else
+ esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);
+
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_chain_src_rewrite;
}
}
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = fwd_fdb;
i++;
- mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr);
+ mlx5_eswitch_set_rule_source_port(esw, spec, attr,
+ esw_attr->in_mdev->priv.eswitch,
+ esw_attr->in_rep->vport);
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
@@ -495,13 +762,16 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
- if (IS_ERR(rule))
- goto add_err;
+ if (IS_ERR(rule)) {
+ i = esw_attr->split_count;
+ goto err_chain_src_rewrite;
+ }
atomic64_inc(&esw->offloads.num_flows);
return rule;
-add_err:
+err_chain_src_rewrite:
+ esw_put_dest_tables_loop(esw, attr, 0, i);
esw_vport_tbl_put(esw, &fwd_attr);
err_get_fwd:
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
@@ -542,13 +812,13 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
if (fwd_rule) {
esw_vport_tbl_put(esw, &fwd_attr);
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
+ esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
} else {
if (split)
esw_vport_tbl_put(esw, &fwd_attr);
else if (attr->chain || attr->prio)
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
- if (attr->dest_chain)
- mlx5_chains_put_table(chains, attr->dest_chain, 1, 0);
+ esw_cleanup_dests(esw, attr);
}
}
@@ -810,6 +1080,81 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
+static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
+ int i = 0, num_vfs = esw->esw_funcs.num_vfs, vport_num;
+
+ if (!num_vfs || !flows)
+ return;
+
+ mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs)
+ mlx5_del_flow_rules(flows[i++]);
+
+ kvfree(flows);
+}
+
+static int
+mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
+{
+ int num_vfs, vport_num, rule_idx = 0, err = 0;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {0};
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_handle **flows;
+ struct mlx5_flow_spec *spec;
+
+ num_vfs = esw->esw_funcs.num_vfs;
+ flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL);
+ if (!flows)
+ return -ENOMEM;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
+ ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs) {
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
+ dest.vport.num = vport_num;
+
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule idx %d, err %ld\n",
+ rule_idx, PTR_ERR(flow_rule));
+ goto rule_err;
+ }
+ flows[rule_idx++] = flow_rule;
+ }
+
+ esw->fdb_table.offloads.send_to_vport_meta_rules = flows;
+ kvfree(spec);
+ return 0;
+
+rule_err:
+ while (--rule_idx >= 0)
+ mlx5_del_flow_rules(flows[rule_idx]);
+ kvfree(spec);
+alloc_err:
+ kvfree(flows);
+ return err;
+}
+
static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
@@ -1292,11 +1637,11 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
+ int num_vfs, table_size, ix, err = 0;
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
u32 flags = 0, *flow_group_in;
- int table_size, ix, err = 0;
struct mlx5_flow_group *g;
void *match_criteria;
u8 *dmac;
@@ -1322,7 +1667,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
}
table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
- MLX5_ESW_MISS_FLOWS + esw->total_vports;
+ MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs;
/* create the slow path fdb with encap set, so further table instances
* can be created at run time while VFs are probed if the FW allows that.
@@ -1370,6 +1715,38 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
}
esw->fdb_table.offloads.send_to_vport_grp = g;
+ /* meta send to vport */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+
+ num_vfs = esw->esw_funcs.num_vfs;
+ if (num_vfs) {
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + num_vfs - 1);
+ ix += num_vfs;
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
+ err);
+ goto send_vport_meta_err;
+ }
+ esw->fdb_table.offloads.send_to_vport_meta_grp = g;
+
+ err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
+ if (err)
+ goto meta_rule_err;
+ }
+
if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
/* create peer esw miss group */
memset(flow_group_in, 0, inlen);
@@ -1437,6 +1814,11 @@ miss_err:
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
+ mlx5_eswitch_del_send_to_vport_meta_rules(esw);
+meta_rule_err:
+ if (esw->fdb_table.offloads.send_to_vport_meta_grp)
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
+send_vport_meta_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
esw_chains_destroy(esw, esw_chains(esw));
@@ -1458,7 +1840,10 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
+ mlx5_eswitch_del_send_to_vport_meta_rules(esw);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+ if (esw->fdb_table.offloads.send_to_vport_meta_grp)
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
@@ -1800,11 +2185,22 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
esw->offloads.rep_ops[rep_type]->unload(rep);
}
+static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
+{
+ struct mlx5_eswitch_rep *rep;
+ int i;
+
+ mlx5_esw_for_each_sf_rep(esw, i, rep)
+ __esw_offloads_unload_rep(esw, rep, rep_type);
+}
+
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
int i;
+ __unload_reps_sf_vport(esw, rep_type);
+
mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
__esw_offloads_unload_rep(esw, rep, rep_type);
@@ -1822,7 +2218,7 @@ static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
-static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
+int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
@@ -1846,7 +2242,7 @@ err_reps:
return err;
}
-static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
@@ -2171,12 +2567,20 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
+ struct mlx5_esw_indir_table *indir;
int err;
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.vports.lock);
hash_init(esw->fdb_table.offloads.vports.table);
+ indir = mlx5_esw_indir_table_init();
+ if (IS_ERR(indir)) {
+ err = PTR_ERR(indir);
+ goto create_indir_err;
+ }
+ esw->fdb_table.offloads.indir = indir;
+
err = esw_create_uplink_offloads_acl_tables(esw);
if (err)
goto create_acl_err;
@@ -2208,6 +2612,8 @@ create_restore_err:
create_offloads_err:
esw_destroy_uplink_offloads_acl_tables(esw);
create_acl_err:
+ mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
+create_indir_err:
mutex_destroy(&esw->fdb_table.offloads.vports.lock);
return err;
}
@@ -2219,6 +2625,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
esw_destroy_restore_table(esw);
esw_destroy_offloads_table(esw);
esw_destroy_uplink_offloads_acl_tables(esw);
+ mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
@@ -2824,3 +3231,126 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
+
+int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
+ u16 vport_num, u32 sfnum)
+{
+ int err;
+
+ err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
+ if (err)
+ return err;
+
+ err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, sfnum);
+ if (err)
+ goto devlink_err;
+
+ err = mlx5_esw_offloads_rep_load(esw, vport_num);
+ if (err)
+ goto rep_err;
+ return 0;
+
+rep_err:
+ mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
+devlink_err:
+ mlx5_esw_vport_disable(esw, vport_num);
+ return err;
+}
+
+void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ mlx5_esw_offloads_rep_unload(esw, vport_num);
+ mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
+ mlx5_esw_vport_disable(esw, vport_num);
+}
+
+static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
+{
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *query_ctx;
+ void *hca_caps;
+ int err;
+
+ *vhca_id = 0;
+ if (mlx5_esw_is_manager_vport(esw, vport_num) ||
+ !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
+ return -EPERM;
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx);
+ if (err)
+ goto out_free;
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
+
+out_free:
+ kfree(query_ctx);
+ return err;
+}
+
+int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ u16 *old_entry, *vhca_map_entry, vhca_id;
+ int err;
+
+ err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
+ if (err) {
+ esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
+ vport_num, err);
+ return err;
+ }
+
+ vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
+ if (!vhca_map_entry)
+ return -ENOMEM;
+
+ *vhca_map_entry = vport_num;
+ old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
+ if (xa_is_err(old_entry)) {
+ kfree(vhca_map_entry);
+ return xa_err(old_entry);
+ }
+ kfree(old_entry);
+ return 0;
+}
+
+void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ u16 *vhca_map_entry, vhca_id;
+ int err;
+
+ err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
+ if (err)
+ esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
+ vport_num, err);
+
+ vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
+ kfree(vhca_map_entry);
+}
+
+int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
+{
+ u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);
+
+ if (!res)
+ return -ENOENT;
+
+ *vport_num = *res;
+ return 0;
+}
+
+u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
+ u16 vport_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ if (WARN_ON_ONCE(IS_ERR(vport)))
+ return 0;
+
+ return vport->metadata;
+}
+EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
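
For readers following the refactor above: the destination setup that used to live inline in mlx5_eswitch_add_offloaded_rule() is now split into per-kind helpers chosen by esw_setup_dests(). Below is a minimal user-space sketch of that dispatch shape; all types and helpers are simplified stand-ins, not mlx5 APIs.

/* Standalone sketch (not mlx5 code): one helper per destination kind,
 * selected in priority order as in esw_setup_dests() above. Each helper
 * fills dest[] and returns 0 or a negative errno.
 */
#include <stdio.h>

struct dest { const char *kind; int id; };
struct attr { int dest_ft; int slow_path; int dest_chain; };

static int setup_ft_dest(struct dest *d, int ft)       { d->kind = "ft";    d->id = ft;    return 0; }
static int setup_slow_path_dest(struct dest *d)        { d->kind = "slow";  d->id = 0;     return 0; }
static int setup_chain_dest(struct dest *d, int chain) { d->kind = "chain"; d->id = chain; return 0; }
static int setup_vport_dest(struct dest *d, int vport) { d->kind = "vport"; d->id = vport; return 0; }

static int setup_dests(struct dest *dest, const struct attr *attr, int *i)
{
	/* Same priority order as the patch: dest_ft, slow path, chain, vports. */
	if (attr->dest_ft)
		return setup_ft_dest(&dest[(*i)++], attr->dest_ft);
	if (attr->slow_path)
		return setup_slow_path_dest(&dest[(*i)++]);
	if (attr->dest_chain)
		return setup_chain_dest(&dest[(*i)++], attr->dest_chain);
	return setup_vport_dest(&dest[(*i)++], 1);
}

int main(void)
{
	struct dest dest[4];
	struct attr attr = { .dest_chain = 5 };
	int i = 0;

	if (!setup_dests(dest, &attr, &i))
		printf("%d dest(s), first: %s %d\n", i, dest[0].kind, dest[0].id);
	return 0;
}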
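
The new mlx5_esw_offloads_sf_vport_enable() follows the usual kernel staged-init pattern: each step that succeeds is undone in reverse order when a later step fails. A small standalone sketch of the same goto-unwind shape, with illustrative stand-in functions:

/* Standalone sketch (not mlx5 code) of the goto-unwind shape used by
 * mlx5_esw_offloads_sf_vport_enable() above. All functions are
 * illustrative stand-ins; load_rep() fails on purpose to show the unwind.
 */
#include <errno.h>
#include <stdio.h>

static int  enable_vport(void)    { puts("vport enabled");     return 0; }
static void disable_vport(void)   { puts("vport disabled");    }
static int  register_port(void)   { puts("port registered");   return 0; }
static void unregister_port(void) { puts("port unregistered"); }
static int  load_rep(void)        { puts("rep load failed");   return -ENOMEM; }

static int sf_vport_enable(void)
{
	int err;

	err = enable_vport();
	if (err)
		return err;

	err = register_port();
	if (err)
		goto devlink_err;

	err = load_rep();
	if (err)
		goto rep_err;
	return 0;

rep_err:			/* undo in reverse order of setup */
	unregister_port();
devlink_err:
	disable_vport();
	return err;
}

int main(void)
{
	printf("sf_vport_enable() -> %d\n", sf_vport_enable());
	return 0;
}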
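
mlx5_esw_vport_vhca_id_set()/_clear() maintain a vhca_id -> vport_num lookup in an xarray, freeing any entry that xa_store() replaces. A rough user-space sketch of those semantics, with a fixed-size array standing in for the xarray:

/* Standalone sketch (not kernel code) of the vhca_id -> vport_num map
 * kept in esw->offloads.vhca_map above. A plain array replaces the
 * xarray; the store-replaces-and-frees-old-entry behavior mirrors the
 * xa_store() usage in the patch.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define VHCA_MAP_SIZE 64

static unsigned short *vhca_map[VHCA_MAP_SIZE];	/* indexed by vhca_id */

static int vhca_id_set(unsigned short vhca_id, unsigned short vport_num)
{
	unsigned short *entry, *old;

	if (vhca_id >= VHCA_MAP_SIZE)
		return -EINVAL;

	entry = malloc(sizeof(*entry));
	if (!entry)
		return -ENOMEM;
	*entry = vport_num;

	old = vhca_map[vhca_id];	/* xa_store() returns the old entry */
	vhca_map[vhca_id] = entry;
	free(old);			/* the patch kfree()s the replaced mapping */
	return 0;
}

static void vhca_id_clear(unsigned short vhca_id)
{
	if (vhca_id >= VHCA_MAP_SIZE)
		return;
	free(vhca_map[vhca_id]);	/* xa_erase() + kfree() in the patch */
	vhca_map[vhca_id] = NULL;
}

static int vhca_id_to_vport(unsigned short vhca_id, unsigned short *vport_num)
{
	unsigned short *res = vhca_id < VHCA_MAP_SIZE ? vhca_map[vhca_id] : NULL;

	if (!res)
		return -ENOENT;		/* as in mlx5_eswitch_vhca_id_to_vport() */
	*vport_num = *res;
	return 0;
}

int main(void)
{
	unsigned short vport;

	(void)vhca_id_set(7, 3);
	if (!vhca_id_to_vport(7, &vport))
		printf("vhca 7 -> vport %u\n", vport);
	vhca_id_clear(7);
	return 0;
}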