-rw-r--r--  drivers/infiniband/hw/mlx5/fs.c  37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c  20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c  20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c  12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c  293
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c  194
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h  54
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c  13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c  365
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h  17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c  77
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c  16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c  55
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c  21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h  19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c  207
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h  147
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c  169
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h  168
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c  221
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h  40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h  2
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c  4
-rw-r--r--  include/linux/mlx5/fs.h  4
42 files changed, 1462 insertions, 788 deletions
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 520034acf73a..162814ae8cb4 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -943,7 +943,7 @@ int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
}
dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dst.counter_id = mlx5_fc_id(opfc->fc);
+ dst.counter = opfc->fc;
flow_act.action =
MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
@@ -1113,8 +1113,8 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
handler->ibcounters = flow_act.counters;
dest_arr[dest_num].type =
MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest_arr[dest_num].counter_id =
- mlx5_fc_id(mcounters->hw_cntrs_hndl);
+ dest_arr[dest_num].counter =
+ mcounters->hw_cntrs_hndl;
dest_num++;
}
@@ -1603,7 +1603,7 @@ static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
static struct mlx5_ib_flow_handler *raw_fs_rule_add(
struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
struct mlx5_flow_context *flow_context, struct mlx5_flow_act *flow_act,
- u32 counter_id, void *cmd_in, int inlen, int dest_id, int dest_type)
+ struct mlx5_fc *counter, void *cmd_in, int inlen, int dest_id, int dest_type)
{
struct mlx5_flow_destination *dst;
struct mlx5_ib_flow_prio *ft_prio;
@@ -1652,8 +1652,12 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
}
if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ if (WARN_ON(!counter)) {
+ err = -EINVAL;
+ goto unlock;
+ }
dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dst[dst_num].counter_id = counter_id;
+ dst[dst_num].counter = counter;
dst_num++;
}
@@ -1878,7 +1882,8 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
return 0;
}
-static bool is_flow_counter(void *obj, u32 offset, u32 *counter_id)
+static bool
+is_flow_counter(void *obj, u32 offset, u32 *counter_id, u32 *fc_bulk_size)
{
struct devx_obj *devx_obj = obj;
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
@@ -1888,6 +1893,7 @@ static bool is_flow_counter(void *obj, u32 offset, u32 *counter_id)
if (offset && offset >= devx_obj->flow_counter_bulk_size)
return false;
+ *fc_bulk_size = devx_obj->flow_counter_bulk_size;
*counter_id = MLX5_GET(dealloc_flow_counter_in,
devx_obj->dinbox,
flow_counter_id);
@@ -1904,13 +1910,13 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
{
struct mlx5_flow_context flow_context = {.flow_tag =
MLX5_FS_DEFAULT_FLOW_TAG};
- u32 *offset_attr, offset = 0, counter_id = 0;
int dest_id, dest_type = -1, inlen, len, ret, i;
struct mlx5_ib_flow_handler *flow_handler;
struct mlx5_ib_flow_matcher *fs_matcher;
struct ib_uobject **arr_flow_actions;
struct ib_uflow_resources *uflow_res;
struct mlx5_flow_act flow_act = {};
+ struct mlx5_fc *counter = NULL;
struct ib_qp *qp = NULL;
void *devx_obj, *cmd_in;
struct ib_uobject *uobj;
@@ -1937,6 +1943,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
len = uverbs_attr_get_uobjs_arr(attrs,
MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions);
if (len) {
+ u32 *offset_attr, fc_bulk_size, offset = 0, counter_id = 0;
devx_obj = arr_flow_actions[0]->object;
if (uverbs_attr_is_valid(attrs,
@@ -1956,8 +1963,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
offset = *offset_attr;
}
- if (!is_flow_counter(devx_obj, offset, &counter_id))
+ if (!is_flow_counter(devx_obj, offset, &counter_id, &fc_bulk_size))
return -EINVAL;
+ counter = mlx5_fc_local_create(counter_id, offset, fc_bulk_size);
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
}
@@ -1968,8 +1978,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS);
- if (!uflow_res)
- return -ENOMEM;
+ if (!uflow_res) {
+ ret = -ENOMEM;
+ goto destroy_counter;
+ }
len = uverbs_attr_get_uobjs_arr(attrs,
MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions);
@@ -1996,7 +2008,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
flow_handler =
raw_fs_rule_add(dev, fs_matcher, &flow_context, &flow_act,
- counter_id, cmd_in, inlen, dest_id, dest_type);
+ counter, cmd_in, inlen, dest_id, dest_type);
if (IS_ERR(flow_handler)) {
ret = PTR_ERR(flow_handler);
goto err_out;
@@ -2007,6 +2019,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
return 0;
err_out:
ib_uverbs_flow_resources_free(uflow_res);
+destroy_counter:
+ if (counter)
+ mlx5_fc_local_destroy(counter);
return ret;
}
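
With this change a raw counter id is no longer threaded through to the flow destination; the DevX path first wraps the pre-allocated counter in a struct mlx5_fc. A minimal sketch of the resulting call pattern (names as in the hunks above; error handling trimmed):

	u32 counter_id, fc_bulk_size, offset = 0;
	struct mlx5_fc *counter;

	if (!is_flow_counter(devx_obj, offset, &counter_id, &fc_bulk_size))
		return -EINVAL;

	counter = mlx5_fc_local_create(counter_id, offset, fc_bulk_size);
	if (IS_ERR(counter))
		return PTR_ERR(counter);

	dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dst.counter = counter;	/* was: dst.counter_id = counter_id */
	/* on any later failure: mlx5_fc_local_destroy(counter); */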
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index be3d0876c521..10a763e668ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \
- fw_reset.o qos.o lib/tout.o lib/aso.o wc.o
+ fw_reset.o qos.o lib/tout.o lib/aso.o wc.o fs_pool.o
#
# Netdev basic
@@ -123,6 +123,7 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/sws/dr_domain.o \
steering/sws/dr_ste_v0.o \
steering/sws/dr_ste_v1.o \
steering/sws/dr_ste_v2.o \
+ steering/sws/dr_ste_v3.o \
steering/sws/dr_cmd.o \
steering/sws/dr_fw.o \
steering/sws/dr_action.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index 9aed29fa4900..d6e736c1fb24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -292,7 +292,7 @@ TRACE_EVENT(mlx5_fs_add_rule,
if (rule->dest_attr.type &
MLX5_FLOW_DESTINATION_TYPE_COUNTER)
__entry->counter_id =
- rule->dest_attr.counter_id;
+ mlx5_fc_id(rule->dest_attr.counter);
),
TP_printk("rule=%p fte=%p index=%u sw_action=<%s> [dst] %s\n",
__entry->rule, __entry->fte, __entry->index,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index e51b03d4c717..687bd95d2c3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -194,7 +194,7 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
@@ -223,7 +223,7 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
}
sa_entry->ipsec_rule.trailer.fc = flow_counter;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
@@ -275,7 +275,7 @@ static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
@@ -348,7 +348,7 @@ static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
@@ -686,7 +686,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
rx->ft.status = ft;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
+ dest[1].counter = rx->fc->cnt;
err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
if (err)
goto err_add;
@@ -873,7 +873,7 @@ static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx->fc->cnt);
+ dest.counter = tx->fc->cnt;
fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1);
if (IS_ERR(fte)) {
err = PTR_ERR(fte);
@@ -1649,7 +1649,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[0].ft = rx->ft.status;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(counter);
+ dest[1].counter = counter;
rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1762,7 +1762,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
dest[0].ft = tx->ft.status;
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(counter);
+ dest[1].counter = counter;
rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1835,7 +1835,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
+ dest[dstn].counter = tx->fc->drop;
dstn++;
break;
default:
@@ -1913,7 +1913,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
case XFRM_POLICY_BLOCK:
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
+ dest[dstn].counter = rx->fc->drop;
dstn++;
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 6b3b1afe8312..9ba99609999f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1282,7 +1282,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
+ dest[dest_ix].counter = attr->counter;
dest_ix++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 6b4c9ffad95b..7dd1dc3f77c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -135,7 +135,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
if (drop_counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter);
+ drop_ctr_dst.counter = drop_counter;
dst = &drop_ctr_dst;
dest_num++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
index 093ed86a0acd..1c37098e09ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -260,7 +260,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
if (counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(counter);
+ drop_ctr_dst.counter = counter;
dst = &drop_ctr_dst;
dest_num++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index c5ea1d1d2b03..5f647358a05c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -570,7 +570,8 @@ mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge,
struct mlx5_eswitch *esw)
{
@@ -628,7 +629,7 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dests[0].ft = bridge->egress_ft;
dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dests[1].counter_id = counter_id;
+ dests[1].counter = counter;
handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
ARRAY_SIZE(dests));
@@ -639,17 +640,19 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge)
{
- return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
+ return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter,
bridge, bridge->br_offloads->esw);
}
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge)
{
struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos;
@@ -671,7 +674,7 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
goto out;
}
- handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
+ handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter,
bridge, peer_esw);
out:
@@ -1385,10 +1388,9 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
handle = peer ?
mlx5_esw_bridge_ingress_flow_peer_create(vport_num, esw_owner_vhca_id,
- addr, vlan, mlx5_fc_id(counter),
- bridge) :
+ addr, vlan, counter, bridge) :
mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
- mlx5_fc_id(counter), bridge);
+ counter, bridge);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index d5b42b3a19fd..8636f0485800 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -724,7 +724,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[i].counter_id = mlx5_fc_id(attr->counter);
+ dest[i].counter = attr->counter;
i++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 676005854dad..ae20c061e0fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -217,7 +217,8 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
int err;
if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
- underlay_qpn == 0)
+ underlay_qpn == 0 &&
+ (ft->type != FS_FT_RDMA_RX && ft->type != FS_FT_RDMA_TX))
return 0;
if (ft->type == FS_FT_FDB &&
@@ -718,7 +719,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
continue;
MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
- dst->dest_attr.counter_id);
+ mlx5_fc_id(dst->dest_attr.counter));
in_dests += dst_cnt_size;
list_size++;
}
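
With the destination carrying the counter object itself, the hardware id is resolved exactly once, when the FTE command is built; every other consumer just stores the pointer. The shape of the split (as in the hunk above):

	/* at rule-creation sites, driver-wide */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = fc;

	/* at command-build time, fs_cmd.c only */
	MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
		 mlx5_fc_id(dest.counter));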
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 2eabfcc247c6..41b5e98a0495 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -658,6 +658,7 @@ static void del_sw_hw_rule(struct fs_node *node)
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ mlx5_fc_local_destroy(rule->dest_attr.counter);
goto out;
}
@@ -820,11 +821,17 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
return index;
fte->index = index + fg->start_index;
+retry_insert:
ret = rhashtable_insert_fast(&fg->ftes_hash,
&fte->hash,
rhash_fte);
- if (ret)
+ if (ret) {
+ if (ret == -EBUSY) {
+ cond_resched();
+ goto retry_insert;
+ }
goto err_ida_remove;
+ }
tree_add_node(&fte->node, &fg->node);
list_add_tail(&fte->node.list, &fg->node.children);
@@ -3658,8 +3665,7 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev)
goto err;
}
- if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
- MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
+ if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support)) {
err = init_rdma_rx_root_ns(steering);
if (err)
goto err;
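
rhashtable_insert_fast() can fail transiently with -EBUSY while the table is being rehashed; the new label-based loop above retries after yielding the CPU. The same control flow as a plain loop (a sketch, equivalent to the hunk):

	do {
		ret = rhashtable_insert_fast(&fg->ftes_hash, &fte->hash,
					     rhash_fte);
		if (ret == -EBUSY)
			cond_resched();
	} while (ret == -EBUSY);
	if (ret)
		goto err_ida_remove;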
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 62d0c689796b..d8e1c4ebd364 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "fs_core.h"
+#include "fs_pool.h"
#include "fs_cmd.h"
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
@@ -43,6 +44,11 @@
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
+enum mlx5_fc_type {
+ MLX5_FC_TYPE_ACQUIRED = 0,
+ MLX5_FC_TYPE_LOCAL,
+};
+
struct mlx5_fc_cache {
u64 packets;
u64 bytes;
@@ -52,6 +58,7 @@ struct mlx5_fc_cache {
struct mlx5_fc {
u32 id;
bool aging;
+ enum mlx5_fc_type type;
struct mlx5_fc_bulk *bulk;
struct mlx5_fc_cache cache;
/* last{packets,bytes} are used for calculating deltas since last reading. */
@@ -59,17 +66,6 @@ struct mlx5_fc {
u64 lastbytes;
};
-struct mlx5_fc_pool {
- struct mlx5_core_dev *dev;
- struct mutex pool_lock; /* protects pool lists */
- struct list_head fully_used;
- struct list_head partially_used;
- struct list_head unused;
- int available_fcs;
- int used_fcs;
- int threshold;
-};
-
struct mlx5_fc_stats {
struct xarray counters;
@@ -80,13 +76,13 @@ struct mlx5_fc_stats {
int bulk_query_len;
bool bulk_query_alloc_failed;
unsigned long next_bulk_query_alloc;
- struct mlx5_fc_pool fc_pool;
+ struct mlx5_fs_pool fc_pool;
};
-static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
-static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
-static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
-static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);
+static void mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev);
+static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool);
+static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool);
+static void mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc);
static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
@@ -186,6 +182,9 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
+ if (WARN_ON(counter->type == MLX5_FC_TYPE_LOCAL))
+ return;
+
if (counter->bulk)
mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
else
@@ -438,11 +437,9 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
/* Flow counter bulks */
struct mlx5_fc_bulk {
- struct list_head pool_list;
+ struct mlx5_fs_bulk fs_bulk;
u32 base_id;
- int bulk_len;
- unsigned long *bitmask;
- struct mlx5_fc fcs[] __counted_by(bulk_len);
+ struct mlx5_fc fcs[];
};
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
@@ -452,16 +449,10 @@ static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
counter->id = id;
}
-static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
-{
- return bitmap_weight(bulk->bitmask, bulk->bulk_len);
-}
-
-static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
+static struct mlx5_fs_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
{
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
- struct mlx5_fc_bulk *bulk;
- int err = -ENOMEM;
+ struct mlx5_fc_bulk *fc_bulk;
int bulk_len;
u32 base_id;
int i;
@@ -469,207 +460,139 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
- bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
- if (!bulk)
- goto err_alloc_bulk;
+ fc_bulk = kvzalloc(struct_size(fc_bulk, fcs, bulk_len), GFP_KERNEL);
+ if (!fc_bulk)
+ return NULL;
- bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
- GFP_KERNEL);
- if (!bulk->bitmask)
- goto err_alloc_bitmask;
-
- err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
- if (err)
- goto err_mlx5_cmd_bulk_alloc;
+ if (mlx5_fs_bulk_init(dev, &fc_bulk->fs_bulk, bulk_len))
+ goto fc_bulk_free;
- bulk->base_id = base_id;
- bulk->bulk_len = bulk_len;
- for (i = 0; i < bulk_len; i++) {
- mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
- set_bit(i, bulk->bitmask);
- }
+ if (mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id))
+ goto fs_bulk_cleanup;
+ fc_bulk->base_id = base_id;
+ for (i = 0; i < bulk_len; i++)
+ mlx5_fc_init(&fc_bulk->fcs[i], fc_bulk, base_id + i);
- return bulk;
+ return &fc_bulk->fs_bulk;
-err_mlx5_cmd_bulk_alloc:
- kvfree(bulk->bitmask);
-err_alloc_bitmask:
- kvfree(bulk);
-err_alloc_bulk:
- return ERR_PTR(err);
+fs_bulk_cleanup:
+ mlx5_fs_bulk_cleanup(&fc_bulk->fs_bulk);
+fc_bulk_free:
+ kvfree(fc_bulk);
+ return NULL;
}
static int
-mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
+mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
{
- if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
+ struct mlx5_fc_bulk *fc_bulk = container_of(fs_bulk,
+ struct mlx5_fc_bulk,
+ fs_bulk);
+
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
return -EBUSY;
}
- mlx5_cmd_fc_free(dev, bulk->base_id);
- kvfree(bulk->bitmask);
- kvfree(bulk);
+ mlx5_cmd_fc_free(dev, fc_bulk->base_id);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(fc_bulk);
return 0;
}
-static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
+static void mlx5_fc_pool_update_threshold(struct mlx5_fs_pool *fc_pool)
{
- int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);
-
- if (free_fc_index >= bulk->bulk_len)
- return ERR_PTR(-ENOSPC);
-
- clear_bit(free_fc_index, bulk->bitmask);
- return &bulk->fcs[free_fc_index];
-}
-
-static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
-{
- int fc_index = fc->id - bulk->base_id;
-
- if (test_bit(fc_index, bulk->bitmask))
- return -EINVAL;
-
- set_bit(fc_index, bulk->bitmask);
- return 0;
+ fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
+ fc_pool->used_units / MLX5_FC_POOL_USED_BUFF_RATIO);
}
/* Flow counters pool API */
-static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
-{
- fc_pool->dev = dev;
- mutex_init(&fc_pool->pool_lock);
- INIT_LIST_HEAD(&fc_pool->fully_used);
- INIT_LIST_HEAD(&fc_pool->partially_used);
- INIT_LIST_HEAD(&fc_pool->unused);
- fc_pool->available_fcs = 0;
- fc_pool->used_fcs = 0;
- fc_pool->threshold = 0;
-}
+static const struct mlx5_fs_pool_ops mlx5_fc_pool_ops = {
+ .bulk_destroy = mlx5_fc_bulk_destroy,
+ .bulk_create = mlx5_fc_bulk_create,
+ .update_threshold = mlx5_fc_pool_update_threshold,
+};
-static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
+static void
+mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc_bulk *tmp;
-
- list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
- list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
- list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
+ mlx5_fs_pool_init(fc_pool, dev, &mlx5_fc_pool_ops);
}
-static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
+static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool)
{
- fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
- fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
+ mlx5_fs_pool_cleanup(fc_pool);
}
-static struct mlx5_fc_bulk *
-mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
+static struct mlx5_fc *
+mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *new_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fc_bulk *fc_bulk;
+ int err;
- new_bulk = mlx5_fc_bulk_create(dev);
- if (!IS_ERR(new_bulk))
- fc_pool->available_fcs += new_bulk->bulk_len;
- mlx5_fc_pool_update_threshold(fc_pool);
- return new_bulk;
+ err = mlx5_fs_pool_acquire_index(fc_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ fc_bulk = container_of(pool_index.fs_bulk, struct mlx5_fc_bulk, fs_bulk);
+ return &fc_bulk->fcs[pool_index.index];
}
static void
-mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
+mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc)
{
+ struct mlx5_fs_bulk *fs_bulk = &fc->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
struct mlx5_core_dev *dev = fc_pool->dev;
- fc_pool->available_fcs -= bulk->bulk_len;
- mlx5_fc_bulk_destroy(dev, bulk);
- mlx5_fc_pool_update_threshold(fc_pool);
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = fc->id - fc->bulk->base_id;
+ if (mlx5_fs_pool_release_index(fc_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
}
-static struct mlx5_fc *
-mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
- struct list_head *next_list,
- bool move_non_full_bulk)
+/**
+ * mlx5_fc_local_create - Allocate mlx5_fc struct for a counter which
+ * was already acquired using its counter id and bulk data.
+ *
+ * @counter_id: id of the already-acquired counter
+ * @offset: counter offset from the bulk base id
+ * @bulk_size: size of the bulk the counter was allocated from
+ *
+ * Return: Pointer to mlx5_fc on success, ERR_PTR otherwise.
+ */
+struct mlx5_fc *
+mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size)
{
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc *fc;
-
- if (list_empty(src_list))
- return ERR_PTR(-ENODATA);
-
- bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
- fc = mlx5_fc_bulk_acquire_fc(bulk);
- if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
- list_move(&bulk->pool_list, next_list);
- return fc;
-}
+ struct mlx5_fc_bulk *fc_bulk;
+ struct mlx5_fc *counter;
-static struct mlx5_fc *
-mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
-{
- struct mlx5_fc_bulk *new_bulk;
- struct mlx5_fc *fc;
-
- mutex_lock(&fc_pool->pool_lock);
-
- fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
- &fc_pool->fully_used, false);
- if (IS_ERR(fc))
- fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
- &fc_pool->partially_used,
- true);
- if (IS_ERR(fc)) {
- new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
- if (IS_ERR(new_bulk)) {
- fc = ERR_CAST(new_bulk);
- goto out;
- }
- fc = mlx5_fc_bulk_acquire_fc(new_bulk);
- list_add(&new_bulk->pool_list, &fc_pool->partially_used);
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+ fc_bulk = kzalloc(sizeof(*fc_bulk), GFP_KERNEL);
+ if (!fc_bulk) {
+ kfree(counter);
+ return ERR_PTR(-ENOMEM);
}
- fc_pool->available_fcs--;
- fc_pool->used_fcs++;
-out:
- mutex_unlock(&fc_pool->pool_lock);
- return fc;
+ counter->type = MLX5_FC_TYPE_LOCAL;
+ counter->id = counter_id;
+ fc_bulk->base_id = counter_id - offset;
+ fc_bulk->fs_bulk.bulk_len = bulk_size;
+ counter->bulk = fc_bulk;
+ return counter;
}
+EXPORT_SYMBOL(mlx5_fc_local_create);
-static void
-mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
+void mlx5_fc_local_destroy(struct mlx5_fc *counter)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *bulk = fc->bulk;
- int bulk_free_fcs_amount;
-
- mutex_lock(&fc_pool->pool_lock);
-
- if (mlx5_fc_bulk_release_fc(bulk, fc)) {
- mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
- goto unlock;
- }
-
- fc_pool->available_fcs++;
- fc_pool->used_fcs--;
-
- bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
- if (bulk_free_fcs_amount == 1)
- list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
- if (bulk_free_fcs_amount == bulk->bulk_len) {
- list_del(&bulk->pool_list);
- if (fc_pool->available_fcs > fc_pool->threshold)
- mlx5_fc_pool_free_bulk(fc_pool, bulk);
- else
- list_add(&bulk->pool_list, &fc_pool->unused);
- }
+ if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
+ return;
-unlock:
- mutex_unlock(&fc_pool->pool_lock);
+ kfree(counter->bulk);
+ kfree(counter);
}
+EXPORT_SYMBOL(mlx5_fc_local_destroy);
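
A local counter only wraps an id that the caller (here, the DevX object) already owns, so destroying it frees the wrapper and never touches the hardware counter. A usage sketch, under the same assumptions as the uverbs handler:

	struct mlx5_fc *fc;

	fc = mlx5_fc_local_create(counter_id, offset, bulk_size);
	if (IS_ERR(fc))
		return PTR_ERR(fc);
	/* ... attach fc as a flow destination ... */
	mlx5_fc_local_destroy(fc);	/* HW counter still owned by DevX */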
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
new file mode 100644
index 000000000000..b891d7b9e3e0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include <mlx5_core.h>
+#include "fs_pool.h"
+
+int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
+ int bulk_len)
+{
+ int i;
+
+ fs_bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!fs_bulk->bitmask)
+ return -ENOMEM;
+
+ fs_bulk->bulk_len = bulk_len;
+ for (i = 0; i < bulk_len; i++)
+ set_bit(i, fs_bulk->bitmask);
+
+ return 0;
+}
+
+void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk)
+{
+ kvfree(fs_bulk->bitmask);
+}
+
+int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk)
+{
+ return bitmap_weight(bulk->bitmask, bulk->bulk_len);
+}
+
+static int mlx5_fs_bulk_acquire_index(struct mlx5_fs_bulk *fs_bulk,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ int free_index;
+
+ WARN_ON_ONCE(!pool_index || !fs_bulk);
+ free_index = find_first_bit(fs_bulk->bitmask, fs_bulk->bulk_len);
+ if (free_index >= fs_bulk->bulk_len)
+ return -ENOSPC;
+
+ clear_bit(free_index, fs_bulk->bitmask);
+ pool_index->fs_bulk = fs_bulk;
+ pool_index->index = free_index;
+ return 0;
+}
+
+static int mlx5_fs_bulk_release_index(struct mlx5_fs_bulk *fs_bulk, int index)
+{
+ if (test_bit(index, fs_bulk->bitmask))
+ return -EINVAL;
+
+ set_bit(index, fs_bulk->bitmask);
+ return 0;
+}
+
+void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev,
+ const struct mlx5_fs_pool_ops *ops)
+{
+ WARN_ON_ONCE(!ops || !ops->bulk_destroy || !ops->bulk_create ||
+ !ops->update_threshold);
+ pool->dev = dev;
+ mutex_init(&pool->pool_lock);
+ INIT_LIST_HEAD(&pool->fully_used);
+ INIT_LIST_HEAD(&pool->partially_used);
+ INIT_LIST_HEAD(&pool->unused);
+ pool->available_units = 0;
+ pool->used_units = 0;
+ pool->threshold = 0;
+ pool->ops = ops;
+}
+
+void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool)
+{
+ struct mlx5_core_dev *dev = pool->dev;
+ struct mlx5_fs_bulk *bulk;
+ struct mlx5_fs_bulk *tmp;
+
+ list_for_each_entry_safe(bulk, tmp, &pool->fully_used, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &pool->partially_used, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &pool->unused, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_pool_alloc_new_bulk(struct mlx5_fs_pool *fs_pool)
+{
+ struct mlx5_core_dev *dev = fs_pool->dev;
+ struct mlx5_fs_bulk *new_bulk;
+
+ new_bulk = fs_pool->ops->bulk_create(dev);
+ if (new_bulk)
+ fs_pool->available_units += new_bulk->bulk_len;
+ fs_pool->ops->update_threshold(fs_pool);
+ return new_bulk;
+}
+
+static void
+mlx5_fs_pool_free_bulk(struct mlx5_fs_pool *fs_pool, struct mlx5_fs_bulk *bulk)
+{
+ struct mlx5_core_dev *dev = fs_pool->dev;
+
+ fs_pool->available_units -= bulk->bulk_len;
+ fs_pool->ops->bulk_destroy(dev, bulk);
+ fs_pool->ops->update_threshold(fs_pool);
+}
+
+static int
+mlx5_fs_pool_acquire_from_list(struct list_head *src_list,
+ struct list_head *next_list,
+ bool move_non_full_bulk,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *fs_bulk;
+ int err;
+
+ if (list_empty(src_list))
+ return -ENODATA;
+
+ fs_bulk = list_first_entry(src_list, struct mlx5_fs_bulk, pool_list);
+ err = mlx5_fs_bulk_acquire_index(fs_bulk, pool_index);
+ if (move_non_full_bulk || mlx5_fs_bulk_get_free_amount(fs_bulk) == 0)
+ list_move(&fs_bulk->pool_list, next_list);
+ return err;
+}
+
+int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *new_bulk;
+ int err;
+
+ mutex_lock(&fs_pool->pool_lock);
+
+ err = mlx5_fs_pool_acquire_from_list(&fs_pool->partially_used,
+ &fs_pool->fully_used, false,
+ pool_index);
+ if (err)
+ err = mlx5_fs_pool_acquire_from_list(&fs_pool->unused,
+ &fs_pool->partially_used,
+ true, pool_index);
+ if (err) {
+ new_bulk = mlx5_fs_pool_alloc_new_bulk(fs_pool);
+ if (!new_bulk) {
+ err = -ENOENT;
+ goto out;
+ }
+ err = mlx5_fs_bulk_acquire_index(new_bulk, pool_index);
+ WARN_ON_ONCE(err);
+ list_add(&new_bulk->pool_list, &fs_pool->partially_used);
+ }
+ fs_pool->available_units--;
+ fs_pool->used_units++;
+
+out:
+ mutex_unlock(&fs_pool->pool_lock);
+ return err;
+}
+
+int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *bulk = pool_index->fs_bulk;
+ int bulk_free_amount;
+ int err;
+
+ mutex_lock(&fs_pool->pool_lock);
+
+ /* TBD would rather return void if there was no warn here in original code */
+ err = mlx5_fs_bulk_release_index(bulk, pool_index->index);
+ if (err)
+ goto unlock;
+
+ fs_pool->available_units++;
+ fs_pool->used_units--;
+
+ bulk_free_amount = mlx5_fs_bulk_get_free_amount(bulk);
+ if (bulk_free_amount == 1)
+ list_move_tail(&bulk->pool_list, &fs_pool->partially_used);
+ if (bulk_free_amount == bulk->bulk_len) {
+ list_del(&bulk->pool_list);
+ if (fs_pool->available_units > fs_pool->threshold)
+ mlx5_fs_pool_free_bulk(fs_pool, bulk);
+ else
+ list_add(&bulk->pool_list, &fs_pool->unused);
+ }
+
+unlock:
+ mutex_unlock(&fs_pool->pool_lock);
+ return err;
+}
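
The acquire path above prefers bulks in an order that keeps them packed, and release only hands memory back while the pool sits above its threshold:

	/* acquire preference, highest to lowest:
	 *  1. a partially-used bulk (keeps bulks packed)
	 *  2. an unused bulk (which then moves to partially_used)
	 *  3. a freshly created bulk
	 * release moves a fully-freed bulk to `unused`, or destroys it when
	 * available_units already exceeds the threshold.
	 */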
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
new file mode 100644
index 000000000000..3b149863260c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef __MLX5_FS_POOL_H__
+#define __MLX5_FS_POOL_H__
+
+#include <linux/mlx5/driver.h>
+
+struct mlx5_fs_bulk {
+ struct list_head pool_list;
+ int bulk_len;
+ unsigned long *bitmask;
+};
+
+struct mlx5_fs_pool_index {
+ struct mlx5_fs_bulk *fs_bulk;
+ int index;
+};
+
+struct mlx5_fs_pool;
+
+struct mlx5_fs_pool_ops {
+ int (*bulk_destroy)(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *bulk);
+ struct mlx5_fs_bulk * (*bulk_create)(struct mlx5_core_dev *dev);
+ void (*update_threshold)(struct mlx5_fs_pool *pool);
+};
+
+struct mlx5_fs_pool {
+ struct mlx5_core_dev *dev;
+ void *pool_ctx;
+ const struct mlx5_fs_pool_ops *ops;
+ struct mutex pool_lock; /* protects pool lists */
+ struct list_head fully_used;
+ struct list_head partially_used;
+ struct list_head unused;
+ int available_units;
+ int used_units;
+ int threshold;
+};
+
+int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
+ int bulk_len);
+void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk);
+int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk);
+
+void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev,
+ const struct mlx5_fs_pool_ops *ops);
+void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool);
+int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index);
+int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index);
+
+#endif /* __MLX5_FS_POOL_H__ */
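
A pool client embeds struct mlx5_fs_bulk in its own bulk type and supplies all three ops; fs_counters.c above is the first user. A hypothetical minimal client for illustration (my_bulk, my_bulk_create and the bulk size of 64 are made-up names, not part of the patch):

	struct my_bulk {
		struct mlx5_fs_bulk fs_bulk;	/* recovered via container_of */
		u32 base_id;
	};

	static struct mlx5_fs_bulk *my_bulk_create(struct mlx5_core_dev *dev)
	{
		struct my_bulk *bulk = kvzalloc(sizeof(*bulk), GFP_KERNEL);

		if (!bulk)
			return NULL;
		if (mlx5_fs_bulk_init(dev, &bulk->fs_bulk, 64)) {
			kvfree(bulk);
			return NULL;
		}
		/* ... allocate 64 HW objects and set bulk->base_id ... */
		return &bulk->fs_bulk;
	}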
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
index f4b777d4e108..62b6faa4276a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
@@ -105,20 +105,20 @@ static int mapping_show(struct seq_file *file, void *priv)
struct mlx5_lag *ldev;
bool hash = false;
bool lag_active;
+ int i, idx = 0;
int num_ports;
- int i;
ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
if (lag_active) {
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
- mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, ports,
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev, ports,
&num_ports);
hash = true;
} else {
- for (i = 0; i < ldev->ports; i++)
- ports[i] = ldev->v2p_map[i];
+ mlx5_ldev_for_each(i, 0, ldev)
+ ports[idx++] = ldev->v2p_map[i];
num_ports = ldev->ports;
}
}
@@ -144,11 +144,8 @@ static int members_show(struct seq_file *file, void *priv)
ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
+ mlx5_ldev_for_each(i, 0, ldev)
seq_printf(file, "%s\n", dev_name(ldev->pf[i].dev->device));
- }
mutex_unlock(&ldev->lock);
return 0;
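
mlx5_ldev_for_each() is defined in lag/lag.h (touched by this patch but not shown in this excerpt); the converted loops assume it visits only populated pf[] slots, roughly:

	for (i = start; i < MLX5_MAX_PORTS; i++) {
		if (!ldev->pf[i].dev)
			continue;
		/* loop body runs for each populated slot i */
	}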
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 7f68468c2e75..cea5aa314f6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -43,10 +43,6 @@
#include "mp.h"
#include "mpesw.h"
-enum {
- MLX5_LAG_EGRESS_PORT_1 = 1,
- MLX5_LAG_EGRESS_PORT_2,
-};
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
@@ -72,7 +68,7 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev)
int num_enabled;
int idx;
- mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports,
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev, enabled_ports,
&num_enabled);
for (idx = 0; idx < num_enabled; idx++)
active_port |= BIT_MASK(enabled_ports[idx]);
@@ -80,23 +76,30 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev)
return active_port;
}
-static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
- unsigned long flags)
+static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev,
+ int mode, unsigned long flags)
{
bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
&flags);
int port_sel_mode = get_port_sel_mode(mode, flags);
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
+ u8 *ports = ldev->v2p_map;
+ int idx0, idx1;
void *lag_ctx;
lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode);
+ idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0);
+ idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1);
+
+ if (idx0 < 0 || idx1 < 0)
+ return -EINVAL;
switch (port_sel_mode) {
case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY:
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]);
break;
case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT:
if (!MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass))
@@ -113,17 +116,23 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
return mlx5_cmd_exec_in(dev, create_lag, in);
}
-static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 num_ports,
+static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev,
u8 *ports)
{
u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
+ int idx0, idx1;
+
+ idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0);
+ idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1);
+ if (idx0 < 0 || idx1 < 0)
+ return -EINVAL;
MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
MLX5_SET(modify_lag_in, in, field_select, 0x1);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]);
return mlx5_cmd_exec_in(dev, modify_lag, in);
}
@@ -148,33 +157,31 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
-static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports,
+static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_disabled)
{
int i;
*num_disabled = 0;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev)
if (!tracker->netdev_state[i].tx_enabled ||
!tracker->netdev_state[i].link_up)
ports[(*num_disabled)++] = i;
- }
}
-void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
+void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_enabled)
{
int i;
*num_enabled = 0;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev)
if (tracker->netdev_state[i].tx_enabled &&
tracker->netdev_state[i].link_up)
ports[(*num_enabled)++] = i;
- }
if (*num_enabled == 0)
- mlx5_infer_tx_disabled(tracker, num_ports, ports, num_enabled);
+ mlx5_infer_tx_disabled(tracker, ldev, ports, num_enabled);
}
static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
@@ -192,7 +199,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
int j;
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
- mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports,
+ mlx5_infer_tx_enabled(tracker, ldev, enabled_ports,
&num_enabled);
for (i = 0; i < num_enabled; i++) {
err = scnprintf(buf + written, 4, "%d, ", enabled_ports[i] + 1);
@@ -203,7 +210,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
buf[written - 2] = 0;
mlx5_core_info(dev, "lag map active ports: %s\n", buf);
} else {
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
err = scnprintf(buf + written, 10,
@@ -286,13 +293,55 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
{
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->pf[i].netdev == ndev)
return i;
return -ENOENT;
}
+int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return -ENOENT;
+
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (num == seq)
+ return i;
+ num++;
+ }
+ return -ENOENT;
+}
+
+int mlx5_lag_num_devs(struct mlx5_lag *ldev)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return 0;
+
+ mlx5_ldev_for_each(i, 0, ldev) {
+ (void)i;
+ num++;
+ }
+ return num;
+}
+
+int mlx5_lag_num_netdevs(struct mlx5_lag *ldev)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return 0;
+
+ mlx5_ldev_for_each(i, 0, ldev)
+ if (ldev->pf[i].netdev)
+ num++;
+ return num;
+}
+
static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
return ldev->mode == MLX5_LAG_MODE_ROCE;
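
mlx5_lag_get_dev_index_by_seq() maps a logical sequence number onto an actual pf[] slot, which is why MLX5_LAG_P1 users below resolve an index first instead of indexing pf[] directly. For example, with slot 0 empty and slots 1 and 2 populated:

	mlx5_lag_get_dev_index_by_seq(ldev, 0);	/* -> 1, first populated slot */
	mlx5_lag_get_dev_index_by_seq(ldev, 1);	/* -> 2 */
	mlx5_lag_get_dev_index_by_seq(ldev, 2);	/* -> -ENOENT */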
@@ -310,7 +359,7 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
* with mapping that points to active ports.
*/
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
- u8 num_ports,
+ struct mlx5_lag *ldev,
u8 buckets,
u8 *ports)
{
@@ -323,7 +372,7 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
int i;
int j;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (tracker->netdev_state[i].tx_enabled &&
tracker->netdev_state[i].link_up)
enabled[enabled_ports_num++] = i;
@@ -334,15 +383,16 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
/* Use native mapping by default where each port's buckets
* point the native port: 1 1 1 .. 1 2 2 2 ... 2 3 3 3 ... 3 etc
*/
- for (i = 0; i < num_ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < buckets; j++) {
idx = i * buckets + j;
- ports[idx] = MLX5_LAG_EGRESS_PORT_1 + i;
+ ports[idx] = i + 1;
}
+ }
/* If all ports are disabled/enabled keep native mapping */
- if (enabled_ports_num == num_ports ||
- disabled_ports_num == num_ports)
+ if (enabled_ports_num == ldev->ports ||
+ disabled_ports_num == ldev->ports)
return;
/* Go over the disabled ports and for each assign a random active port */
@@ -358,7 +408,7 @@ static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->pf[i].has_drop)
return true;
return false;
@@ -368,7 +418,7 @@ static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (!ldev->pf[i].has_drop)
continue;
@@ -396,7 +446,7 @@ static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
if (!ldev->tracker.has_inactive)
return;
- mlx5_infer_tx_disabled(tracker, ldev->ports, disabled_ports, &num_disabled);
+ mlx5_infer_tx_disabled(tracker, ldev, disabled_ports, &num_disabled);
for (i = 0; i < num_disabled; i++) {
disabled_index = disabled_ports[i];
@@ -428,10 +478,15 @@ static int mlx5_cmd_modify_active_port(struct mlx5_core_dev *dev, u8 ports)
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
u8 active_ports;
int ret;
+ if (idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[idx].dev;
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
ret = mlx5_lag_port_sel_modify(ldev, ports);
if (ret ||
@@ -442,7 +497,7 @@ static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
return mlx5_cmd_modify_active_port(dev0, active_ports);
}
- return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
+ return mlx5_cmd_modify_lag(dev0, ldev, ports);
}
static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev *dev)
@@ -450,7 +505,7 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;
unsigned long flags;
- int i;
+ int i, last_idx;
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
@@ -458,11 +513,15 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev
if (!ldev)
goto unlock;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->tracker.netdev_state[i].tx_enabled)
ndev = ldev->pf[i].netdev;
- if (!ndev)
- ndev = ldev->pf[ldev->ports - 1].netdev;
+ if (!ndev) {
+ last_idx = mlx5_lag_get_dev_index_by_seq(ldev, ldev->ports - 1);
+ if (last_idx < 0)
+ goto unlock;
+ ndev = ldev->pf[last_idx].netdev;
+ }
if (ndev)
dev_hold(ndev);
@@ -476,16 +535,21 @@ unlock:
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u8 ports[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS] = {};
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev0;
int idx;
int err;
int i;
int j;
- mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ports);
+ if (first_idx < 0)
+ return;
+
+ dev0 = ldev->pf[first_idx].dev;
+ mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ports);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
if (ports[idx] == ldev->v2p_map[idx])
@@ -523,8 +587,13 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
unsigned long *flags)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
+
+ if (first_idx < 0)
+ return -EINVAL;
+ dev0 = ldev->pf[first_idx].dev;
if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
if (ldev->ports > 2)
return -EINVAL;
@@ -544,11 +613,13 @@ static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
enum mlx5_lag_mode mode,
unsigned long *flags)
{
- struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct lag_func *dev0;
- if (mode == MLX5_LAG_MODE_MPESW)
+ if (first_idx < 0 || mode == MLX5_LAG_MODE_MPESW)
return;
+ dev0 = &ldev->pf[first_idx];
if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
if (ldev->ports > 2)
@@ -593,12 +664,18 @@ char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_eswitch *master_esw;
+ struct mlx5_core_dev *dev0;
+ int i, j;
int err;
- int i;
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
+ master_esw = dev0->priv.eswitch;
+ mlx5_ldev_for_each(i, first_idx + 1, ldev) {
struct mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch;
err = mlx5_eswitch_offloads_single_fdb_add_one(master_esw,
@@ -608,9 +685,9 @@ static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
}
return 0;
err:
- for (; i > MLX5_LAG_P1; i--)
+ mlx5_ldev_for_each_reverse(j, i, first_idx + 1, ldev)
mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
- ldev->pf[i].dev->priv.eswitch);
+ ldev->pf[j].dev->priv.eswitch);
return err;
}
@@ -620,16 +697,21 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
unsigned long flags)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
+ struct mlx5_core_dev *dev0;
int err;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
if (tracker)
mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n",
shared_fdb, mlx5_get_str_port_sel_mode(mode, flags));
- err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags);
+ err = mlx5_cmd_create_lag(dev0, ldev, mode, flags);
if (err) {
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
@@ -661,17 +743,22 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
enum mlx5_lag_mode mode,
bool shared_fdb)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev0;
unsigned long flags = 0;
int err;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags);
if (err)
return err;
if (mode != MLX5_LAG_MODE_MPESW) {
- mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map);
+ mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ldev->v2p_map);
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
ldev->v2p_map);
@@ -709,20 +796,26 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
unsigned long flags = ldev->mode_flags;
+ struct mlx5_eswitch *master_esw;
+ struct mlx5_core_dev *dev0;
int err;
int i;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
+ master_esw = dev0->priv.eswitch;
ldev->mode = MLX5_LAG_MODE_NONE;
ldev->mode_flags = 0;
mlx5_lag_mp_reset(ldev);
if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) {
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, first_idx + 1, ldev)
mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
ldev->pf[i].dev->priv.eswitch);
clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
@@ -754,6 +847,7 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
#ifdef CONFIG_MLX5_ESWITCH
struct mlx5_core_dev *dev;
u8 mode;
@@ -761,30 +855,29 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
bool roce_support;
int i;
- for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].dev)
- return false;
+ if (first_idx < 0 || mlx5_lag_num_devs(ldev) != ldev->ports)
+ return false;
#ifdef CONFIG_MLX5_ESWITCH
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
if (mlx5_eswitch_num_vfs(dev->priv.eswitch) && !is_mdev_switchdev_mode(dev))
return false;
}
- dev = ldev->pf[MLX5_LAG_P1].dev;
+ dev = ldev->pf[first_idx].dev;
mode = mlx5_eswitch_mode(dev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
return false;
#else
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
return false;
#endif
- roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
- for (i = 1; i < ldev->ports; i++)
+ roce_support = mlx5_get_roce_state(ldev->pf[first_idx].dev);
+ mlx5_ldev_for_each(i, first_idx + 1, ldev)
if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
return false;
@@ -795,10 +888,7 @@ void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
-
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].dev->priv.flags &
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
continue;
@@ -812,10 +902,7 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
-
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].dev->priv.flags &
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
continue;
@@ -828,11 +915,16 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
void mlx5_disable_lag(struct mlx5_lag *ldev)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
bool roce_lag;
int err;
int i;
+ if (idx < 0)
+ return;
+
+ dev0 = ldev->pf[idx].dev;
roce_lag = __mlx5_lag_is_roce(ldev);
if (shared_fdb) {
@@ -842,7 +934,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
}
- for (i = 1; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, idx + 1, ldev)
mlx5_nic_vport_disable_roce(ldev->pf[i].dev);
}
@@ -854,17 +946,21 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
mlx5_lag_add_devices(ldev);
if (shared_fdb)
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
}
static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
{
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_core_dev *dev;
int i;
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ if (idx < 0)
+ return false;
+
+ mlx5_ldev_for_each(i, idx + 1, ldev) {
dev = ldev->pf[i].dev;
if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
@@ -876,7 +972,7 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
return false;
}
- dev = ldev->pf[MLX5_LAG_P1].dev;
+ dev = ldev->pf[idx].dev;
if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
mlx5_esw_offloads_devcom_is_ready(dev->priv.eswitch) &&
@@ -892,11 +988,11 @@ static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
bool roce_lag = true;
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
roce_lag = roce_lag && !mlx5_sriov_is_enabled(ldev->pf[i].dev);
#ifdef CONFIG_MLX5_ESWITCH
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev);
#endif
@@ -917,13 +1013,18 @@ static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct lag_tracker tracker = { };
+ struct mlx5_core_dev *dev0;
struct net_device *ndev;
bool do_bond, roce_lag;
int err;
int i;
+ if (idx < 0)
+ return;
+
+ dev0 = ldev->pf[idx].dev;
if (!mlx5_lag_is_ready(ldev)) {
do_bond = false;
} else {
@@ -956,7 +1057,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
} else if (roce_lag) {
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 1; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, idx + 1, ldev) {
if (mlx5_get_roce_state(ldev->pf[i].dev))
mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
}
@@ -966,7 +1067,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
break;
@@ -977,7 +1078,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_rescan_drivers_locked(dev0);
mlx5_deactivate_lag(ldev);
mlx5_lag_add_devices(ldev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_core_err(dev0, "Failed to enable lag\n");
return;
@@ -1010,12 +1111,9 @@ struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev)
int i;
mutex_lock(&ldev->lock);
- for (i = 0; i < ldev->ports; i++) {
- if (ldev->pf[i].dev) {
- devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
- break;
- }
- }
+ i = mlx5_get_next_ldev_func(ldev, 0);
+ if (i < MLX5_MAX_PORTS)
+ devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
mutex_unlock(&ldev->lock);
return devcom;
}
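
Note the asymmetry between the two new cursors used here: the forward
lookup signals exhaustion by returning MLX5_MAX_PORTS, while the reverse
lookup (mlx5_get_pre_ldev_func(), added further down) returns -1. A
comment-sized restatement of the two conventions, assuming only what this
patch shows:

    /* Forward cursor: the sentinel is MLX5_MAX_PORTS (one past the last
     * valid index), so "found" checks read `i < MLX5_MAX_PORTS`.
     * Reverse cursor: the sentinel is -1, so "found" checks read `i >= 0`.
     */
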
@@ -1068,7 +1166,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
u8 bond_status = 0;
int num_slaves = 0;
int changed = 0;
- int idx;
+ int i, idx = -1;
if (!netif_is_lag_master(upper))
return 0;
@@ -1083,8 +1181,13 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
*/
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
- idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
- if (idx >= 0) {
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (ldev->pf[i].netdev == ndev_tmp) {
+ idx++;
+ break;
+ }
+ }
+ if (i < MLX5_MAX_PORTS) {
slave = bond_slave_get_rcu(ndev_tmp);
if (slave)
has_inactive |= bond_is_slave_inactive(slave);
@@ -1234,15 +1337,12 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
}
static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev,
- struct net_device *netdev)
+ struct mlx5_core_dev *dev,
+ struct net_device *netdev)
{
unsigned int fn = mlx5_get_dev_index(dev);
unsigned long flags;
- if (fn >= ldev->ports)
- return;
-
spin_lock_irqsave(&lag_lock, flags);
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
@@ -1257,7 +1357,7 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
int i;
spin_lock_irqsave(&lag_lock, flags);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].netdev == netdev) {
ldev->pf[i].netdev = NULL;
break;
@@ -1267,13 +1367,10 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
}
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev)
+ struct mlx5_core_dev *dev)
{
unsigned int fn = mlx5_get_dev_index(dev);
- if (fn >= ldev->ports)
- return;
-
ldev->pf[fn].dev = dev;
dev->priv.lag = ldev;
}
@@ -1281,16 +1378,13 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev)
{
- int i;
-
- for (i = 0; i < ldev->ports; i++)
- if (ldev->pf[i].dev == dev)
- break;
+ int fn;
- if (i == ldev->ports)
+ fn = mlx5_get_dev_index(dev);
+ if (ldev->pf[fn].dev != dev)
return;
- ldev->pf[i].dev = NULL;
+ ldev->pf[fn].dev = NULL;
dev->priv.lag = NULL;
}
@@ -1398,7 +1492,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
struct net_device *netdev)
{
struct mlx5_lag *ldev;
- int i;
+ int num = 0;
ldev = mlx5_lag_dev(dev);
if (!ldev)
@@ -1406,17 +1500,33 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
mutex_lock(&ldev->lock);
mlx5_ldev_add_netdev(ldev, dev, netdev);
-
- for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].netdev)
- break;
-
- if (i >= ldev->ports)
+ num = mlx5_lag_num_netdevs(ldev);
+ if (num >= ldev->ports)
set_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
mutex_unlock(&ldev->lock);
mlx5_queue_bond_work(ldev, 0);
}
+int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx)
+{
+ int i;
+
+ for (i = start_idx; i >= end_idx; i--)
+ if (ldev->pf[i].dev)
+ return i;
+ return -1;
+}
+
+int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx)
+{
+ int i;
+
+ for (i = start_idx; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].dev)
+ return i;
+ return MLX5_MAX_PORTS;
+}
+
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
@@ -1467,12 +1577,13 @@ bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
unsigned long flags;
- bool res;
+ bool res = false;
+ int idx;
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
- res = ldev && __mlx5_lag_is_active(ldev) &&
- dev == ldev->pf[MLX5_LAG_P1].dev;
+ idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ res = ldev && __mlx5_lag_is_active(ldev) && idx >= 0 && dev == ldev->pf[idx].dev;
spin_unlock_irqrestore(&lag_lock, flags);
return res;
@@ -1555,7 +1666,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].netdev == slave) {
port = i;
break;
@@ -1594,13 +1705,13 @@ struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int
if (!ldev)
goto unlock;
- if (*i == ldev->ports)
+ if (*i == MLX5_MAX_PORTS)
goto unlock;
- for (idx = *i; idx < ldev->ports; idx++)
+ mlx5_ldev_for_each(idx, *i, ldev)
if (ldev->pf[idx].dev != dev)
break;
- if (idx == ldev->ports) {
+ if (idx == MLX5_MAX_PORTS) {
*i = idx;
goto unlock;
}
@@ -1621,10 +1732,10 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
{
int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
struct mlx5_core_dev **mdev;
+ int ret = 0, i, j, idx = 0;
struct mlx5_lag *ldev;
unsigned long flags;
int num_ports;
- int ret, i, j;
void *out;
out = kvzalloc(outlen, GFP_KERNEL);
@@ -1643,8 +1754,8 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
ldev = mlx5_lag_dev(dev);
if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = ldev->ports;
- for (i = 0; i < ldev->ports; i++)
- mdev[i] = ldev->pf[i].dev;
+ mlx5_ldev_for_each(i, 0, ldev)
+ mdev[idx++] = ldev->pf[i].dev;
} else {
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
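
Throughout lag.c, the hardcoded ldev->pf[MLX5_LAG_P1].dev access is
replaced by a sequence lookup plus a negative-index guard, because pf[]
slots may now be sparsely populated. The helper's body lies outside this
excerpt; a minimal sketch of the assumed semantics ("return the array
index of the seq-th populated slot, or -1"), with the sketch_* name being
illustrative only:

    static int sketch_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq)
    {
            int i, n = 0;

            if (!ldev)
                    return -1;
            for (i = 0; i < MLX5_MAX_PORTS; i++) {
                    if (!ldev->pf[i].dev)
                            continue;       /* slot freed by an unbound PF */
                    if (n++ == seq)
                            return i;
            }
            return -1;      /* callers bail out early or with -EINVAL */
    }
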
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 50fcb1eee574..01cf72366947 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -103,7 +103,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev);
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
-void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
+void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_enabled);
void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev);
@@ -119,9 +119,24 @@ static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
!MLX5_CAP_GEN(dev, lag_master) ||
MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
+ mlx5_get_dev_index(dev) >= MLX5_MAX_PORTS ||
MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
return false;
return true;
}
+#define mlx5_ldev_for_each(i, start_index, ldev) \
+ for (int tmp = start_index; tmp = mlx5_get_next_ldev_func(ldev, tmp), \
+ i = tmp, tmp < MLX5_MAX_PORTS; tmp++)
+
+#define mlx5_ldev_for_each_reverse(i, start_index, end_index, ldev) \
+ for (int tmp = start_index, tmp1 = end_index; \
+ tmp = mlx5_get_pre_ldev_func(ldev, tmp, tmp1), \
+ i = tmp, tmp >= tmp1; tmp--)
+
+int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx);
+int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx);
+int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq);
+int mlx5_lag_num_devs(struct mlx5_lag *ldev);
+int mlx5_lag_num_netdevs(struct mlx5_lag *ldev);
#endif /* __MLX5_LAG_H__ */
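
The forward macro advances tmp to the next populated slot on every
iteration (the comma expressions run as part of the loop condition),
publishes it through i, and terminates once the MLX5_MAX_PORTS sentinel
is reached, so the loop body only ever sees slots with a bound device.
A usage sketch:

    int i;

    mlx5_ldev_for_each(i, 0, ldev) {
            /* ldev->pf[i].dev is guaranteed non-NULL here */
            mlx5_core_dbg(ldev->pf[i].dev, "visiting lag slot %d\n", i);
    }

The reverse variant walks from start_index down to end_index, which is
what the error-unwind paths use to tear down exactly the slots that were
set up.
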
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index b1aa494c76ba..aee17fcf3b36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -17,7 +17,10 @@ static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
#define MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS 2
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
- if (!mlx5_lag_is_ready(ldev))
+ int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2);
+
+ if (idx0 < 0 || idx1 < 0 || !mlx5_lag_is_ready(ldev))
return false;
if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
@@ -26,8 +29,8 @@ static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
if (ldev->ports > MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS)
return false;
- return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
- ldev->pf[MLX5_LAG_P2].dev);
+ return mlx5_esw_multipath_prereq(ldev->pf[idx0].dev,
+ ldev->pf[idx1].dev);
}
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
@@ -50,43 +53,45 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
enum mlx5_lag_port_affinity port)
{
+ int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2);
struct lag_tracker tracker = {};
- if (!__mlx5_lag_is_multipath(ldev))
+ if (idx0 < 0 || idx1 < 0 || !__mlx5_lag_is_multipath(ldev))
return;
switch (port) {
case MLX5_LAG_NORMAL_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P1].link_up = true;
- tracker.netdev_state[MLX5_LAG_P2].link_up = true;
+ tracker.netdev_state[idx0].tx_enabled = true;
+ tracker.netdev_state[idx1].tx_enabled = true;
+ tracker.netdev_state[idx0].link_up = true;
+ tracker.netdev_state[idx1].link_up = true;
break;
case MLX5_LAG_P1_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P1].link_up = true;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = false;
- tracker.netdev_state[MLX5_LAG_P2].link_up = false;
+ tracker.netdev_state[idx0].tx_enabled = true;
+ tracker.netdev_state[idx0].link_up = true;
+ tracker.netdev_state[idx1].tx_enabled = false;
+ tracker.netdev_state[idx1].link_up = false;
break;
case MLX5_LAG_P2_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = false;
- tracker.netdev_state[MLX5_LAG_P1].link_up = false;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P2].link_up = true;
+ tracker.netdev_state[idx0].tx_enabled = false;
+ tracker.netdev_state[idx0].link_up = false;
+ tracker.netdev_state[idx1].tx_enabled = true;
+ tracker.netdev_state[idx1].link_up = true;
break;
default:
- mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ mlx5_core_warn(ldev->pf[idx0].dev,
"Invalid affinity port %d", port);
return;
}
- if (tracker.netdev_state[MLX5_LAG_P1].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P1].dev->priv.events,
+ if (tracker.netdev_state[idx0].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[idx0].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
- if (tracker.netdev_state[MLX5_LAG_P2].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P2].dev->priv.events,
+ if (tracker.netdev_state[idx1].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[idx1].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
@@ -150,9 +155,14 @@ mlx5_lag_get_next_fib_dev(struct mlx5_lag *ldev,
static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
struct fib_entry_notifier_info *fen_info)
{
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct net_device *nh_dev0, *nh_dev1;
struct fib_info *fi = fen_info->fi;
struct lag_mp *mp = &ldev->lag_mp;
+ int i, dev_idx = 0;
+
+ if (idx < 0)
+ return;
/* Handle delete event */
if (event == FIB_EVENT_ENTRY_DEL) {
@@ -179,17 +189,19 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
}
if (nh_dev0 == nh_dev1) {
- mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ mlx5_core_warn(ldev->pf[idx].dev,
"Multipath offload doesn't support routes with multiple nexthops of the same device");
return;
}
if (!nh_dev1) {
if (__mlx5_lag_is_active(ldev)) {
- int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev0);
-
- i++;
- mlx5_lag_set_port_affinity(ldev, i);
+ mlx5_ldev_for_each(i, 0, ldev) {
+ dev_idx++;
+ if (ldev->pf[i].netdev == nh_dev0)
+ break;
+ }
+ mlx5_lag_set_port_affinity(ldev, dev_idx);
mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len);
}
@@ -214,6 +226,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
struct fib_info *fi)
{
struct lag_mp *mp = &ldev->lag_mp;
+ int i, dev_idx = 0;
/* Check the nh event is related to the route */
if (!mp->fib.mfi || mp->fib.mfi != fi)
@@ -221,11 +234,15 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
/* nh added/removed */
if (event == FIB_EVENT_NH_DEL) {
- int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->fib_nh_dev);
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (ldev->pf[i].netdev == fib_nh->fib_nh_dev)
+ break;
+ dev_idx++;
+ }
- if (i >= 0) {
- i = (i + 1) % 2 + 1; /* peer port */
- mlx5_lag_set_port_affinity(ldev, i);
+ if (dev_idx >= 0) {
+ dev_idx = (dev_idx + 1) % 2 + 1; /* peer port */
+ mlx5_lag_set_port_affinity(ldev, dev_idx);
}
} else if (event == FIB_EVENT_NH_ADD &&
fib_info_num_path(fi) == 2) {
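
The peer-port expression is easy to misread, so here is the worked
mapping for the two ports that multipath supports (dev_idx is the
0-based position of the port whose nexthop was removed; affinity values
are 1-based):

    /* dev_idx = 0  ->  (0 + 1) % 2 + 1 = 2  == MLX5_LAG_P2_AFFINITY
     * dev_idx = 1  ->  (1 + 1) % 2 + 1 = 1  == MLX5_LAG_P1_AFFINITY
     * i.e. traffic is steered to the surviving peer port.
     */
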
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 571ea26edd0c..ffac0bd6c895 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -15,7 +15,7 @@ static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev)
u32 pf_metadata;
int i;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
esw = dev->priv.eswitch;
pf_metadata = ldev->lag_mpesw.pf_metadata[i];
@@ -36,7 +36,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
u32 pf_metadata;
int i, err;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
esw = dev->priv.eswitch;
pf_metadata = mlx5_esw_match_metadata_alloc(esw);
@@ -52,7 +52,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
goto err_metadata;
}
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
(void *)0);
@@ -68,13 +68,15 @@ err_metadata:
#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
static int enable_mpesw(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
int err;
int i;
- if (ldev->mode != MLX5_LAG_MODE_NONE)
+ if (idx < 0 || ldev->mode != MLX5_LAG_MODE_NONE)
return -EINVAL;
+ dev0 = ldev->pf[idx].dev;
if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS)
return -EOPNOTSUPP;
@@ -98,7 +100,7 @@ static int enable_mpesw(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
goto err_rescan_drivers;
@@ -112,7 +114,7 @@ err_rescan_drivers:
mlx5_deactivate_lag(ldev);
err_add_devices:
mlx5_lag_add_devices(ldev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
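
One invariant worth spelling out: lag_mpesw.pf_metadata[] stays indexed
by pf[] slot number, so allocation and cleanup must walk the same
populated-slot sequence. A comment-sized restatement (the free side
assumes mlx5_esw_match_metadata_free(), which is not shown in this hunk):

    /* set:     pf_metadata[i] = mlx5_esw_match_metadata_alloc(esw);
     * cleanup: mlx5_esw_match_metadata_free(esw, pf_metadata[i]);
     * Both run under mlx5_ldev_for_each(i, 0, ldev), so every slot that
     * was allocated is exactly the one that gets freed.
     */
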
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index ab2717012b79..22241f52716c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -39,15 +39,18 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer,
u8 *ports)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_namespace *ns;
- int err, i;
- int idx;
- int j;
+ struct mlx5_core_dev *dev;
+ int err, i, j, k, idx;
+
+ if (first_idx < 0)
+ return -EINVAL;
+ dev = ldev->pf[first_idx].dev;
ft_attr.max_fte = ldev->ports * ldev->buckets;
ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;
@@ -74,7 +77,7 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.flags |= FLOW_ACT_NO_APPEND;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
u8 affinity;
@@ -88,13 +91,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
&dest, 1);
if (IS_ERR(lag_definer->rules[idx])) {
err = PTR_ERR(lag_definer->rules[idx]);
- do {
+ mlx5_ldev_for_each_reverse(k, i, 0, ldev) {
while (j--) {
- idx = i * ldev->buckets + j;
+ idx = k * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
}
j = ldev->buckets;
- } while (i--);
+			}
goto destroy_fg;
}
}
@@ -295,11 +298,16 @@ static struct mlx5_lag_definer *
mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
enum mlx5_traffic_types tt, bool tunnel, u8 *ports)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_definer *lag_definer;
+ struct mlx5_core_dev *dev;
u32 *match_definer_mask;
int format_id, err;
+ if (first_idx < 0)
+ return ERR_PTR(-EINVAL);
+
+ dev = ldev->pf[first_idx].dev;
lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
if (!lag_definer)
return ERR_PTR(-ENOMEM);
@@ -341,12 +349,15 @@ free_lag_definer:
static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
- int idx;
- int i;
- int j;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev;
+ int idx, i, j;
+
+ if (first_idx < 0)
+ return;
- for (i = 0; i < ldev->ports; i++) {
+ dev = ldev->pf[first_idx].dev;
+ mlx5_ldev_for_each(i, first_idx, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
@@ -501,10 +512,15 @@ static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct ttc_params ttc_params = {};
+ struct mlx5_core_dev *dev;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev = ldev->pf[first_idx].dev;
mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
return PTR_ERR_OR_ZERO(port_sel->outer.ttc);
@@ -512,10 +528,15 @@ static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct ttc_params ttc_params = {};
+ struct mlx5_core_dev *dev;
+
+ if (first_idx < 0)
+ return -EINVAL;
+ dev = ldev->pf[first_idx].dev;
mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
return PTR_ERR_OR_ZERO(port_sel->inner.ttc);
@@ -565,7 +586,7 @@ static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
if (ldev->v2p_map[idx] == ports[idx])
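
The rules[] array built above is a flattened [port][bucket] matrix
(idx = i * ldev->buckets + j). A restatement of the error-unwind
invariant, assuming creation failed at port i, bucket j: buckets 0..j-1
of port i exist, and every bucket of each earlier populated port exists,
which is exactly what the reverse walk deletes:

    mlx5_ldev_for_each_reverse(k, i, 0, ldev) {
            while (j--)
                    mlx5_del_flow_rules(lag_definer->rules[k * ldev->buckets + j]);
            j = ldev->buckets;      /* earlier ports were fully populated */
    }
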
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
index 4a078113e292..762d55ba9e51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
@@ -497,7 +497,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
memset(&dest, 0, sizeof(struct mlx5_flow_destination));
memset(&flow_act, 0, sizeof(flow_act));
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter);
+ dest.counter = tx_tables->check_miss_rule_counter;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
@@ -519,7 +519,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
flow_act.flags = FLOW_ACT_NO_APPEND;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter);
+ dest.counter = tx_tables->check_rule_counter;
rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1200,7 +1200,7 @@ static int macsec_fs_rx_create_check_decap_rule(struct mlx5_macsec_fs *macsec_fs
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- roce_dest[dstn].counter_id = mlx5_fc_id(rx_tables->check_rule_counter);
+ roce_dest[dstn].counter = rx_tables->check_rule_counter;
rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, roce_dest, dstn + 1);
if (IS_ERR(rule)) {
@@ -1592,7 +1592,7 @@ static int macsec_fs_rx_create(struct mlx5_macsec_fs *macsec_fs)
memset(&flow_act, 0, sizeof(flow_act));
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter);
+ dest.counter = rx_tables->check_miss_rule_counter;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
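
All counter destinations in this patch move from the raw hardware id to
the counter object itself, letting the flow-steering core resolve (and
hold a reference to) the id internally. A sketch of the converted call
pattern, where ft, spec and fc are illustrative placeholders:

    struct mlx5_flow_act flow_act = {};
    struct mlx5_flow_destination dest = {};
    struct mlx5_flow_handle *rule;

    dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
    dest.counter = fc;              /* struct mlx5_fc *, no mlx5_fc_id() */
    flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
                      MLX5_FLOW_CONTEXT_ACTION_COUNT;
    rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
    if (IS_ERR(rule))
            return PTR_ERR(rule);
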
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 220a9ac75c8b..a108d8c726f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -664,6 +664,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_current_uc_list,
ilog2(max_uc_list));
+ /* enable absolute native port num */
+ if (MLX5_CAP_GEN_MAX(dev, abs_native_port_num))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, abs_native_port_num, 1);
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
@@ -941,9 +945,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
mlx5_pci_vsc_init(dev);
- err = pci_enable_ptm(pdev, NULL);
- if (err)
- mlx5_core_info(dev, "PTM is not supported by PCIe\n");
+ pci_enable_ptm(pdev, NULL);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
index 0b745968e21e..3d4965213b01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -60,9 +60,11 @@ void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
{
/* Besides the control queue, half of the queues are
- * reguler HWS queues, and the other half are BWC queues.
+ * regular HWS queues, and the other half are BWC queues.
*/
- return (ctx->queues - 1) / 2;
+ if (mlx5hws_context_bwc_supported(ctx))
+ return (ctx->queues - 1) / 2;
+ return 0;
}
static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
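
A worked split for the fixed helper, assuming a context opened with
ctx->queues = 17 (16 data queues plus the control queue):

    /* mlx5hws_bwc_queues(ctx) = (17 - 1) / 2 = 8 BWC queues,
     * leaving 8 regular HWS queues plus 1 control queue.
     * Without BWC support the helper now reports 0 instead of half of
     * a pool that was never reserved for BWC in the first place.
     */
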
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
index fd48b05e91e0..4a8928f33bb9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
@@ -161,8 +161,10 @@ static int hws_context_init_hws(struct mlx5hws_context *ctx,
if (ret)
goto uninit_pd;
- if (attr->bwc)
- ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+	/* The context supports the backward-compatible API
+	 * and does not support the native HWS API.
+ */
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
if (ret)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
index 47f5cc8de73f..1c9cc4fba083 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
@@ -8,6 +8,7 @@ enum mlx5hws_context_flags {
MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0,
MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1,
MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2,
+ MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT = 1 << 3,
};
enum mlx5hws_context_shared_stc_type {
@@ -58,6 +59,11 @@ static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx)
return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
}
+static inline bool mlx5hws_context_native_supported(struct mlx5hws_context *ctx)
+{
+ return ctx->flags & MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT;
+}
+
bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx);
u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
index f39d636ff39a..5121951f2778 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -70,7 +70,6 @@ enum mlx5hws_send_queue_actions {
struct mlx5hws_context_attr {
u16 queues;
u16 queue_size;
- bool bwc; /* add support for backward compatible API*/
};
struct mlx5hws_table_attr {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
index 883b4ed30892..20fe126ffd22 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
@@ -896,15 +896,18 @@ close_cq:
return err;
}
-void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
+static void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
{
+ if (!queue->num_entries)
+ return; /* this queue wasn't initialized */
+
hws_send_ring_close(queue);
kfree(queue->completed.entries);
}
-int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
- struct mlx5hws_send_engine *queue,
- u16 queue_size)
+static int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ u16 queue_size)
{
int err;
@@ -1005,7 +1008,7 @@ int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
u16 queue_size)
{
int err = 0;
- u32 i;
+ int i = 0;
/* Open one extra queue for control path */
ctx->queues = queues + 1;
@@ -1021,7 +1024,13 @@ int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
goto free_bwc_locks;
}
- for (i = 0; i < ctx->queues; i++) {
+	/* If the native API isn't supported, skip the unused native queues:
+	 * initialize only the BWC queues and the control queue.
+ */
+ if (!mlx5hws_context_native_supported(ctx))
+ i = mlx5hws_bwc_get_queue_id(ctx, 0);
+
+ for (; i < ctx->queues; i++) {
err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size);
if (err)
goto close_send_queues;
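
The open loop relies on a fixed queue layout. mlx5hws_bwc_get_queue_id()
is not shown in this excerpt, so the layout below is an assumption that
is consistent with the starting index it produces:

    /* [0 .. n-1]   native HWS queues  (left zeroed when !native)
     * [n .. 2n-1]  BWC queues
     * [2n]         control queue
     *
     * Starting at mlx5hws_bwc_get_queue_id(ctx, 0) skips the native
     * engines entirely; the new queue->num_entries check in
     * mlx5hws_send_queue_close() then makes closing them a no-op.
     */
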
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
index b50825d6dc53..f833092235c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
@@ -189,12 +189,6 @@ void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue);
void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue);
-void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue);
-
-int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
- struct mlx5hws_send_engine *queue,
- u16 queue_size);
-
void mlx5hws_send_queues_close(struct mlx5hws_context *ctx);
int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
index 49f22cad92bf..60cb4527588a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
@@ -8,7 +8,7 @@
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
((dmn)->info.caps.dmn_type##_sw_owner || \
((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
- (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))
+ (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_8))
bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
index e94fbb015efa..c8b8ff80c7c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
@@ -555,7 +555,7 @@ void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps,
+ ste_ctx->set_actions_tx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps,
hw_ste_arr, attr, added_stes);
}
@@ -566,7 +566,7 @@ void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
+ ste_ctx->set_actions_rx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps,
hw_ste_arr, attr, added_stes);
}
@@ -1458,6 +1458,8 @@ struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
return mlx5dr_ste_get_ctx_v1();
else if (version == MLX5_STEERING_FORMAT_CONNECTX_7)
return mlx5dr_ste_get_ctx_v2();
+ else if (version == MLX5_STEERING_FORMAT_CONNECTX_8)
+ return mlx5dr_ste_get_ctx_v3();
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
index 54a6619c3ecb..5f409dc30aca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
@@ -160,13 +160,15 @@ struct mlx5dr_ste_ctx {
/* Actions */
u32 actions_caps;
- void (*set_actions_rx)(struct mlx5dr_domain *dmn,
+ void (*set_actions_rx)(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *hw_ste_arr,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes);
- void (*set_actions_tx)(struct mlx5dr_domain *dmn,
+ void (*set_actions_tx)(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *hw_ste_arr,
@@ -197,7 +199,17 @@ struct mlx5dr_ste_ctx {
u16 *used_hw_action_num);
int (*alloc_modify_hdr_chunk)(struct mlx5dr_action *action);
void (*dealloc_modify_hdr_chunk)(struct mlx5dr_action *action);
-
+ /* Actions bit set */
+ void (*set_encap)(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size);
+ void (*set_push_vlan)(u8 *ste, u8 *d_action,
+ u32 vlan_hdr);
+ void (*set_pop_vlan)(u8 *hw_ste_p, u8 *s_action,
+ u8 vlans_num);
+ void (*set_rx_decap)(u8 *hw_ste_p, u8 *s_action);
+ void (*set_encap_l3)(u8 *hw_ste_p, u8 *frst_s_action,
+ u8 *scnd_d_action, u32 reformat_id,
+ int size);
/* Send */
void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
};
@@ -205,5 +217,6 @@ struct mlx5dr_ste_ctx {
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void);
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void);
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void);
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v3(void);
#endif /* _DR_STE_ */
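
With the five action encoders now behind mlx5dr_ste_ctx, a newer STE
format can reuse the whole v1 action-building flow and swap only the
encodings. A sketch of the assumed shape of the new v3 context
(dr_ste_v3.c itself is outside this excerpt, and the dr_ste_v3_* names
are illustrative):

    static struct mlx5dr_ste_ctx ste_ctx_v3 = {
            /* generic v1 builders, reused unchanged */
            .set_actions_tx = &dr_ste_v1_set_actions_tx,
            .set_actions_rx = &dr_ste_v1_set_actions_rx,
            /* Actions bit set: format-specific encoders */
            .set_encap      = &dr_ste_v3_set_encap,
            .set_push_vlan  = &dr_ste_v3_set_push_vlan,
            .set_pop_vlan   = &dr_ste_v3_set_pop_vlan,
            .set_rx_decap   = &dr_ste_v3_set_rx_decap,
            .set_encap_l3   = &dr_ste_v3_set_encap_l3,
    };
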
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
index e9f6c7ed7a7b..42536bee55e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
@@ -406,7 +406,8 @@ static void dr_ste_v0_arr_init_next(u8 **last_ste,
}
static void
-dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
+dr_ste_v0_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -476,7 +477,8 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
}
static void
-dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
+dr_ste_v0_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
index 1d49704b9542..7f83d77c43ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
@@ -5,136 +5,6 @@
#include "mlx5_ifc_dr_ste_v1.h"
#include "dr_ste_v1.h"
-#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
- ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
- DR_STE_V1_LU_TYPE_##lookup_type##_O)
-
-enum dr_ste_v1_entry_format {
- DR_STE_V1_TYPE_BWC_BYTE = 0x0,
- DR_STE_V1_TYPE_BWC_DW = 0x1,
- DR_STE_V1_TYPE_MATCH = 0x2,
- DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
-};
-
-/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
-enum {
- DR_STE_V1_LU_TYPE_NOP = 0x0000,
- DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
- DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
- DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
- DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
- DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
- DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
- DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
- DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
- DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
- DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
- DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
- DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
- DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
- DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
- DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
- DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
- DR_STE_V1_LU_TYPE_GRE = 0x010d,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
- DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
- DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
- DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
- DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
- DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
- DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
- DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
-};
-
-enum dr_ste_v1_header_anchors {
- DR_STE_HEADER_ANCHOR_START_OUTER = 0x00,
- DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02,
- DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
- DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13,
- DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
-};
-
-enum dr_ste_v1_action_size {
- DR_STE_ACTION_SINGLE_SZ = 4,
- DR_STE_ACTION_DOUBLE_SZ = 8,
- DR_STE_ACTION_TRIPLE_SZ = 12,
-};
-
-enum dr_ste_v1_action_insert_ptr_attr {
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */
-};
-
-enum dr_ste_v1_action_id {
- DR_STE_V1_ACTION_ID_NOP = 0x00,
- DR_STE_V1_ACTION_ID_COPY = 0x05,
- DR_STE_V1_ACTION_ID_SET = 0x06,
- DR_STE_V1_ACTION_ID_ADD = 0x07,
- DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08,
- DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09,
- DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a,
- DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b,
- DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c,
- DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
- DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
- DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
- DR_STE_V1_ACTION_ID_ASO = 0x12,
- DR_STE_V1_ACTION_ID_TRAILER = 0x13,
- DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
- DR_STE_V1_ACTION_ID_MAX = 0x21,
- /* use for special cases */
- DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22,
-};
-
-enum {
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
- DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
- DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
- DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
- DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
- DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
- DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
- DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
- DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
- DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
- DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
- DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
- DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
- DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
-};
-
-enum dr_ste_v1_aso_ctx_type {
- DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
-};
-
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
@@ -379,13 +249,12 @@ static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
}
-static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
+void dr_ste_v1_set_reparse(u8 *hw_ste_p)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
-static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
- u32 reformat_id, int size)
+void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
DR_STE_V1_ACTION_ID_INSERT_POINTER);
@@ -432,8 +301,7 @@ static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
- u32 vlan_hdr)
+void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr)
{
MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
@@ -446,7 +314,7 @@ static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
+void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
@@ -459,11 +327,8 @@ static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
- u8 *frst_s_action,
- u8 *scnd_d_action,
- u32 reformat_id,
- int size)
+void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
+ u32 reformat_id, int size)
{
/* Remove L2 headers */
MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
@@ -483,7 +348,7 @@ static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
+void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
{
MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
@@ -620,7 +485,8 @@ static void dr_ste_v1_arr_init_next_match_range(u8 **last_ste,
dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH_RANGES);
}
-void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
+void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -640,7 +506,7 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
+ ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
@@ -677,8 +543,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
- dr_ste_v1_set_push_vlan(last_ste, action,
- attr->vlans.headers[i]);
+ ste_ctx->set_push_vlan(last_ste, action,
+ attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
@@ -691,9 +557,9 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
- dr_ste_v1_set_encap(last_ste, action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
@@ -706,10 +572,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
}
d_action = action + DR_STE_ACTION_SINGLE_SZ;
- dr_ste_v1_set_encap_l3(last_ste,
- action, d_action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
action += DR_STE_ACTION_TRIPLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
@@ -776,7 +642,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
-void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
+void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -799,7 +666,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
allow_modify_hdr = false;
allow_ctr = false;
} else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
- dr_ste_v1_set_rx_decap(last_ste, action);
+ ste_ctx->set_rx_decap(last_ste, action);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
allow_modify_hdr = false;
@@ -827,7 +694,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
+ ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
allow_ctr = false;
@@ -868,8 +735,8 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_push_vlan(last_ste, action,
- attr->vlans.headers[i]);
+ ste_ctx->set_push_vlan(last_ste, action,
+ attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
@@ -895,9 +762,9 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_encap(last_ste, action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
@@ -912,10 +779,10 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
d_action = action + DR_STE_ACTION_SINGLE_SZ;
- dr_ste_v1_set_encap_l3(last_ste,
- action, d_action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
allow_modify_hdr = false;
} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
@@ -1027,9 +894,6 @@ void dr_ste_v1_set_action_copy(u8 *d_action,
MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
}
-#define DR_STE_DECAP_L3_ACTION_NUM 8
-#define DR_STE_L2_HDR_MAX_SZ 20
-
int dr_ste_v1_set_action_decap_l3_list(void *data,
u32 data_sz,
u8 *hw_action,
@@ -2330,7 +2194,12 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = {
.set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
.alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
.dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
-
+ /* Actions bit set */
+ .set_encap = &dr_ste_v1_set_encap,
+ .set_push_vlan = &dr_ste_v1_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v1_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v1_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v1_set_encap_l3,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
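
The net effect of the refactor above: the shared v1 builders no longer
call their own encoders directly but dispatch through the context, e.g.:

    /* before:  dr_ste_v1_set_push_vlan(last_ste, action, vlan_hdr);
     * after:   ste_ctx->set_push_vlan(last_ste, action, vlan_hdr);
     * so later formats inherit the builder while overriding the encoding.
     */
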
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
index e2fc69867088..a8d9e308d339 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
@@ -7,6 +7,138 @@
#include "dr_types.h"
#include "dr_ste.h"
+#define DR_STE_DECAP_L3_ACTION_NUM 8
+#define DR_STE_L2_HDR_MAX_SZ 20
+#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
+ ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
+ DR_STE_V1_LU_TYPE_##lookup_type##_O)
+
+enum dr_ste_v1_entry_format {
+ DR_STE_V1_TYPE_BWC_BYTE = 0x0,
+ DR_STE_V1_TYPE_BWC_DW = 0x1,
+ DR_STE_V1_TYPE_MATCH = 0x2,
+ DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
+};
+
+/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
+enum {
+ DR_STE_V1_LU_TYPE_NOP = 0x0000,
+ DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
+ DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
+ DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
+ DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
+ DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
+ DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
+ DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
+ DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
+ DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
+ DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
+ DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
+ DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
+ DR_STE_V1_LU_TYPE_GRE = 0x010d,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
+ DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
+ DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
+ DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
+};
+
+enum dr_ste_v1_header_anchors {
+ DR_STE_HEADER_ANCHOR_START_OUTER = 0x00,
+ DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
+ DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
+};
+
+enum dr_ste_v1_action_size {
+ DR_STE_ACTION_SINGLE_SZ = 4,
+ DR_STE_ACTION_DOUBLE_SZ = 8,
+ DR_STE_ACTION_TRIPLE_SZ = 12,
+};
+
+enum dr_ste_v1_action_insert_ptr_attr {
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */
+};
+
+enum dr_ste_v1_action_id {
+ DR_STE_V1_ACTION_ID_NOP = 0x00,
+ DR_STE_V1_ACTION_ID_COPY = 0x05,
+ DR_STE_V1_ACTION_ID_SET = 0x06,
+ DR_STE_V1_ACTION_ID_ADD = 0x07,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b,
+ DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c,
+ DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
+ DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
+ DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
+ DR_STE_V1_ACTION_ID_ASO = 0x12,
+ DR_STE_V1_ACTION_ID_TRAILER = 0x13,
+ DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
+ DR_STE_V1_ACTION_ID_MAX = 0x21,
+ /* use for special cases */
+ DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22,
+};
+
+enum {
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
+ DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
+ DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
+ DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
+};
+
+enum dr_ste_v1_aso_ctx_type {
+ DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
+};
+
bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p);
void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
@@ -17,11 +149,18 @@ u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p);
void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi);
void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size);
-void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set,
- u32 actions_caps, u8 *last_ste,
+void dr_ste_v1_set_reparse(u8 *hw_ste_p);
+void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size);
+void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr);
+void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num);
+void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
+ u32 reformat_id, int size);
+void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action);
+void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn,
+ u8 *action_type_set, u32 actions_caps, u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
-void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set,
- u32 actions_caps, u8 *last_ste,
+void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn,
+ u8 *action_type_set, u32 actions_caps, u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter,
u8 length, u32 data);
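
The reworked prototypes above turn the v1 action setters into shared building blocks: dr_ste_v1_set_actions_tx/rx now receive the mlx5dr_ste_ctx, so the common v1 action-building path can reach the per-version encap/vlan/decap setters through the context instead of hard-coding the v1 variants. A minimal sketch of that dispatch, assuming only the callback fields added in this patch (the wrapper name is illustrative):

/* Sketch only: the common path picks the per-version encap setter
 * through the context. v1/v2 wire this callback to
 * dr_ste_v1_set_encap, v3 to dr_ste_v3_set_encap.
 */
static void example_encap_via_ctx(struct mlx5dr_ste_ctx *ste_ctx,
				  u8 *last_ste, u8 *d_action,
				  u32 reformat_id, int size)
{
	ste_ctx->set_encap(last_ste, d_action, reformat_id, size);
}
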
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
index 808b013cf48c..0882dba0f64b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
@@ -2,167 +2,7 @@
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include "dr_ste_v1.h"
-
-enum {
- DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
- DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
- DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
- DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
- DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
- DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
- DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
- DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
- DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
- DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
- DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
- DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
- DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
- DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
- DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
- DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95,
-};
-
-static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = {
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
- },
-};
+#include "dr_ste_v2.h"
static struct mlx5dr_ste_ctx ste_ctx_v2 = {
/* Builders */
@@ -223,7 +63,12 @@ static struct mlx5dr_ste_ctx ste_ctx_v2 = {
.set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
.alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
.dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
-
+ /* Actions bit set */
+ .set_encap = &dr_ste_v1_set_encap,
+ .set_push_vlan = &dr_ste_v1_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v1_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v1_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v1_set_encap_l3,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h
new file mode 100644
index 000000000000..d853fde49cfc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef _DR_STE_V2_
+#define _DR_STE_V2_
+
+enum {
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
+ DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
+ DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
+ DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
+ DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
+ DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
+ DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
+ DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
+ DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
+ DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
+ DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95,
+};
+
+static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = {
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
+ },
+};
+
+#endif /* _DR_STE_V2_ */
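
dr_ste_v2.h now exports the v2 modify-header field map so both the v2 and v3 contexts can reference it: each entry translates an MLX5_ACTION_IN_FIELD_* software field into a hardware field id plus the bit range (start/end) to modify, optionally qualified by an L3/L4 type. A hedged sketch of how such a table is typically consumed (the lookup helper is illustrative; only the array and the two ctx fields come from this patch):

/* Sketch only: map a software field id to its hardware
 * modify-header descriptor via the per-version table.
 */
static const struct mlx5dr_ste_action_modify_field *
example_lookup_modify_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
{
	if (sw_field >= ste_ctx->modify_field_arr_sz)
		return NULL;	/* field unknown to this STE version */
	return &ste_ctx->modify_field_arr[sw_field];
}
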
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
new file mode 100644
index 000000000000..cc60ce1d274e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "dr_ste_v1.h"
+#include "dr_ste_v2.h"
+
+static void dr_ste_v3_set_encap(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ /* The hardware expects here size in words (2 byte) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_push_vlan(u8 *ste, u8 *d_action,
+ u32 vlan_hdr)
+{
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE);
+ /* The hardware expects the offset to the vlan header here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, start_offset,
+ HDR_LEN_L2_MACS >> 1);
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, inline_data, vlan_hdr);
+ dr_ste_v1_set_reparse(ste);
+}
+
+static void dr_ste_v3_set_pop_vlan(u8 *hw_ste_p, u8 *s_action,
+ u8 vlans_num)
+{
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
+ /* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_encap_l3(u8 *hw_ste_p,
+ u8 *frst_s_action,
+ u8 *scnd_d_action,
+ u32 reformat_id,
+ int size)
+{
+ /* Remove L2 headers */
+ MLX5_SET(ste_single_action_remove_header_v3, frst_s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, frst_s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4);
+
+ /* Encapsulate with given reformat ID */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ /* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
+{
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_MAC);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static int
+dr_ste_v3_set_action_decap_l3_list(void *data, u32 data_sz,
+ u8 *hw_action, u32 hw_action_sz,
+ u16 *used_hw_action_num)
+{
+ u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
+ void *data_ptr = padded_data;
+ u16 used_actions = 0;
+ u32 inline_data_sz;
+ u32 i;
+
+ if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
+ return -EINVAL;
+
+ inline_data_sz =
+ MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v3, inline_data);
+
+ /* Add alignment padding */
+ memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
+
+ /* Remove L2L3 outer headers */
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ used_actions++; /* Remove and NOP are a single double action */
+
+ /* Point to the last dword of the header */
+ data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
+
+ /* Add the new header using the inline action 4 bytes at a time.
+ * The header is added in reverse order to the beginning of the
+ * packet to avoid incorrect parsing by the HW. Since the header
+ * is 14B or 18B, two extra bytes are padded and later removed.
+ */
+ for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
+ void *addr_inline;
+
+ MLX5_SET(ste_double_action_insert_with_inline_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE);
+ /* The hardware expects the offset here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_inline_v3, hw_action, start_offset, 0);
+
+ /* Copy bytes one by one to avoid endianness problems */
+ addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v3,
+ hw_action, inline_data);
+ memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ used_actions++;
+ }
+
+ /* Remove first 2 extra bytes */
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, start_offset, 0);
+ /* The hardware expects here size in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, remove_size, 1);
+ used_actions++;
+
+ *used_hw_action_num = used_actions;
+
+ return 0;
+}
+
+static struct mlx5dr_ste_ctx ste_ctx_v3 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v1_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
+ .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
+ .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
+ .build_icmp_init = &dr_ste_v1_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+ .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
+ .build_tnl_geneve_tlv_opt_exist_init =
+ &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
+ .build_register_0_init = &dr_ste_v1_build_register_0_init,
+ .build_register_1_init = &dr_ste_v1_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
+ .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
+ .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
+ .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
+ .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
+ .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
+
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v1_init,
+ .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
+ .set_miss_addr = &dr_ste_v1_set_miss_addr,
+ .get_miss_addr = &dr_ste_v1_get_miss_addr,
+ .set_hit_addr = &dr_ste_v1_set_hit_addr,
+ .set_byte_mask = &dr_ste_v1_set_byte_mask,
+ .get_byte_mask = &dr_ste_v1_get_byte_mask,
+
+ /* Actions */
+ .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
+ DR_STE_CTX_ACTION_CAP_RX_PUSH |
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+ .set_actions_rx = &dr_ste_v1_set_actions_rx,
+ .set_actions_tx = &dr_ste_v1_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v2_action_modify_field_arr,
+ .set_action_set = &dr_ste_v1_set_action_set,
+ .set_action_add = &dr_ste_v1_set_action_add,
+ .set_action_copy = &dr_ste_v1_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v3_set_action_decap_l3_list,
+ .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
+ .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
+ /* Actions bit set */
+ .set_encap = &dr_ste_v3_set_encap,
+ .set_push_vlan = &dr_ste_v3_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v3_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v3_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v3_set_encap_l3,
+ /* Send */
+ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v3(void)
+{
+ return &ste_ctx_v3;
+}
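
mlx5dr_ste_get_ctx_v3() mirrors the existing per-version getters, and the mlx5dr.h hunk below lifts the supported steering format to ConnectX-8; the caller is expected to select a context by the device's steering_format_version capability. An illustrative dispatch, assuming the v0/v1/v2 getters already present in dr_ste.c and the MLX5_STEERING_FORMAT_* enum values:

/* Sketch only: choose an STE context by steering format version. */
static struct mlx5dr_ste_ctx *example_get_ste_ctx(u8 format_ver)
{
	switch (format_ver) {
	case MLX5_STEERING_FORMAT_CONNECTX_5:
		return mlx5dr_ste_get_ctx_v0();
	case MLX5_STEERING_FORMAT_CONNECTX_6DX:
		return mlx5dr_ste_get_ctx_v1();
	case MLX5_STEERING_FORMAT_CONNECTX_7:
		return mlx5dr_ste_get_ctx_v2();
	case MLX5_STEERING_FORMAT_CONNECTX_8:
		return mlx5dr_ste_get_ctx_v3();
	default:
		return NULL;
	}
}
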
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
index 4b349d4005e4..8007d3f523c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
@@ -521,7 +521,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
- id = dst->dest_attr.counter_id;
+ id = mlx5_fc_id(dst->dest_attr.counter);
tmp_action =
mlx5dr_action_create_flow_counter(id);
if (!tmp_action) {
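
With the destination now carrying the struct mlx5_fc pointer itself, fs_dr resolves the raw hardware id only at rule-translation time via mlx5_fc_id(). From a caller's perspective the reworked API looks like this hedged sketch (everything except the mlx5_* symbols is illustrative):

/* Sketch only: attach a flow counter using the new dst.counter
 * field instead of a pre-resolved counter id.
 */
static struct mlx5_flow_handle *
example_add_counted_rule(struct mlx5_flow_table *ft,
			 struct mlx5_flow_spec *spec,
			 struct mlx5_fc *fc)
{
	struct mlx5_flow_destination dst = {};
	struct mlx5_flow_act flow_act = {};

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dst.counter = fc;	/* was: dst.counter_id = mlx5_fc_id(fc) */

	return mlx5_add_flow_rules(ft, spec, &flow_act, &dst, 1);
}
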
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
index fb078fa0f0cc..898c3618ff26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
@@ -600,4 +600,44 @@ struct mlx5_ifc_ste_double_action_aso_v1_bits {
};
};
+struct mlx5_ifc_ste_single_action_remove_header_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 end_anchor[0x7];
+ u8 reserved_at_16[0x1];
+ u8 outer_l4_remove[0x1];
+ u8 reserved_at_18[0x4];
+ u8 decap[0x1];
+ u8 vni_to_cqe[0x1];
+ u8 qos_profile[0x2];
+};
+
+struct mlx5_ifc_ste_single_action_remove_header_size_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 outer_l4_remove[0x1];
+ u8 reserved_at_18[0x2];
+ u8 remove_size[0x6];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_inline_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 reserved_at_17[0x9];
+
+ u8 inline_data[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_ptr_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 size[0x6];
+ u8 attributes[0x3];
+
+ u8 pointer[0x20];
+};
+
#endif /* MLX5_IFC_DR_H */
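
Every size and offset field in these v3 action layouts counts 2-byte words rather than bytes, which is why the v3 setters above halve byte lengths. A small worked sketch of the convention, reusing the pop-vlan constants from this patch (the reparse handling of the real setter is omitted):

/* Sketch only: one vlan header is HDR_LEN_L2_VLAN = 4 bytes,
 * i.e. 2 words, so popping vlans_num headers removes
 * (HDR_LEN_L2_VLAN >> 1) * vlans_num words.
 */
static void example_fill_pop_vlan(u8 *s_action, u8 vlans_num)
{
	MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
		 action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
		 remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);
}
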
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
index 3ac7dc67509f..0bb3724c10c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
@@ -160,7 +160,7 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev)
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
(MLX5_CAP_GEN(dev, steering_format_version) <=
- MLX5_STEERING_FORMAT_CONNECTX_7)));
+ MLX5_STEERING_FORMAT_CONNECTX_8)));
}
/* buddy functions & structure */
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 5f581e71e201..36099047560d 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1952,7 +1952,7 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
goto out_free;
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
- dests[1].counter_id = mlx5_fc_id(node->ucast_counter.counter);
+ dests[1].counter = node->ucast_counter.counter;
#endif
node->ucast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS);
if (IS_ERR(node->ucast_rule)) {
@@ -1961,7 +1961,7 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
}
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
- dests[1].counter_id = mlx5_fc_id(node->mcast_counter.counter);
+ dests[1].counter = node->mcast_counter.counter;
#endif
memset(dmac_c, 0, ETH_ALEN);
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 438db888bde0..2a69d9d71276 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -163,7 +163,7 @@ struct mlx5_flow_destination {
u32 tir_num;
u32 ft_num;
struct mlx5_flow_table *ft;
- u32 counter_id;
+ struct mlx5_fc *counter;
struct {
u16 num;
u16 vhca_id;
@@ -299,6 +299,8 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+struct mlx5_fc *mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size);
+void mlx5_fc_local_destroy(struct mlx5_fc *counter);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
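
The new mlx5_fc_local_create()/mlx5_fc_local_destroy() pair lets a consumer wrap an externally owned slice of a bulk hardware counter in a struct mlx5_fc, which can then be plugged straight into the reworked dst.counter destination. A hedged usage sketch, assuming the new helper follows the ERR_PTR convention of mlx5_fc_create() (the id/offset/bulk values are placeholders):

/* Sketch only: wrap an already-allocated bulk counter range so it
 * can be used as a flow destination; values are illustrative.
 */
static struct mlx5_fc *example_wrap_local_counter(void)
{
	struct mlx5_fc *fc;

	fc = mlx5_fc_local_create(0x100 /* base counter id */,
				  4     /* offset within the bulk */,
				  128   /* bulk size, in counters */);
	if (IS_ERR(fc))
		return NULL;

	/* use as dst.counter = fc; release with mlx5_fc_local_destroy(fc) */
	return fc;
}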