Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_tc.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 887
1 file changed, 731 insertions(+), 156 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index fade7233dac5..11c27e4fadf6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -42,14 +42,25 @@
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
+#include <net/tc_act/tc_pedit.h>
#include <net/vxlan.h>
+#include <net/arp.h>
#include "en.h"
+#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"
+struct mlx5_nic_flow_attr {
+ u32 action;
+ u32 flow_tag;
+ u32 mod_hdr_id;
+};
+
enum {
MLX5E_TC_FLOW_ESWITCH = BIT(0),
+ MLX5E_TC_FLOW_NIC = BIT(1),
+ MLX5E_TC_FLOW_OFFLOADED = BIT(2),
};
struct mlx5e_tc_flow {
@@ -58,7 +69,16 @@ struct mlx5e_tc_flow {
u8 flags;
struct mlx5_flow_handle *rule;
struct list_head encap; /* flows sharing the same encap */
- struct mlx5_esw_flow_attr *attr;
+ union {
+ struct mlx5_esw_flow_attr esw_attr[0];
+ struct mlx5_nic_flow_attr nic_attr[0];
+ };
+};
+
+struct mlx5e_tc_flow_parse_attr {
+ struct mlx5_flow_spec spec;
+ int num_mod_hdr_actions;
+ void *mod_hdr_actions;
};
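
A minimal userspace sketch of the allocation idiom introduced above (the struct names here are simplified stand-ins, not the driver types): the per-flow attribute is co-allocated right behind struct mlx5e_tc_flow, and the zero-length-array union lets the same trailing bytes be read as either the NIC or the eswitch attribute:

#include <stdio.h>
#include <stdlib.h>

/* simplified stand-ins for mlx5_nic_flow_attr / mlx5_esw_flow_attr */
struct nic_attr { unsigned int action, flow_tag, mod_hdr_id; };
struct esw_attr { unsigned int action, encap_id, mod_hdr_id, vlan; };

struct tc_flow {
	unsigned long cookie;
	unsigned int flags;
	/* trailing storage, interpreted according to the flow type */
	union {
		struct esw_attr esw_attr[0];
		struct nic_attr nic_attr[0];
	};
};

int main(void)
{
	int is_eswitch = 0;
	size_t attr_size = is_eswitch ? sizeof(struct esw_attr)
				      : sizeof(struct nic_attr);

	/* one allocation covers the flow and its type-specific attribute */
	struct tc_flow *flow = calloc(1, sizeof(*flow) + attr_size);
	if (!flow)
		return 1;

	flow->nic_attr->flow_tag = 0xbeef;   /* writes into the trailing bytes */
	printf("flow %p, nic_attr %p, flow_tag %#x\n",
	       (void *)flow, (void *)flow->nic_attr, flow->nic_attr->flow_tag);
	free(flow);
	return 0;
}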
enum {
@@ -71,24 +91,26 @@ enum {
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- u32 action, u32 flow_tag)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow)
{
+ struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_core_dev *dev = priv->mdev;
- struct mlx5_flow_destination dest = { 0 };
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {
- .action = action,
- .flow_tag = flow_tag,
+ .action = attr->action,
+ .flow_tag = attr->flow_tag,
.encap_id = 0,
};
struct mlx5_fc *counter = NULL;
struct mlx5_flow_handle *rule;
bool table_created = false;
+ int err;
- if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fs.vlan.ft.t;
- } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(dev, true);
if (IS_ERR(counter))
return ERR_CAST(counter);
@@ -97,6 +119,19 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
dest.counter = counter;
}
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
+ parse_attr->num_mod_hdr_actions,
+ parse_attr->mod_hdr_actions,
+ &attr->mod_hdr_id);
+ flow_act.modify_id = attr->mod_hdr_id;
+ kfree(parse_attr->mod_hdr_actions);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_create_mod_hdr_id;
+ }
+ }
+
if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
priv->fs.tc.t =
mlx5_create_auto_grouped_flow_table(priv->fs.ns,
@@ -114,8 +149,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
table_created = true;
}
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);
+ parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
+ &flow_act, &dest, 1);
if (IS_ERR(rule))
goto err_add_rule;
@@ -128,6 +164,10 @@ err_add_rule:
priv->fs.tc.t = NULL;
}
err_create_ft:
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5_modify_header_dealloc(priv->mdev,
+ attr->mod_hdr_id);
+err_create_mod_hdr_id:
mlx5_fc_destroy(dev, counter);
return rule;
@@ -138,47 +178,195 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
{
struct mlx5_fc *counter = NULL;
- if (!IS_ERR(flow->rule)) {
- counter = mlx5_flow_rule_counter(flow->rule);
- mlx5_del_flow_rules(flow->rule);
- mlx5_fc_destroy(priv->mdev, counter);
- }
+ counter = mlx5_flow_rule_counter(flow->rule);
+ mlx5_del_flow_rules(flow->rule);
+ mlx5_fc_destroy(priv->mdev, counter);
if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fs.tc.t = NULL;
}
+
+ if (flow->nic_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5_modify_header_dealloc(priv->mdev,
+ flow->nic_attr->mod_hdr_id);
}
+static void mlx5e_detach_encap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow);
+
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5_flow_handle *rule;
int err;
err = mlx5_eswitch_add_vlan_action(esw, attr);
- if (err)
- return ERR_PTR(err);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_add_vlan;
+ }
- return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
-}
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB,
+ parse_attr->num_mod_hdr_actions,
+ parse_attr->mod_hdr_actions,
+ &attr->mod_hdr_id);
+ kfree(parse_attr->mod_hdr_actions);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_mod_hdr;
+ }
+ }
-static void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow);
+ rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
+ if (IS_ERR(rule))
+ goto err_add_rule;
+
+ return rule;
+
+err_add_rule:
+ if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5_modify_header_dealloc(priv->mdev,
+ attr->mod_hdr_id);
+err_mod_hdr:
+ mlx5_eswitch_del_vlan_action(esw, attr);
+err_add_vlan:
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+ mlx5e_detach_encap(priv, flow);
+ return rule;
+}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr);
+ if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
+ }
- mlx5_eswitch_del_vlan_action(esw, flow->attr);
+ mlx5_eswitch_del_vlan_action(esw, flow->esw_attr);
- if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+ if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
mlx5e_detach_encap(priv, flow);
+ kvfree(flow->esw_attr->parse_attr);
+ }
+
+ if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5_modify_header_dealloc(priv->mdev,
+ attr->mod_hdr_id);
+}
+
+void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_tc_flow *flow;
+ int err;
+
+ err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
+ e->encap_size, e->encap_header,
+ &e->encap_id);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
+ err);
+ return;
+ }
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(priv);
+
+ list_for_each_entry(flow, &e->flows, encap) {
+ flow->esw_attr->encap_id = e->encap_id;
+ flow->rule = mlx5e_tc_add_fdb_flow(priv,
+ flow->esw_attr->parse_attr,
+ flow);
+ if (IS_ERR(flow->rule)) {
+ err = PTR_ERR(flow->rule);
+ mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
+ err);
+ continue;
+ }
+ flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ }
+}
+
+void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_tc_flow *flow;
+ struct mlx5_fc *counter;
+
+ list_for_each_entry(flow, &e->flows, encap) {
+ if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
+ counter = mlx5_flow_rule_counter(flow->rule);
+ mlx5_del_flow_rules(flow->rule);
+ mlx5_fc_destroy(priv->mdev, counter);
+ }
+ }
+
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
+ e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
+ mlx5_encap_dealloc(priv->mdev, e->encap_id);
+ }
+}
+
+void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
+ u64 bytes, packets, lastuse = 0;
+ struct mlx5e_tc_flow *flow;
+ struct mlx5e_encap_entry *e;
+ struct mlx5_fc *counter;
+ struct neigh_table *tbl;
+ bool neigh_used = false;
+ struct neighbour *n;
+
+ if (m_neigh->family == AF_INET)
+ tbl = &arp_tbl;
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (m_neigh->family == AF_INET6)
+ tbl = ipv6_stub->nd_tbl;
+#endif
+ else
+ return;
+
+ list_for_each_entry(e, &nhe->encap_list, encap_list) {
+ if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
+ continue;
+ list_for_each_entry(flow, &e->flows, encap) {
+ if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ counter = mlx5_flow_rule_counter(flow->rule);
+ mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+ if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
+ neigh_used = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (neigh_used) {
+ nhe->reported_lastuse = jiffies;
+
+ /* find the relevant neigh according to the cached device and
+ * dst ip pair
+ */
+ n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
+ if (!n) {
+ WARN(1, "The neighbour was already freed\n");
+ return;
+ }
+
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ }
}
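
A rough standalone sketch of the aging decision made by mlx5e_tc_update_neigh_used_value (toy types, a userspace time helper instead of jiffies/time_after): the neighbour is only re-probed if at least one offloaded flow saw traffic after the last report:

#include <stdbool.h>
#include <stdio.h>

/* toy stand-ins for the cached flow counter and the neigh hash entry */
struct toy_flow { bool offloaded; unsigned long lastuse; };
struct toy_nhe  { unsigned long reported_lastuse; };

/* time_after(a, b): true if a is later than b, wrap-safe like the kernel macro */
static bool time_after_ul(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

static bool neigh_used(const struct toy_flow *flows, int n,
		       const struct toy_nhe *nhe)
{
	for (int i = 0; i < n; i++)
		if (flows[i].offloaded &&
		    time_after_ul(flows[i].lastuse, nhe->reported_lastuse))
			return true;
	return false;
}

int main(void)
{
	struct toy_flow flows[] = {
		{ .offloaded = true,  .lastuse = 1000 },
		{ .offloaded = true,  .lastuse = 2500 },   /* hit after the last report */
		{ .offloaded = false, .lastuse = 9999 },   /* ignored: not offloaded */
	};
	struct toy_nhe nhe = { .reported_lastuse = 2000 };

	if (neigh_used(flows, 3, &nhe)) {
		nhe.reported_lastuse = 2500;   /* jiffies in the driver */
		printf("traffic seen -> send a neigh probe (neigh_event_send)\n");
	} else {
		printf("no recent traffic -> let the neighbour age out\n");
	}
	return 0;
}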
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@@ -188,22 +376,20 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
list_del(&flow->encap);
if (list_empty(next)) {
- struct mlx5_encap_entry *e;
+ struct mlx5e_encap_entry *e;
+
+ e = list_entry(next, struct mlx5e_encap_entry, flows);
+ mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
- e = list_entry(next, struct mlx5_encap_entry, flows);
- if (e->n) {
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID)
mlx5_encap_dealloc(priv->mdev, e->encap_id);
- neigh_release(e->n);
- }
+
hlist_del_rcu(&e->encap_hlist);
+ kfree(e->encap_header);
kfree(e);
}
}
-/* we get here also when setting rule to the FW failed, etc. It means that the
- * flow rule itself might not exist, but some offloading related to the actions
- * should be cleaned.
- */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
@@ -631,15 +817,18 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
{
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep;
u8 min_inline;
int err;
err = __parse_cls_flower(priv, spec, f, &min_inline);
- if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
- rep->vport != FDB_UPLINK_VPORT) {
- if (min_inline > esw->offloads.inline_mode) {
+ if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
+ rep = rpriv->rep;
+ if (rep->vport != FDB_UPLINK_VPORT &&
+ (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
+ esw->offloads.inline_mode < min_inline)) {
netdev_warn(priv->netdev,
"Flow is not offloaded due to min inline setting, required %d actual %d\n",
min_inline, esw->offloads.inline_mode);
@@ -650,29 +839,313 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
return err;
}
+struct pedit_headers {
+ struct ethhdr eth;
+ struct iphdr ip4;
+ struct ipv6hdr ip6;
+ struct tcphdr tcp;
+ struct udphdr udp;
+};
+
+static int pedit_header_offsets[] = {
+ [TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
+ [TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
+ [TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
+ [TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
+ [TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
+};
+
+#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
+
+static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
+ struct pedit_headers *masks,
+ struct pedit_headers *vals)
+{
+ u32 *curr_pmask, *curr_pval;
+
+ if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
+ goto out_err;
+
+ curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
+ curr_pval = (u32 *)(pedit_header(vals, hdr_type) + offset);
+
+ if (*curr_pmask & mask) /* disallow acting twice on the same location */
+ goto out_err;
+
+ *curr_pmask |= mask;
+ *curr_pval |= (val & mask);
+
+ return 0;
+
+out_err:
+ return -EOPNOTSUPP;
+}
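
A small self-contained sketch of the accumulation that set_pedit_val performs (simplified shadow layout, not driver code): every pedit key ORs its mask and value into a per-header shadow copy, and a later key that touches bits already claimed is rejected:

#include <stdint.h>
#include <stdio.h>

/* shadow copy of one header: accumulated mask and value of all pedit keys */
struct shadow { uint32_t mask[4]; uint32_t val[4]; };  /* 16 bytes of header */

/* returns 0 on success, -1 if the key overlaps bits already claimed */
static int set_val(struct shadow *s, unsigned int word, uint32_t mask, uint32_t val)
{
	if (word >= 4)
		return -1;
	if (s->mask[word] & mask)        /* acting twice on the same location */
		return -1;
	s->mask[word] |= mask;
	s->val[word]  |= (val & mask);
	return 0;
}

int main(void)
{
	struct shadow eth = { { 0 }, { 0 } };

	/* first key rewrites the low 16 bits of word 0 */
	printf("first key:  %s\n", set_val(&eth, 0, 0x0000ffff, 0x1234) ? "rejected" : "ok");
	/* second key touches the same bits again: rejected (-EOPNOTSUPP in the driver) */
	printf("second key: %s\n", set_val(&eth, 0, 0x000000ff, 0x56) ? "rejected" : "ok");
	/* a key on disjoint bits is fine */
	printf("third key:  %s\n", set_val(&eth, 0, 0xffff0000, 0xabcd0000) ? "rejected" : "ok");
	return 0;
}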
+
+struct mlx5_fields {
+ u8 field;
+ u8 size;
+ u32 offset;
+};
+
+static struct mlx5_fields fields[] = {
+ {MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_dest[0])},
+ {MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_dest[4])},
+ {MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_source[0])},
+ {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])},
+ {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)},
+
+ {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
+ {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)},
+ {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)},
+ {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)},
+
+ {MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[0])},
+ {MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[1])},
+ {MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[2])},
+ {MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[3])},
+ {MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[0])},
+ {MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[1])},
+ {MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[2])},
+ {MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[3])},
+
+ {MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)},
+ {MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT, 2, offsetof(struct pedit_headers, tcp.dest)},
+ {MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1, offsetof(struct pedit_headers, tcp.ack_seq) + 5},
+
+ {MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT, 2, offsetof(struct pedit_headers, udp.source)},
+ {MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT, 2, offsetof(struct pedit_headers, udp.dest)},
+};
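
A compact illustration of how such a (field, size, offset) table is consumed (hypothetical HW field ids, a cut-down header struct, and the full-field-mask requirement left out here): any table entry whose bytes carry a mask in the shadow headers turns into one HW set/add action:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* cut-down shadow header: just an Ethernet header */
struct hdrs { unsigned char h_dest[6], h_source[6]; unsigned short h_proto; };

/* hypothetical HW field id, field size in bytes, offset into struct hdrs */
struct field { int hw_field; int size; size_t offset; };

static const struct field fields_tbl[] = {
	{ 1 /* "DMAC_47_16" */, 4, offsetof(struct hdrs, h_dest)     },
	{ 2 /* "DMAC_15_0"  */, 2, offsetof(struct hdrs, h_dest) + 4 },
	{ 3 /* "ETHERTYPE"  */, 2, offsetof(struct hdrs, h_proto)    },
};

int main(void)
{
	/* pretend the pedit keys asked to rewrite the whole destination MAC */
	struct hdrs masks = { { 0 }, { 0 }, 0 };
	memset(masks.h_dest, 0xff, 6);

	/* walk the table; any entry whose bytes carry a mask maps to one HW action */
	for (size_t i = 0; i < sizeof(fields_tbl) / sizeof(fields_tbl[0]); i++) {
		const unsigned char zero[4] = { 0 };
		const unsigned char *p = (const unsigned char *)&masks + fields_tbl[i].offset;

		if (memcmp(p, zero, fields_tbl[i].size))
			printf("HW field %d: one SET action, %d bytes at offset %zu\n",
			       fields_tbl[i].hw_field, fields_tbl[i].size, fields_tbl[i].offset);
	}
	return 0;
}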
+
+/* On input, parse_attr->num_mod_hdr_actions tells how many HW actions can be
+ * parsed at most from the SW pedit action. On success, it is updated with the
+ * number of HW actions actually parsed.
+ */
+static int offload_pedit_fields(struct pedit_headers *masks,
+ struct pedit_headers *vals,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
+ int i, action_size, nactions, max_actions, first, last;
+ void *s_masks_p, *a_masks_p, *vals_p;
+ u32 s_mask, a_mask, val;
+ struct mlx5_fields *f;
+ u8 cmd, field_bsize;
+ unsigned long mask;
+ void *action;
+
+ set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
+ add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
+ set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
+ add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
+
+ action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ action = parse_attr->mod_hdr_actions;
+ max_actions = parse_attr->num_mod_hdr_actions;
+ nactions = 0;
+
+ for (i = 0; i < ARRAY_SIZE(fields); i++) {
+ f = &fields[i];
+ /* avoid seeing bits set from previous iterations */
+ s_mask = a_mask = mask = val = 0;
+
+ s_masks_p = (void *)set_masks + f->offset;
+ a_masks_p = (void *)add_masks + f->offset;
+
+ memcpy(&s_mask, s_masks_p, f->size);
+ memcpy(&a_mask, a_masks_p, f->size);
+
+ if (!s_mask && !a_mask) /* nothing to offload here */
+ continue;
+
+ if (s_mask && a_mask) {
+ printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
+ return -EOPNOTSUPP;
+ }
+
+ if (nactions == max_actions) {
+ printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
+ return -EOPNOTSUPP;
+ }
+
+ if (s_mask) {
+ cmd = MLX5_ACTION_TYPE_SET;
+ mask = s_mask;
+ vals_p = (void *)set_vals + f->offset;
+ /* clear to denote we consumed this field */
+ memset(s_masks_p, 0, f->size);
+ } else {
+ cmd = MLX5_ACTION_TYPE_ADD;
+ mask = a_mask;
+ vals_p = (void *)add_vals + f->offset;
+ /* clear to denote we consumed this field */
+ memset(a_masks_p, 0, f->size);
+ }
+
+ memcpy(&val, vals_p, f->size);
+
+ field_bsize = f->size * BITS_PER_BYTE;
+ first = find_first_bit(&mask, field_bsize);
+ last = find_last_bit(&mask, field_bsize);
+ if (first > 0 || last != (field_bsize - 1)) {
+ printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
+ mask);
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(set_action_in, action, action_type, cmd);
+ MLX5_SET(set_action_in, action, field, f->field);
+
+ if (cmd == MLX5_ACTION_TYPE_SET) {
+ MLX5_SET(set_action_in, action, offset, 0);
+ /* length is num of bits to be written, zero means length of 32 */
+ MLX5_SET(set_action_in, action, length, field_bsize);
+ }
+
+ if (field_bsize == 32)
+ MLX5_SET(set_action_in, action, data, ntohl(val));
+ else if (field_bsize == 16)
+ MLX5_SET(set_action_in, action, data, ntohs(val));
+ else if (field_bsize == 8)
+ MLX5_SET(set_action_in, action, data, val);
+
+ action += action_size;
+ nactions++;
+ }
+
+ parse_attr->num_mod_hdr_actions = nactions;
+ return 0;
+}
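
A standalone illustration of the partial-rewrite check above, using plain compiler builtins instead of the kernel's find_first_bit/find_last_bit: the accumulated mask must start at bit 0 and end at the field's last bit, otherwise the rewrite is rejected (as in the original, gaps in the middle are not inspected):

#include <stdbool.h>
#include <stdio.h>

/* accept only masks whose set bits start at bit 0 and end at bit (bsize - 1) */
static bool full_field_mask(unsigned long mask, int bsize)
{
	int first, last;

	if (!mask)
		return false;
	first = __builtin_ctzl(mask);                                /* lowest set bit  */
	last  = (int)(sizeof(long) * 8 - 1) - __builtin_clzl(mask);  /* highest set bit */
	return first == 0 && last == bsize - 1;
}

int main(void)
{
	/* full 32-bit rewrite: offloadable as one SET action */
	printf("0xffffffff/32 -> %s\n", full_field_mask(0xffffffffUL, 32) ? "offload" : "reject");
	/* partial rewrite of a 32-bit field: rejected with -EOPNOTSUPP */
	printf("0x00ffff00/32 -> %s\n", full_field_mask(0x00ffff00UL, 32) ? "offload" : "reject");
	/* full 16-bit field, e.g. a TCP port */
	printf("0x0000ffff/16 -> %s\n", full_field_mask(0x0000ffffUL, 16) ? "offload" : "reject");
	return 0;
}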
+
+static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
+ const struct tc_action *a, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ int nkeys, action_size, max_actions;
+
+ nkeys = tcf_pedit_nkeys(a);
+ action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+
+ if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
+ max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
+ else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
+ max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);
+
+ /* a single 32-bit pedit SW key can expand to as many as 16 HW actions */
+ max_actions = min(max_actions, nkeys * 16);
+
+ parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
+ if (!parse_attr->mod_hdr_actions)
+ return -ENOMEM;
+
+ parse_attr->num_mod_hdr_actions = max_actions;
+ return 0;
+}
+
+static const struct pedit_headers zero_masks = {};
+
+static int parse_tc_pedit_action(struct mlx5e_priv *priv,
+ const struct tc_action *a, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
+ int nkeys, i, err = -EOPNOTSUPP;
+ u32 mask, val, offset;
+ u8 cmd, htype;
+
+ nkeys = tcf_pedit_nkeys(a);
+
+ memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
+ memset(vals, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
+
+ for (i = 0; i < nkeys; i++) {
+ htype = tcf_pedit_htype(a, i);
+ cmd = tcf_pedit_cmd(a, i);
+ err = -EOPNOTSUPP; /* can't be all optimistic */
+
+ if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
+ printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
+ goto out_err;
+ }
+
+ if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
+ printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
+ goto out_err;
+ }
+
+ mask = tcf_pedit_mask(a, i);
+ val = tcf_pedit_val(a, i);
+ offset = tcf_pedit_offset(a, i);
+
+ err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
+ if (err)
+ goto out_err;
+ }
+
+ err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
+ if (err)
+ goto out_err;
+
+ err = offload_pedit_fields(masks, vals, parse_attr);
+ if (err < 0)
+ goto out_dealloc_parsed_actions;
+
+ for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
+ cmd_masks = &masks[cmd];
+ if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
+ printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
+ cmd);
+ print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
+ 16, 1, cmd_masks, sizeof(zero_masks), true);
+ err = -EOPNOTSUPP;
+ goto out_dealloc_parsed_actions;
+ }
+ }
+
+ return 0;
+
+out_dealloc_parsed_actions:
+ kfree(parse_attr->mod_hdr_actions);
+out_err:
+ return err;
+}
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
- u32 *action, u32 *flow_tag)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow)
{
+ struct mlx5_nic_flow_attr *attr = flow->nic_attr;
const struct tc_action *a;
LIST_HEAD(actions);
+ int err;
if (tc_no_actions(exts))
return -EINVAL;
- *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
- *action = 0;
+ attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+ attr->action = 0;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
/* Only support a single action per rule */
- if (*action)
+ if (attr->action)
return -EINVAL;
if (is_tcf_gact_shot(a)) {
- *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
flow_table_properties_nic_receive.flow_counter))
- *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ continue;
+ }
+
+ if (is_tcf_pedit(a)) {
+ err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
+ parse_attr);
+ if (err)
+ return err;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
continue;
}
@@ -685,8 +1158,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EINVAL;
}
- *flow_tag = mark;
- *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->flow_tag = mark;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
continue;
}
@@ -785,16 +1258,15 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
return 0;
}
-static int gen_vxlan_header_ipv4(struct net_device *out_dev,
- char buf[],
- unsigned char h_dest[ETH_ALEN],
- int ttl,
- __be32 daddr,
- __be32 saddr,
- __be16 udp_dst_port,
- __be32 vx_vni)
+static void gen_vxlan_header_ipv4(struct net_device *out_dev,
+ char buf[], int encap_size,
+ unsigned char h_dest[ETH_ALEN],
+ int ttl,
+ __be32 daddr,
+ __be32 saddr,
+ __be16 udp_dst_port,
+ __be32 vx_vni)
{
- int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
struct ethhdr *eth = (struct ethhdr *)buf;
struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
@@ -817,20 +1289,17 @@ static int gen_vxlan_header_ipv4(struct net_device *out_dev,
udp->dest = udp_dst_port;
vxh->vx_flags = VXLAN_HF_VNI;
vxh->vx_vni = vxlan_vni_field(vx_vni);
-
- return encap_size;
}
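
For reference, a small sketch of the buffer layout gen_vxlan_header_ipv4 fills in, with local header structs sized like the real ones: 14 bytes Ethernet plus 20 bytes IPv4 plus 8 bytes UDP plus 8 bytes VXLAN gives the 50-byte ipv4_encap_size that is later checked against max_encap_header_size:

#include <stdio.h>
#include <stdint.h>

/* local stand-ins, sized like the real headers (no options) */
struct eth_h   { uint8_t  d[6], s[6]; uint16_t proto; } __attribute__((packed)); /* 14 */
struct ip4_h   { uint8_t  vihl, tos; uint16_t len, id, frag;
		 uint8_t  ttl, proto; uint16_t csum; uint32_t saddr, daddr; };   /* 20 */
struct udp_h   { uint16_t sport, dport, len, csum; };                             /*  8 */
struct vxlan_h { uint32_t flags, vni; };                                          /*  8 */

int main(void)
{
	size_t eth = sizeof(struct eth_h), ip = sizeof(struct ip4_h);
	size_t udp = sizeof(struct udp_h), vx = sizeof(struct vxlan_h);

	/* offsets of each header inside the encap buffer built by the driver */
	printf("eth   at %zu (%zu bytes)\n", (size_t)0, eth);
	printf("ip    at %zu (%zu bytes)\n", eth, ip);
	printf("udp   at %zu (%zu bytes)\n", eth + ip, udp);
	printf("vxlan at %zu (%zu bytes)\n", eth + ip + udp, vx);
	printf("total encap size: %zu bytes\n", eth + ip + udp + vx);  /* 50 */
	return 0;
}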
-static int gen_vxlan_header_ipv6(struct net_device *out_dev,
- char buf[],
- unsigned char h_dest[ETH_ALEN],
- int ttl,
- struct in6_addr *daddr,
- struct in6_addr *saddr,
- __be16 udp_dst_port,
- __be32 vx_vni)
+static void gen_vxlan_header_ipv6(struct net_device *out_dev,
+ char buf[], int encap_size,
+ unsigned char h_dest[ETH_ALEN],
+ int ttl,
+ struct in6_addr *daddr,
+ struct in6_addr *saddr,
+ __be16 udp_dst_port,
+ __be32 vx_vni)
{
- int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
struct ethhdr *eth = (struct ethhdr *)buf;
struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
@@ -852,23 +1321,29 @@ static int gen_vxlan_header_ipv6(struct net_device *out_dev,
udp->dest = udp_dst_port;
vxh->vx_flags = VXLAN_HF_VNI;
vxh->vx_vni = vxlan_vni_field(vx_vni);
-
- return encap_size;
}
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
- struct mlx5_encap_entry *e,
- struct net_device **out_dev)
+ struct mlx5e_encap_entry *e)
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
struct ip_tunnel_key *tun_key = &e->tun_info.key;
- int encap_size, ttl, err;
+ struct net_device *out_dev;
struct neighbour *n = NULL;
struct flowi4 fl4 = {};
char *encap_header;
+ int ttl, err;
+ u8 nud_state;
- encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+ if (max_encap_size < ipv4_encap_size) {
+ mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+ ipv4_encap_size, max_encap_size);
+ return -EOPNOTSUPP;
+ }
+
+ encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
if (!encap_header)
return -ENOMEM;
@@ -885,58 +1360,94 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
fl4.daddr = tun_key->u.ipv4.dst;
fl4.saddr = tun_key->u.ipv4.src;
- err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
+ err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
&fl4, &n, &ttl);
if (err)
goto out;
- if (!(n->nud_state & NUD_VALID)) {
- pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
- err = -EOPNOTSUPP;
+ /* used by mlx5e_detach_encap to look up the neigh hash table
+ * entry when a user deletes a rule
+ */
+ e->m_neigh.dev = n->dev;
+ e->m_neigh.family = n->ops->family;
+ memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+ e->out_dev = out_dev;
+
+ /* It's important to add the neigh to the hash table before checking
+ * its validity state. This way, if a notification arrives because the
+ * neigh changed its validity state, the relevant neigh can be found
+ * in the hash table.
+ */
+ err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
+ if (err)
goto out;
- }
-
- e->n = n;
- e->out_dev = *out_dev;
- neigh_ha_snapshot(e->h_dest, n, *out_dev);
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ ether_addr_copy(e->h_dest, n->ha);
+ read_unlock_bh(&n->lock);
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
- encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
- e->h_dest, ttl,
- fl4.daddr,
- fl4.saddr, tun_key->tp_dst,
- tunnel_id_to_key32(tun_key->tun_id));
+ gen_vxlan_header_ipv4(out_dev, encap_header,
+ ipv4_encap_size, e->h_dest, ttl,
+ fl4.daddr,
+ fl4.saddr, tun_key->tp_dst,
+ tunnel_id_to_key32(tun_key->tun_id));
break;
default:
err = -EOPNOTSUPP;
- goto out;
+ goto destroy_neigh_entry;
+ }
+ e->encap_size = ipv4_encap_size;
+ e->encap_header = encap_header;
+
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ return -EAGAIN;
}
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
- encap_size, encap_header, &e->encap_id);
+ ipv4_encap_size, encap_header, &e->encap_id);
+ if (err)
+ goto destroy_neigh_entry;
+
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
+ neigh_release(n);
+ return err;
+
+destroy_neigh_entry:
+ mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
out:
- if (err && n)
- neigh_release(n);
kfree(encap_header);
+ if (n)
+ neigh_release(n);
return err;
}
static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
- struct mlx5_encap_entry *e,
- struct net_device **out_dev)
-
+ struct mlx5e_encap_entry *e)
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
struct ip_tunnel_key *tun_key = &e->tun_info.key;
- int encap_size, err, ttl = 0;
+ struct net_device *out_dev;
struct neighbour *n = NULL;
struct flowi6 fl6 = {};
char *encap_header;
+ int err, ttl = 0;
+ u8 nud_state;
+
+ if (max_encap_size < ipv6_encap_size) {
+ mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+ ipv6_encap_size, max_encap_size);
+ return -EOPNOTSUPP;
+ }
- encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+ encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
if (!encap_header)
return -ENOMEM;
@@ -954,57 +1465,88 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
fl6.daddr = tun_key->u.ipv6.dst;
fl6.saddr = tun_key->u.ipv6.src;
- err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev,
+ err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
&fl6, &n, &ttl);
if (err)
goto out;
- if (!(n->nud_state & NUD_VALID)) {
- pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr);
- err = -EOPNOTSUPP;
+ /* used by mlx5e_detach_encap to look up the neigh hash table
+ * entry when a user deletes a rule
+ */
+ e->m_neigh.dev = n->dev;
+ e->m_neigh.family = n->ops->family;
+ memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+ e->out_dev = out_dev;
+
+ /* It's important to add the neigh to the hash table before checking
+ * its validity state. This way, if a notification arrives because the
+ * neigh changed its validity state, the relevant neigh can be found
+ * in the hash table.
+ */
+ err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
+ if (err)
goto out;
- }
-
- e->n = n;
- e->out_dev = *out_dev;
- neigh_ha_snapshot(e->h_dest, n, *out_dev);
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ ether_addr_copy(e->h_dest, n->ha);
+ read_unlock_bh(&n->lock);
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
- encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
- e->h_dest, ttl,
- &fl6.daddr,
- &fl6.saddr, tun_key->tp_dst,
- tunnel_id_to_key32(tun_key->tun_id));
+ gen_vxlan_header_ipv6(out_dev, encap_header,
+ ipv6_encap_size, e->h_dest, ttl,
+ &fl6.daddr,
+ &fl6.saddr, tun_key->tp_dst,
+ tunnel_id_to_key32(tun_key->tun_id));
break;
default:
err = -EOPNOTSUPP;
- goto out;
+ goto destroy_neigh_entry;
+ }
+
+ e->encap_size = ipv6_encap_size;
+ e->encap_header = encap_header;
+
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ return -EAGAIN;
}
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
- encap_size, encap_header, &e->encap_id);
+ ipv6_encap_size, encap_header, &e->encap_id);
+ if (err)
+ goto destroy_neigh_entry;
+
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
+ neigh_release(n);
+ return err;
+
+destroy_neigh_entry:
+ mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
out:
- if (err && n)
- neigh_release(n);
kfree(encap_header);
+ if (n)
+ neigh_release(n);
return err;
}
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct ip_tunnel_info *tun_info,
struct net_device *mirred_dev,
- struct mlx5_esw_flow_attr *attr)
+ struct net_device **encap_dev,
+ struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
- struct mlx5e_priv *up_priv = netdev_priv(up_dev);
unsigned short family = ip_tunnel_info_af(tun_info);
+ struct mlx5e_priv *up_priv = netdev_priv(up_dev);
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct ip_tunnel_key *key = &tun_info->key;
- struct mlx5_encap_entry *e;
- struct net_device *out_dev;
- int tunnel_type, err = -EOPNOTSUPP;
+ struct mlx5e_encap_entry *e;
+ int tunnel_type, err = 0;
uintptr_t hash_key;
bool found = false;
@@ -1039,10 +1581,8 @@ vxlan_encap_offload_err:
}
}
- if (found) {
- attr->encap = e;
- return 0;
- }
+ if (found)
+ goto attach_flow;
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (!e)
@@ -1053,16 +1593,21 @@ vxlan_encap_offload_err:
INIT_LIST_HEAD(&e->flows);
if (family == AF_INET)
- err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
+ err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
else if (family == AF_INET6)
- err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev);
+ err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);
- if (err)
+ if (err && err != -EAGAIN)
goto out_err;
- attr->encap = e;
hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
+attach_flow:
+ list_add(&flow->encap, &e->flows);
+ *encap_dev = e->out_dev;
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID)
+ attr->encap_id = e->encap_id;
+
return err;
out_err:
@@ -1071,20 +1616,22 @@ out_err:
}
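
A toy sketch of the sharing scheme used by mlx5e_attach_encap (a plain list instead of the hashtable, invented field names): encap entries are keyed by the tunnel parameters, every flow using the same tunnel attaches to the existing entry, and only a valid entry carries a usable encap_id:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct encap_entry {
	unsigned int tun_key;      /* stands in for the dst IP / VNI / dport hash key */
	bool valid;                /* MLX5_ENCAP_ENTRY_VALID: neigh resolved, encap_id usable */
	unsigned int encap_id;
	int nflows;                /* stands in for the e->flows list */
	struct encap_entry *next;
};

static struct encap_entry *encap_list;

/* find or create an entry for this tunnel; every flow sharing the tunnel reuses it */
static struct encap_entry *attach_encap(unsigned int tun_key)
{
	struct encap_entry *e;

	for (e = encap_list; e; e = e->next)
		if (e->tun_key == tun_key)
			goto attach;

	e = calloc(1, sizeof(*e));
	if (!e)
		return NULL;
	e->tun_key = tun_key;
	e->next = encap_list;
	encap_list = e;
attach:
	e->nflows++;               /* list_add(&flow->encap, &e->flows) in the driver */
	return e;
}

int main(void)
{
	struct encap_entry *a = attach_encap(42);
	struct encap_entry *b = attach_encap(42);   /* same tunnel: shared entry */
	struct encap_entry *c = attach_encap(7);    /* different tunnel: new entry */

	printf("a==b: %d, flows on shared entry: %d, c flows: %d\n",
	       a == b, a->nflows, c->nflows);
	return 0;
}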
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow)
{
- struct mlx5_esw_flow_attr *attr = flow->attr;
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct ip_tunnel_info *info = NULL;
const struct tc_action *a;
LIST_HEAD(actions);
bool encap = false;
- int err;
+ int err = 0;
if (tc_no_actions(exts))
return -EINVAL;
memset(attr, 0, sizeof(*attr));
- attr->in_rep = priv->ppriv;
+ attr->in_rep = rpriv->rep;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
@@ -1094,9 +1641,19 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
continue;
}
+ if (is_tcf_pedit(a)) {
+ err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
+ parse_attr);
+ if (err)
+ return err;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ continue;
+ }
+
if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
- struct net_device *out_dev;
+ struct net_device *out_dev, *encap_dev = NULL;
struct mlx5e_priv *out_priv;
out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@@ -1106,18 +1663,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
out_priv = netdev_priv(out_dev);
- attr->out_rep = out_priv->ppriv;
+ rpriv = out_priv->ppriv;
+ attr->out_rep = rpriv->rep;
} else if (encap) {
err = mlx5e_attach_encap(priv, info,
- out_dev, attr);
- if (err)
+ out_dev, &encap_dev, flow);
+ if (err && err != -EAGAIN)
return err;
- list_add(&flow->encap, &attr->encap->flows);
attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- out_priv = netdev_priv(attr->encap->out_dev);
- attr->out_rep = out_priv->ppriv;
+ out_priv = netdev_priv(encap_dev);
+ rpriv = out_priv->ppriv;
+ attr->out_rep = rpriv->rep;
+ attr->parse_attr = parse_attr;
} else {
pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
@@ -1157,28 +1716,30 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EINVAL;
}
- return 0;
+ return err;
}
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
struct tc_cls_flower_offload *f)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_table *tc = &priv->fs.tc;
- int err, attr_size = 0;
- u32 flow_tag, action;
struct mlx5e_tc_flow *flow;
- struct mlx5_flow_spec *spec;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int attr_size, err = 0;
u8 flow_flags = 0;
if (esw && esw->mode == SRIOV_OFFLOADS) {
flow_flags = MLX5E_TC_FLOW_ESWITCH;
attr_size = sizeof(struct mlx5_esw_flow_attr);
+ } else {
+ flow_flags = MLX5E_TC_FLOW_NIC;
+ attr_size = sizeof(struct mlx5_nic_flow_attr);
}
flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
- spec = mlx5_vzalloc(sizeof(*spec));
- if (!spec || !flow) {
+ parse_attr = mlx5_vzalloc(sizeof(*parse_attr));
+ if (!parse_attr || !flow) {
err = -ENOMEM;
goto err_free;
}
@@ -1186,42 +1747,54 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
flow->cookie = f->cookie;
flow->flags = flow_flags;
- err = parse_cls_flower(priv, flow, spec, f);
+ err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
if (err < 0)
goto err_free;
if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
- flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
- err = parse_tc_fdb_actions(priv, f->exts, flow);
+ err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
if (err < 0)
- goto err_free;
- flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
+ goto err_handle_encap_flow;
+ flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
} else {
- err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
+ err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
if (err < 0)
goto err_free;
- flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
+ flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
}
if (IS_ERR(flow->rule)) {
err = PTR_ERR(flow->rule);
- goto err_del_rule;
+ goto err_free;
}
+ flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
err = rhashtable_insert_fast(&tc->ht, &flow->node,
tc->ht_params);
if (err)
goto err_del_rule;
- goto out;
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
+ !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
+ kvfree(parse_attr);
+ return err;
err_del_rule:
mlx5e_tc_del_flow(priv, flow);
+err_handle_encap_flow:
+ if (err == -EAGAIN) {
+ err = rhashtable_insert_fast(&tc->ht, &flow->node,
+ tc->ht_params);
+ if (err)
+ mlx5e_tc_del_flow(priv, flow);
+ else
+ return 0;
+ }
+
err_free:
+ kvfree(parse_attr);
kfree(flow);
-out:
- kvfree(spec);
return err;
}
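
A condensed sketch (stand-in names, not driver code) of the control flow mlx5e_configure_flower ends up with for encap flows: if the tunnel neighbour is not resolved yet the add path returns -EAGAIN, the flow is still kept in the hashtable without the OFFLOADED flag, and mlx5e_tc_encap_flows_add installs the rule later once the neighbour becomes valid:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct flow { bool in_ht; bool offloaded; };

/* stands in for mlx5e_tc_add_fdb_flow(): fails with -EAGAIN while the
 * tunnel neighbour is not NUD_VALID yet
 */
static int add_fdb_flow(struct flow *f, bool neigh_valid)
{
	if (!neigh_valid)
		return -EAGAIN;
	f->offloaded = true;
	return 0;
}

static int configure_flower(struct flow *f, bool neigh_valid)
{
	int err = add_fdb_flow(f, neigh_valid);

	if (err == -EAGAIN) {
		/* keep the flow cached anyway; the neigh update work offloads it later */
		f->in_ht = true;
		return 0;
	}
	if (err)
		return err;
	f->in_ht = true;
	return 0;
}

/* stands in for mlx5e_tc_encap_flows_add(), called once the neigh resolves */
static void encap_flows_add(struct flow *f)
{
	if (f->in_ht && !f->offloaded)
		f->offloaded = true;
}

int main(void)
{
	struct flow f = { false, false };

	configure_flower(&f, false);                 /* neigh not resolved yet */
	printf("after configure: in_ht=%d offloaded=%d\n", f.in_ht, f.offloaded);
	encap_flows_add(&f);                         /* neigh became NUD_VALID */
	printf("after neigh update: in_ht=%d offloaded=%d\n", f.in_ht, f.offloaded);
	return 0;
}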
@@ -1240,7 +1813,6 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
mlx5e_tc_del_flow(priv, flow);
-
kfree(flow);
return 0;
@@ -1263,6 +1835,9 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
if (!flow)
return -EINVAL;
+ if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
+ return 0;
+
counter = mlx5_flow_rule_counter(flow->rule);
if (!counter)
return 0;