Diffstat (limited to 'drivers/net/ethernet/netronome/nfp/flower')
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c       420
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.c          25
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h         146
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.c          47
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h          40
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c        114
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/metadata.c       9
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c      188
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c  804
9 files changed, 1666 insertions(+), 127 deletions(-)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 8ea9320014ee..c1c595f8bb87 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -36,7 +36,9 @@
#include <net/switchdev.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
+#include <net/tc_act/tc_tunnel_key.h>
#include "cmsg.h"
#include "main.h"
@@ -45,13 +47,9 @@
static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
size_t act_size = sizeof(struct nfp_fl_pop_vlan);
- u16 tmp_pop_vlan_op;
- tmp_pop_vlan_op =
- FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
- FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_POP_VLAN);
-
- pop_vlan->a_op = cpu_to_be16(tmp_pop_vlan_op);
+ pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
+ pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
pop_vlan->reserved = 0;
}
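
The change above replaces the packed 16-bit a_op word with the two-byte struct nfp_fl_act_head. A minimal standalone sketch (userspace model, not kernel code; example values assumed) showing that the old big-endian packing and the new struct produce the same wire bytes:

#include <assert.h>
#include <stdint.h>

/* Mirrors struct nfp_fl_act_head from cmsg.h below. */
struct act_head_model {
	uint8_t jump_id;
	uint8_t len_lw;
};

int main(void)
{
	/* e.g. POP_VLAN (opcode 2), 4-byte action => len_lw = 4 >> 2 = 1 */
	uint8_t jump_id = 2, len_lw = 1;
	/* Old encoding: jump_id in bits 15:8, len_lw in bits 7:0, cpu_to_be16() */
	uint16_t a_op = (uint16_t)((jump_id << 8) | len_lw);
	uint8_t wire[2] = { (uint8_t)(a_op >> 8), (uint8_t)(a_op & 0xff) };
	struct act_head_model h = { .jump_id = jump_id, .len_lw = len_lw };

	assert(wire[0] == h.jump_id && wire[1] == h.len_lw);
	return 0;
}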
@@ -60,64 +58,373 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
const struct tc_action *action)
{
size_t act_size = sizeof(struct nfp_fl_push_vlan);
- struct tcf_vlan *vlan = to_vlan(action);
u16 tmp_push_vlan_tci;
- u16 tmp_push_vlan_op;
-
- tmp_push_vlan_op =
- FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
- FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_PUSH_VLAN);
- push_vlan->a_op = cpu_to_be16(tmp_push_vlan_op);
- /* Set action push vlan parameters. */
+ push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
+ push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
push_vlan->reserved = 0;
push_vlan->vlan_tpid = tcf_vlan_push_proto(action);
tmp_push_vlan_tci =
- FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, vlan->tcfv_push_prio) |
- FIELD_PREP(NFP_FL_PUSH_VLAN_VID, vlan->tcfv_push_vid) |
+ FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
+ FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
NFP_FL_PUSH_VLAN_CFI;
push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
+static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
+ enum nfp_flower_tun_type tun_type)
+{
+ if (!out_dev->rtnl_link_ops)
+ return false;
+
+ if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
+ return tun_type == NFP_FL_TUNNEL_VXLAN;
+
+ return false;
+}
+
static int
nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
struct nfp_fl_payload *nfp_flow, bool last,
- struct net_device *in_dev)
+ struct net_device *in_dev, enum nfp_flower_tun_type tun_type,
+ int *tun_out_cnt)
{
size_t act_size = sizeof(struct nfp_fl_output);
struct net_device *out_dev;
- u16 tmp_output_op;
+ u16 tmp_flags;
int ifindex;
- /* Set action opcode to output action. */
- tmp_output_op =
- FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
- FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_OUTPUT);
-
- output->a_op = cpu_to_be16(tmp_output_op);
-
- /* Set action output parameters. */
- output->flags = cpu_to_be16(last ? NFP_FL_OUT_FLAGS_LAST : 0);
+ output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
+ output->head.len_lw = act_size >> NFP_FL_LW_SIZ;
ifindex = tcf_mirred_ifindex(action);
out_dev = __dev_get_by_index(dev_net(in_dev), ifindex);
if (!out_dev)
return -EOPNOTSUPP;
- /* Only offload egress ports are on the same device as the ingress
- * port.
+ tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;
+
+ if (tun_type) {
+ /* Verify the egress netdev matches the tunnel type. */
+ if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
+ return -EOPNOTSUPP;
+
+ if (*tun_out_cnt)
+ return -EOPNOTSUPP;
+ (*tun_out_cnt)++;
+
+ output->flags = cpu_to_be16(tmp_flags |
+ NFP_FL_OUT_FLAGS_USE_TUN);
+ output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
+ } else {
+ /* Set action output parameters. */
+ output->flags = cpu_to_be16(tmp_flags);
+
+ /* Only offload if egress ports are on the same device as the
+ * ingress port.
+ */
+ if (!switchdev_port_same_parent_id(in_dev, out_dev))
+ return -EOPNOTSUPP;
+ if (!nfp_netdev_is_nfp_repr(out_dev))
+ return -EOPNOTSUPP;
+
+ output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
+ if (!output->port)
+ return -EOPNOTSUPP;
+ }
+ nfp_flow->meta.shortcut = output->port;
+
+ return 0;
+}
+
+static bool nfp_fl_supported_tun_port(const struct tc_action *action)
+{
+ struct ip_tunnel_info *tun = tcf_tunnel_info(action);
+
+ return tun->key.tp_dst == htons(NFP_FL_VXLAN_PORT);
+}
+
+static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
+{
+ size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
+ struct nfp_fl_pre_tunnel *pre_tun_act;
+
+ /* Pre_tunnel action must be first on action list.
+ * If other actions already exist they need to be pushed forward.
*/
- if (!switchdev_port_same_parent_id(in_dev, out_dev))
+ if (act_len)
+ memmove(act_data + act_size, act_data, act_len);
+
+ pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;
+
+ memset(pre_tun_act, 0, act_size);
+
+ pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
+ pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ return pre_tun_act;
+}
+
+static int
+nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan,
+ const struct tc_action *action,
+ struct nfp_fl_pre_tunnel *pre_tun)
+{
+ struct ip_tunnel_info *vxlan = tcf_tunnel_info(action);
+ size_t act_size = sizeof(struct nfp_fl_set_vxlan);
+ u32 tmp_set_vxlan_type_index = 0;
+ /* Currently support one pre-tunnel so index is always 0. */
+ int pretun_idx = 0;
+
+ if (vxlan->options_len) {
+ /* Do not support options e.g. vxlan gpe. */
return -EOPNOTSUPP;
- if (!nfp_netdev_is_nfp_repr(out_dev))
+ }
+
+ set_vxlan->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+ set_vxlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ /* Set tunnel type and pre-tunnel index. */
+ tmp_set_vxlan_type_index |=
+ FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, NFP_FL_TUNNEL_VXLAN) |
+ FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
+
+ set_vxlan->tun_type_index = cpu_to_be32(tmp_set_vxlan_type_index);
+
+ set_vxlan->tun_id = vxlan->key.tun_id;
+ set_vxlan->tun_flags = vxlan->key.tun_flags;
+ set_vxlan->ipv4_ttl = vxlan->key.ttl;
+ set_vxlan->ipv4_tos = vxlan->key.tos;
+
+ /* Complete pre_tunnel action. */
+ pre_tun->ipv4_dst = vxlan->key.u.ipv4.dst;
+
+ return 0;
+}
+
+static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
+{
+ u32 oldvalue = get_unaligned((u32 *)p_exact);
+ u32 oldmask = get_unaligned((u32 *)p_mask);
+
+ value &= mask;
+ value |= oldvalue & ~mask;
+
+ put_unaligned(oldmask | mask, (u32 *)p_mask);
+ put_unaligned(value, (u32 *)p_exact);
+}
+
+static int
+nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_eth *set_eth)
+{
+ u32 exact, mask;
+
+ if (off + 4 > ETH_ALEN * 2)
return -EOPNOTSUPP;
- output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
- if (!output->port)
+ mask = ~tcf_pedit_mask(action, idx);
+ exact = tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
return -EOPNOTSUPP;
- nfp_flow->meta.shortcut = output->port;
+ nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
+ &set_eth->eth_addr_mask[off]);
+
+ set_eth->reserved = cpu_to_be16(0);
+ set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
+ set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;
+
+ return 0;
+}
+
+static int
+nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_ip4_addrs *set_ip_addr)
+{
+ __be32 exact, mask;
+
+ /* We are expecting tcf_pedit to return a big endian value */
+ mask = (__force __be32)~tcf_pedit_mask(action, idx);
+ exact = (__force __be32)tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
+ return -EOPNOTSUPP;
+
+ switch (off) {
+ case offsetof(struct iphdr, daddr):
+ set_ip_addr->ipv4_dst_mask = mask;
+ set_ip_addr->ipv4_dst = exact;
+ break;
+ case offsetof(struct iphdr, saddr):
+ set_ip_addr->ipv4_src_mask = mask;
+ set_ip_addr->ipv4_src = exact;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ set_ip_addr->reserved = cpu_to_be16(0);
+ set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
+ set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;
+
+ return 0;
+}
+
+static void
+nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
+ struct nfp_fl_set_ipv6_addr *ip6)
+{
+ ip6->ipv6[idx % 4].mask = mask;
+ ip6->ipv6[idx % 4].exact = exact;
+
+ ip6->reserved = cpu_to_be16(0);
+ ip6->head.jump_id = opcode_tag;
+ ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
+}
+
+static int
+nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_ipv6_addr *ip_dst,
+ struct nfp_fl_set_ipv6_addr *ip_src)
+{
+ __be32 exact, mask;
+
+ /* We are expecting tcf_pedit to return a big endian value */
+ mask = (__force __be32)~tcf_pedit_mask(action, idx);
+ exact = (__force __be32)tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
+ return -EOPNOTSUPP;
+
+ if (off < offsetof(struct ipv6hdr, saddr))
+ return -EOPNOTSUPP;
+ else if (off < offsetof(struct ipv6hdr, daddr))
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
+ exact, mask, ip_src);
+ else if (off < offsetof(struct ipv6hdr, daddr) +
+ sizeof(struct in6_addr))
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
+ exact, mask, ip_dst);
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int
+nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_tport *set_tport, int opcode)
+{
+ u32 exact, mask;
+
+ if (off)
+ return -EOPNOTSUPP;
+
+ mask = ~tcf_pedit_mask(action, idx);
+ exact = tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
+ return -EOPNOTSUPP;
+
+ nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
+ set_tport->tp_port_mask);
+
+ set_tport->reserved = cpu_to_be16(0);
+ set_tport->head.jump_id = opcode;
+ set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;
+
+ return 0;
+}
+
+static int
+nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
+{
+ struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
+ struct nfp_fl_set_ip4_addrs set_ip_addr;
+ struct nfp_fl_set_tport set_tport;
+ struct nfp_fl_set_eth set_eth;
+ enum pedit_header_type htype;
+ int idx, nkeys, err;
+ size_t act_size;
+ u32 offset, cmd;
+
+ memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
+ memset(&set_ip6_src, 0, sizeof(set_ip6_src));
+ memset(&set_ip_addr, 0, sizeof(set_ip_addr));
+ memset(&set_tport, 0, sizeof(set_tport));
+ memset(&set_eth, 0, sizeof(set_eth));
+ nkeys = tcf_pedit_nkeys(action);
+
+ for (idx = 0; idx < nkeys; idx++) {
+ cmd = tcf_pedit_cmd(action, idx);
+ htype = tcf_pedit_htype(action, idx);
+ offset = tcf_pedit_offset(action, idx);
+
+ if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
+ return -EOPNOTSUPP;
+
+ switch (htype) {
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ err = nfp_fl_set_eth(action, idx, offset, &set_eth);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
+ &set_ip6_src);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ err = nfp_fl_set_tport(action, idx, offset, &set_tport,
+ NFP_FL_ACTION_OPCODE_SET_TCP);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+ err = nfp_fl_set_tport(action, idx, offset, &set_tport,
+ NFP_FL_ACTION_OPCODE_SET_UDP);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (err)
+ return err;
+ }
+
+ if (set_eth.head.len_lw) {
+ act_size = sizeof(set_eth);
+ memcpy(nfp_action, &set_eth, act_size);
+ *a_len += act_size;
+ } else if (set_ip_addr.head.len_lw) {
+ act_size = sizeof(set_ip_addr);
+ memcpy(nfp_action, &set_ip_addr, act_size);
+ *a_len += act_size;
+ } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
+ /* TC compiles set src and dst IPv6 address as a single action;
+ * the hardware requires this to be 2 separate actions.
+ */
+ act_size = sizeof(set_ip6_src);
+ memcpy(nfp_action, &set_ip6_src, act_size);
+ *a_len += act_size;
+
+ act_size = sizeof(set_ip6_dst);
+ memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
+ act_size);
+ *a_len += act_size;
+ } else if (set_ip6_dst.head.len_lw) {
+ act_size = sizeof(set_ip6_dst);
+ memcpy(nfp_action, &set_ip6_dst, act_size);
+ *a_len += act_size;
+ } else if (set_ip6_src.head.len_lw) {
+ act_size = sizeof(set_ip6_src);
+ memcpy(nfp_action, &set_ip6_src, act_size);
+ *a_len += act_size;
+ } else if (set_tport.head.len_lw) {
+ act_size = sizeof(set_tport);
+ memcpy(nfp_action, &set_tport, act_size);
+ *a_len += act_size;
+ }
return 0;
}
@@ -125,8 +432,11 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
static int
nfp_flower_loop_action(const struct tc_action *a,
struct nfp_fl_payload *nfp_fl, int *a_len,
- struct net_device *netdev)
+ struct net_device *netdev,
+ enum nfp_flower_tun_type *tun_type, int *tun_out_cnt)
{
+ struct nfp_fl_pre_tunnel *pre_tun;
+ struct nfp_fl_set_vxlan *s_vxl;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_pop_vlan *pop_v;
struct nfp_fl_output *output;
@@ -139,7 +449,8 @@ nfp_flower_loop_action(const struct tc_action *a,
return -EOPNOTSUPP;
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_output(output, a, nfp_fl, true, netdev);
+ err = nfp_fl_output(output, a, nfp_fl, true, netdev, *tun_type,
+ tun_out_cnt);
if (err)
return err;
@@ -149,7 +460,8 @@ nfp_flower_loop_action(const struct tc_action *a,
return -EOPNOTSUPP;
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_output(output, a, nfp_fl, false, netdev);
+ err = nfp_fl_output(output, a, nfp_fl, false, netdev, *tun_type,
+ tun_out_cnt);
if (err)
return err;
@@ -172,6 +484,32 @@ nfp_flower_loop_action(const struct tc_action *a,
nfp_fl_push_vlan(psh_v, a);
*a_len += sizeof(struct nfp_fl_push_vlan);
+ } else if (is_tcf_tunnel_set(a) && nfp_fl_supported_tun_port(a)) {
+ /* Pre-tunnel action is required for tunnel encap.
+ * This checks for next hop entries on NFP.
+ * If none, the packet falls back before applying other actions.
+ */
+ if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
+ sizeof(struct nfp_fl_set_vxlan) > NFP_FL_MAX_A_SIZ)
+ return -EOPNOTSUPP;
+
+ *tun_type = NFP_FL_TUNNEL_VXLAN;
+ pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+ *a_len += sizeof(struct nfp_fl_pre_tunnel);
+
+ s_vxl = (struct nfp_fl_set_vxlan *)&nfp_fl->action_data[*a_len];
+ err = nfp_fl_set_vxlan(s_vxl, a, pre_tun);
+ if (err)
+ return err;
+
+ *a_len += sizeof(struct nfp_fl_set_vxlan);
+ } else if (is_tcf_tunnel_release(a)) {
+ /* Tunnel decap is handled by default so accept action. */
+ return 0;
+ } else if (is_tcf_pedit(a)) {
+ if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len))
+ return -EOPNOTSUPP;
} else {
/* Currently we do not handle any other actions. */
return -EOPNOTSUPP;
@@ -184,18 +522,22 @@ int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow)
{
- int act_len, act_cnt, err;
+ int act_len, act_cnt, err, tun_out_cnt;
+ enum nfp_flower_tun_type tun_type;
const struct tc_action *a;
LIST_HEAD(actions);
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
nfp_flow->meta.act_len = 0;
+ tun_type = NFP_FL_TUNNEL_NONE;
act_len = 0;
act_cnt = 0;
+ tun_out_cnt = 0;
tcf_exts_to_list(flow->exts, &actions);
list_for_each_entry(a, &actions, list) {
- err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev);
+ err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev,
+ &tun_type, &tun_out_cnt);
if (err)
return err;
act_cnt++;
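
For tunnel egress, nfp_fl_output() above encodes the port as NFP_FL_PORT_TYPE_TUN OR'ed with the tunnel type instead of a representor port id. A small standalone sketch of that encoding (constants copied from cmsg.h below; not part of the patch):

#include <assert.h>
#include <stdint.h>

#define NFP_FL_PORT_TYPE_TUN 0x50000000u
#define NFP_FL_TUNNEL_VXLAN  2u

int main(void)
{
	uint32_t port = NFP_FL_PORT_TYPE_TUN | NFP_FL_TUNNEL_VXLAN;

	assert(port == 0x50000002u); /* written big-endian via cpu_to_be32() */
	return 0;
}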
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index c3ca05d10fe1..e98bb9cdb6a3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -38,17 +38,10 @@
#include <net/dst_metadata.h>
#include "main.h"
-#include "../nfpcore/nfp_cpp.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "./cmsg.h"
-#define nfp_flower_cmsg_warn(app, fmt, args...) \
- do { \
- if (net_ratelimit()) \
- nfp_warn((app)->cpp, fmt, ## args); \
- } while (0)
-
static struct nfp_flower_cmsg_hdr *
nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
{
@@ -57,14 +50,14 @@ nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
struct sk_buff *
nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
- enum nfp_flower_cmsg_type_port type)
+ enum nfp_flower_cmsg_type_port type, gfp_t flag)
{
struct nfp_flower_cmsg_hdr *ch;
struct sk_buff *skb;
size += NFP_FLOWER_CMSG_HLEN;
- skb = nfp_app_ctrl_msg_alloc(app, size, GFP_KERNEL);
+ skb = nfp_app_ctrl_msg_alloc(app, size, flag);
if (!skb)
return NULL;
@@ -85,7 +78,8 @@ nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports)
unsigned int size;
size = sizeof(*msg) + num_ports * sizeof(msg->ports[0]);
- skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR);
+ skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR,
+ GFP_KERNEL);
if (!skb)
return NULL;
@@ -116,7 +110,7 @@ int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
struct sk_buff *skb;
skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
- NFP_FLOWER_CMSG_TYPE_PORT_MOD);
+ NFP_FLOWER_CMSG_TYPE_PORT_MOD, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -188,6 +182,15 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
case NFP_FLOWER_CMSG_TYPE_FLOW_STATS:
nfp_flower_rx_flow_stats(app, skb);
break;
+ case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
+ nfp_tunnel_request_route(app, skb);
+ break;
+ case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
+ nfp_tunnel_keep_alive(app, skb);
+ break;
+ case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH:
+ /* Acks from the NFP that the route is added - ignore. */
+ break;
default:
nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
type);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index a2ec60344236..66070741d55f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -39,6 +39,7 @@
#include <linux/types.h>
#include "../nfp_app.h"
+#include "../nfpcore/nfp_cpp.h"
#define NFP_FLOWER_LAYER_META BIT(0)
#define NFP_FLOWER_LAYER_PORT BIT(1)
@@ -56,6 +57,11 @@
#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
+#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
+#define NFP_FLOWER_MASK_MPLS_TC GENMASK(11, 9)
+#define NFP_FLOWER_MASK_MPLS_BOS BIT(8)
+#define NFP_FLOWER_MASK_MPLS_Q BIT(0)
+
#define NFP_FL_SC_ACT_DROP 0x80000000
#define NFP_FL_SC_ACT_USER 0x7D000000
#define NFP_FL_SC_ACT_POPV 0x6A000000
@@ -67,13 +73,18 @@
#define NFP_FL_LW_SIZ 2
/* Action opcodes */
-#define NFP_FL_ACTION_OPCODE_OUTPUT 0
-#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
-#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
-#define NFP_FL_ACTION_OPCODE_NUM 32
-
-#define NFP_FL_ACT_JMP_ID GENMASK(15, 8)
-#define NFP_FL_ACT_LEN_LW GENMASK(7, 0)
+#define NFP_FL_ACTION_OPCODE_OUTPUT 0
+#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
+#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
+#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6
+#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
+#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12
+#define NFP_FL_ACTION_OPCODE_SET_UDP 14
+#define NFP_FL_ACTION_OPCODE_SET_TCP 15
+#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17
+#define NFP_FL_ACTION_OPCODE_NUM 32
#define NFP_FL_OUT_FLAGS_LAST BIT(15)
#define NFP_FL_OUT_FLAGS_USE_TUN BIT(4)
@@ -83,21 +94,74 @@
#define NFP_FL_PUSH_VLAN_CFI BIT(12)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
+/* Tunnel ports */
+#define NFP_FL_PORT_TYPE_TUN 0x50000000
+#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4)
+#define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0)
+
+#define nfp_flower_cmsg_warn(app, fmt, args...) \
+ do { \
+ if (net_ratelimit()) \
+ nfp_warn((app)->cpp, fmt, ## args); \
+ } while (0)
+
+enum nfp_flower_tun_type {
+ NFP_FL_TUNNEL_NONE = 0,
+ NFP_FL_TUNNEL_VXLAN = 2,
+};
+
+struct nfp_fl_act_head {
+ u8 jump_id;
+ u8 len_lw;
+};
+
+struct nfp_fl_set_eth {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ u8 eth_addr_mask[ETH_ALEN * 2];
+ u8 eth_addr_val[ETH_ALEN * 2];
+};
+
+struct nfp_fl_set_ip4_addrs {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be32 ipv4_src_mask;
+ __be32 ipv4_src;
+ __be32 ipv4_dst_mask;
+ __be32 ipv4_dst;
+};
+
+struct nfp_fl_set_ipv6_addr {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ struct {
+ __be32 mask;
+ __be32 exact;
+ } ipv6[4];
+};
+
+struct nfp_fl_set_tport {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ u8 tp_port_mask[4];
+ u8 tp_port_val[4];
+};
+
struct nfp_fl_output {
- __be16 a_op;
+ struct nfp_fl_act_head head;
__be16 flags;
__be32 port;
};
struct nfp_fl_push_vlan {
- __be16 a_op;
+ struct nfp_fl_act_head head;
__be16 reserved;
__be16 vlan_tpid;
__be16 vlan_tci;
};
struct nfp_fl_pop_vlan {
- __be16 a_op;
+ struct nfp_fl_act_head head;
__be16 reserved;
};
@@ -115,6 +179,25 @@ struct nfp_flower_meta_one {
u16 reserved;
};
+struct nfp_fl_pre_tunnel {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be32 ipv4_dst;
+ /* reserved for use with IPv6 addresses */
+ __be32 extra[3];
+};
+
+struct nfp_fl_set_vxlan {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be64 tun_id;
+ __be32 tun_type_index;
+ __be16 tun_flags;
+ u8 ipv4_ttl;
+ u8 ipv4_tos;
+ __be32 extra[2];
+} __packed;
+
/* Metadata with L2 (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
@@ -230,6 +313,36 @@ struct nfp_flower_ipv6 {
struct in6_addr ipv6_dst;
};
+/* Flow Frame VXLAN --> Tunnel details (4W/16B)
+ * -----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_src |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_dst |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | tun_flags | tos | ttl |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | gpe_flags | Reserved | Next Protocol |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VNI | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_vxlan {
+ __be32 ip_src;
+ __be32 ip_dst;
+ __be16 tun_flags;
+ u8 tos;
+ u8 ttl;
+ u8 gpe_flags;
+ u8 reserved[2];
+ u8 nxt_proto;
+ __be32 tun_id;
+};
+
+#define NFP_FL_TUN_VNI_OFFSET 8
+
/* The base header for a control message packet.
* Defines an 8-bit version, and an 8-bit type, padded
* to a 32-bit word. Rest of the packet is type-specific.
@@ -249,6 +362,11 @@ enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7,
NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
+ NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10,
+ NFP_FLOWER_CMSG_TYPE_TUN_MAC = 11,
+ NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS = 12,
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH = 13,
+ NFP_FLOWER_CMSG_TYPE_TUN_IPS = 14,
NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15,
NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16,
NFP_FLOWER_CMSG_TYPE_MAX = 32,
@@ -282,6 +400,7 @@ enum nfp_flower_cmsg_port_type {
NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0,
NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1,
NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT = 0x2,
+ NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT = 0x3,
};
enum nfp_flower_cmsg_port_vnic_type {
@@ -323,6 +442,11 @@ static inline void *nfp_flower_cmsg_get_data(struct sk_buff *skb)
return (unsigned char *)skb->data + NFP_FLOWER_CMSG_HLEN;
}
+static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb)
+{
+ return skb->len - NFP_FLOWER_CMSG_HLEN;
+}
+
struct sk_buff *
nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports);
void
@@ -334,6 +458,6 @@ void nfp_flower_cmsg_process_rx(struct work_struct *work);
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
struct sk_buff *
nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
- enum nfp_flower_cmsg_type_port type);
+ enum nfp_flower_cmsg_type_port type, gfp_t flag);
#endif
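
The len_lw fields above carry sizes in 32-bit longwords: NFP_FL_LW_SIZ is 2, so byte sizes are shifted right by two bits. A standalone sketch of the arithmetic (struct sizes taken from the definitions above, assuming no padding):

#include <assert.h>
#include <stddef.h>

#define NFP_FL_LW_SIZ 2

int main(void)
{
	size_t pop_vlan_bytes = 4; /* head (2) + reserved (2) */
	size_t output_bytes = 8;   /* head (2) + flags (2) + port (4) */

	assert((pop_vlan_bytes >> NFP_FL_LW_SIZ) == 1);
	assert((output_bytes >> NFP_FL_LW_SIZ) == 2);
	return 0;
}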
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 91fe03617106..8fcc90c0d2d3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -125,6 +125,21 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
return nfp_flower_cmsg_portmod(repr, false);
}
+static int
+nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
+{
+ return tc_setup_cb_egdev_register(netdev,
+ nfp_flower_setup_tc_egress_cb,
+ netdev_priv(netdev));
+}
+
+static void
+nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
+{
+ tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
+ netdev_priv(netdev));
+}
+
static void nfp_flower_sriov_disable(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
@@ -142,8 +157,8 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
{
u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
struct nfp_flower_priv *priv = app->priv;
- struct nfp_reprs *reprs, *old_reprs;
enum nfp_port_type port_type;
+ struct nfp_reprs *reprs;
const u8 queue = 0;
int i, err;
@@ -194,11 +209,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
reprs->reprs[i]->name);
}
- old_reprs = nfp_app_reprs_set(app, repr_type, reprs);
- if (IS_ERR(old_reprs)) {
- err = PTR_ERR(old_reprs);
- goto err_reprs_clean;
- }
+ nfp_app_reprs_set(app, repr_type, reprs);
return 0;
err_reprs_clean:
@@ -222,8 +233,8 @@ static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
- struct nfp_reprs *reprs, *old_reprs;
struct sk_buff *ctrl_skb;
+ struct nfp_reprs *reprs;
unsigned int i;
int err;
@@ -280,11 +291,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
phys_port, reprs->reprs[phys_port]->name);
}
- old_reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
- if (IS_ERR(old_reprs)) {
- err = PTR_ERR(old_reprs);
- goto err_reprs_clean;
- }
+ nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
/* The MAC_REPR control message should be sent after the MAC
* representors are registered using nfp_app_reprs_set(). This is
@@ -436,6 +443,16 @@ static void nfp_flower_clean(struct nfp_app *app)
app->priv = NULL;
}
+static int nfp_flower_start(struct nfp_app *app)
+{
+ return nfp_tunnel_config_start(app);
+}
+
+static void nfp_flower_stop(struct nfp_app *app)
+{
+ nfp_tunnel_config_stop(app);
+}
+
const struct nfp_app_type app_flower = {
.id = NFP_APP_FLOWER_NIC,
.name = "flower",
@@ -450,9 +467,15 @@ const struct nfp_app_type app_flower = {
.vnic_init = nfp_flower_vnic_init,
.vnic_clean = nfp_flower_vnic_clean,
+ .repr_init = nfp_flower_repr_netdev_init,
+ .repr_clean = nfp_flower_repr_netdev_clean,
+
.repr_open = nfp_flower_repr_netdev_open,
.repr_stop = nfp_flower_repr_netdev_stop,
+ .start = nfp_flower_start,
+ .stop = nfp_flower_stop,
+
.ctrl_msg_rx = nfp_flower_cmsg_rx,
.sriov_enable = nfp_flower_sriov_enable,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index c20dd00a1cae..e6b26c5ae6e0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -52,12 +52,13 @@ struct nfp_app;
#define NFP_FLOWER_MASK_ELEMENT_RS 1
#define NFP_FLOWER_MASK_HASH_BITS 10
-#define NFP_FL_META_FLAG_NEW_MASK 128
-#define NFP_FL_META_FLAG_LAST_MASK 1
+#define NFP_FL_META_FLAG_MANAGE_MASK BIT(7)
#define NFP_FL_MASK_REUSE_TIME_NS 40000
#define NFP_FL_MASK_ID_LOCATION 1
+#define NFP_FL_VXLAN_PORT 4789
+
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
struct timespec64 *last_used;
@@ -82,6 +83,18 @@ struct nfp_fl_stats_id {
* @flow_table: Hash table used to store flower rules
* @cmsg_work: Workqueue for control messages processing
* @cmsg_skbs: List of skbs for control message processing
+ * @nfp_mac_off_list: List of MAC addresses to offload
+ * @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs
+ * @nfp_ipv4_off_list: List of IPv4 addresses to offload
+ * @nfp_neigh_off_list: List of neighbour offloads
+ * @nfp_mac_off_lock: Lock for the MAC address list
+ * @nfp_mac_index_lock: Lock for the MAC index list
+ * @nfp_ipv4_off_lock: Lock for the IPv4 address list
+ * @nfp_neigh_off_lock: Lock for the neighbour address list
+ * @nfp_mac_off_ids: IDA to manage id assignment for offloaded macs
+ * @nfp_mac_off_count: Number of MACs in address list
+ * @nfp_tun_mac_nb: Notifier to monitor link state
+ * @nfp_tun_neigh_nb: Notifier to monitor neighbour state
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -94,6 +107,18 @@ struct nfp_flower_priv {
DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
struct work_struct cmsg_work;
struct sk_buff_head cmsg_skbs;
+ struct list_head nfp_mac_off_list;
+ struct list_head nfp_mac_index_list;
+ struct list_head nfp_ipv4_off_list;
+ struct list_head nfp_neigh_off_list;
+ struct mutex nfp_mac_off_lock;
+ struct mutex nfp_mac_index_lock;
+ struct mutex nfp_ipv4_off_lock;
+ spinlock_t nfp_neigh_off_lock;
+ struct ida nfp_mac_off_ids;
+ int nfp_mac_off_count;
+ struct notifier_block nfp_tun_mac_nb;
+ struct notifier_block nfp_tun_neigh_nb;
};
struct nfp_fl_key_ls {
@@ -126,6 +151,7 @@ struct nfp_fl_payload {
struct rcu_head rcu;
spinlock_t lock; /* lock stats */
struct nfp_fl_stats stats;
+ __be32 nfp_tun_ipv4_addr;
char *unmasked_data;
char *mask_data;
char *action_data;
@@ -163,4 +189,14 @@ nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
+int nfp_tunnel_config_start(struct nfp_app *app);
+void nfp_tunnel_config_stop(struct nfp_app *app);
+void nfp_tunnel_write_macs(struct nfp_app *app);
+void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
+void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
+void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
+void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
+int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index d25b5038c3a2..60614d4f0e22 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -77,14 +77,17 @@ nfp_flower_compile_meta(struct nfp_flower_meta_one *frame, u8 key_type)
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
- bool mask_version)
+ bool mask_version, enum nfp_flower_tun_type tun_type)
{
if (mask_version) {
frame->in_port = cpu_to_be32(~0);
return 0;
}
- frame->in_port = cpu_to_be32(cmsg_port);
+ if (tun_type)
+ frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
+ else
+ frame->in_port = cpu_to_be32(cmsg_port);
return 0;
}
@@ -108,8 +111,21 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
ether_addr_copy(frame->mac_src, &addr->src[0]);
}
- if (mask_version)
- frame->mpls_lse = cpu_to_be32(~0);
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
+ struct flow_dissector_key_mpls *mpls;
+ u32 t_mpls;
+
+ mpls = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_MPLS,
+ target);
+
+ t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, mpls->mpls_label) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, mpls->mpls_tc) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, mpls->mpls_bos) |
+ NFP_FLOWER_MASK_MPLS_Q;
+
+ frame->mpls_lse = cpu_to_be32(t_mpls);
+ }
}
static void
@@ -140,7 +156,6 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
struct flow_dissector_key_ipv4_addrs *addr;
struct flow_dissector_key_basic *basic;
- /* Wildcard TOS/TTL for now. */
memset(frame, 0, sizeof(struct nfp_flower_ipv4));
if (dissector_uses_key(flow->dissector,
@@ -158,6 +173,16 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
target);
frame->proto = basic->ip_proto;
}
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_dissector_key_ip *flow_ip;
+
+ flow_ip = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ target);
+ frame->tos = flow_ip->tos;
+ frame->ttl = flow_ip->ttl;
+ }
}
static void
@@ -169,7 +194,6 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
struct flow_dissector_key_ipv6_addrs *addr;
struct flow_dissector_key_basic *basic;
- /* Wildcard LABEL/TOS/TTL for now. */
memset(frame, 0, sizeof(struct nfp_flower_ipv6));
if (dissector_uses_key(flow->dissector,
@@ -187,6 +211,51 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
target);
frame->proto = basic->ip_proto;
}
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_dissector_key_ip *flow_ip;
+
+ flow_ip = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ target);
+ frame->tos = flow_ip->tos;
+ frame->ttl = flow_ip->ttl;
+ }
+}
+
+static void
+nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version, __be32 *tun_dst)
+{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_dissector_key_ipv4_addrs *vxlan_ips;
+ struct flow_dissector_key_keyid *vni;
+
+ /* Wildcard TOS/TTL/GPE_FLAGS/NXT_PROTO for now. */
+ memset(frame, 0, sizeof(struct nfp_flower_vxlan));
+
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ u32 temp_vni;
+
+ vni = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ target);
+ temp_vni = be32_to_cpu(vni->keyid) << NFP_FL_TUN_VNI_OFFSET;
+ frame->tun_id = cpu_to_be32(temp_vni);
+ }
+
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ vxlan_ips =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
+ target);
+ frame->ip_src = vxlan_ips->src;
+ frame->ip_dst = vxlan_ips->dst;
+ *tun_dst = vxlan_ips->dst;
+ }
}
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
@@ -194,10 +263,16 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow)
{
+ enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
+ __be32 tun_dst, tun_dst_mask = 0;
+ struct nfp_repr *netdev_repr;
int err;
u8 *ext;
u8 *msk;
+ if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN)
+ tun_type = NFP_FL_TUNNEL_VXLAN;
+
memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
@@ -216,14 +291,14 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
/* Populate Exact Port data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
nfp_repr_get_port_id(netdev),
- false);
+ false, tun_type);
if (err)
return err;
/* Populate Mask Port Data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
nfp_repr_get_port_id(netdev),
- true);
+ true, tun_type);
if (err)
return err;
@@ -291,5 +366,28 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
msk += sizeof(struct nfp_flower_ipv6);
}
+ if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) {
+ /* Populate Exact VXLAN Data. */
+ nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext,
+ flow, false, &tun_dst);
+ /* Populate Mask VXLAN Data. */
+ nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk,
+ flow, true, &tun_dst_mask);
+ ext += sizeof(struct nfp_flower_vxlan);
+ msk += sizeof(struct nfp_flower_vxlan);
+
+ /* Configure tunnel end point MAC. */
+ if (nfp_netdev_is_nfp_repr(netdev)) {
+ netdev_repr = netdev_priv(netdev);
+ nfp_tunnel_write_macs(netdev_repr->app);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = tun_dst;
+ nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
+ }
+ }
+
return 0;
}
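
nfp_flower_compile_vxlan() above shifts the VNI left by NFP_FL_TUN_VNI_OFFSET because the 24-bit VNI occupies the upper three bytes of the 32-bit tun_id word, with the low byte reserved in the VXLAN header. A standalone model (example VNI assumed):

#include <assert.h>
#include <stdint.h>

#define NFP_FL_TUN_VNI_OFFSET 8

int main(void)
{
	uint32_t vni = 1234; /* host-order key id from the flow dissector */
	uint32_t tun_id = vni << NFP_FL_TUN_VNI_OFFSET;

	assert((tun_id >> NFP_FL_TUN_VNI_OFFSET) == vni);
	assert((tun_id & 0xff) == 0); /* reserved low byte stays clear */
	return 0;
}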
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 3226ddc55f99..db977cf8e933 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -140,7 +140,7 @@ exit_rcu_unlock:
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
- unsigned int msg_len = skb->len - NFP_FLOWER_CMSG_HLEN;
+ unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
struct nfp_fl_stats_frame *stats_frame;
unsigned char *msg;
int i;
@@ -282,7 +282,7 @@ nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
id = nfp_add_mask_table(app, mask_data, mask_len);
if (id < 0)
return false;
- *meta_flags |= NFP_FL_META_FLAG_NEW_MASK;
+ *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
}
*mask_id = id;
@@ -299,6 +299,9 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
if (!mask_entry)
return false;
+ if (meta_flags)
+ *meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
+
*mask_id = mask_entry->mask_id;
mask_entry->ref_cnt--;
if (!mask_entry->ref_cnt) {
@@ -306,7 +309,7 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
nfp_release_mask_id(app, *mask_id);
kfree(mask_entry);
if (meta_flags)
- *meta_flags |= NFP_FL_META_FLAG_LAST_MASK;
+ *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
}
return true;
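
The metadata.c change above folds the NEW_MASK/LAST_MASK pair into one NFP_FL_META_FLAG_MANAGE_MASK bit: set on an add when the mask is first allocated, and on a delete when the last reference goes away. A standalone refcount sketch of that behaviour (userspace model, helper name assumed):

#include <assert.h>
#include <stdbool.h>

#define MANAGE_MASK 0x80 /* BIT(7), as in main.h below */

/* Returns the meta flags for an add (grab=true) or delete (grab=false). */
static unsigned char mask_meta_flags(int *ref_cnt, bool grab)
{
	if (grab && (*ref_cnt)++ == 0)
		return MANAGE_MASK; /* first user: NFP must add the mask */
	if (!grab && --(*ref_cnt) == 0)
		return MANAGE_MASK; /* last user: NFP must free the mask */
	return 0;
}

int main(void)
{
	int refs = 0;

	assert(mask_meta_flags(&refs, true) == MANAGE_MASK);  /* first add */
	assert(mask_meta_flags(&refs, true) == 0);            /* mask reused */
	assert(mask_meta_flags(&refs, false) == 0);           /* still in use */
	assert(mask_meta_flags(&refs, false) == MANAGE_MASK); /* last delete */
	return 0;
}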
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index a18b4d2b1d3e..553f94f55dce 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -52,8 +52,26 @@
BIT(FLOW_DISSECTOR_KEY_PORTS) | \
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_VLAN) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
+ BIT(FLOW_DISSECTOR_KEY_MPLS) | \
BIT(FLOW_DISSECTOR_KEY_IP))
+#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
+ (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+
+#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
+ (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+
static int
nfp_flower_xmit_flow(struct net_device *netdev,
struct nfp_fl_payload *nfp_flow, u8 mtype)
@@ -77,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
- skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype);
+ skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -113,11 +131,11 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
static int
nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
- struct tc_cls_flower_offload *flow)
+ struct tc_cls_flower_offload *flow,
+ bool egress)
{
struct flow_dissector_key_basic *mask_basic = NULL;
struct flow_dissector_key_basic *key_basic = NULL;
- struct flow_dissector_key_ip *mask_ip = NULL;
u32 key_layer_two;
u8 key_layer;
int key_size;
@@ -125,15 +143,64 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
return -EOPNOTSUPP;
+ /* If any tun dissector is used then the required set must be used. */
+ if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
+ (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
+ != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
+ return -EOPNOTSUPP;
+
+ key_layer_two = 0;
+ key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
+ key_size = sizeof(struct nfp_flower_meta_one) +
+ sizeof(struct nfp_flower_in_port) +
+ sizeof(struct nfp_flower_mac_mpls);
+
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
+ struct flow_dissector_key_ports *mask_enc_ports = NULL;
+ struct flow_dissector_key_ports *enc_ports = NULL;
struct flow_dissector_key_control *mask_enc_ctl =
skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
flow->mask);
- /* We are expecting a tunnel. For now we ignore offloading. */
- if (mask_enc_ctl->addr_type)
+ struct flow_dissector_key_control *enc_ctl =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_CONTROL,
+ flow->key);
+ if (!egress)
+ return -EOPNOTSUPP;
+
+ if (mask_enc_ctl->addr_type != 0xffff ||
+ enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
+ return -EOPNOTSUPP;
+
+ /* These fields are already verified as used. */
+ mask_ipv4 =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
+ flow->mask);
+ if (mask_ipv4->dst != cpu_to_be32(~0))
return -EOPNOTSUPP;
+
+ mask_enc_ports =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_PORTS,
+ flow->mask);
+ enc_ports =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_PORTS,
+ flow->key);
+
+ if (mask_enc_ports->dst != cpu_to_be16(~0) ||
+ enc_ports->dst != htons(NFP_FL_VXLAN_PORT))
+ return -EOPNOTSUPP;
+
+ key_layer |= NFP_FLOWER_LAYER_VXLAN;
+ key_size += sizeof(struct nfp_flower_vxlan);
+ } else if (egress) {
+ /* Reject non tunnel matches offloaded to egress repr. */
+ return -EOPNOTSUPP;
}
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -146,34 +213,15 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
flow->key);
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP))
- mask_ip = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IP,
- flow->mask);
-
- key_layer_two = 0;
- key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
- key_size = sizeof(struct nfp_flower_meta_one) +
- sizeof(struct nfp_flower_in_port) +
- sizeof(struct nfp_flower_mac_mpls);
-
if (mask_basic && mask_basic->n_proto) {
/* Ethernet type is present in the key. */
switch (key_basic->n_proto) {
case cpu_to_be16(ETH_P_IP):
- if (mask_ip && mask_ip->tos)
- return -EOPNOTSUPP;
- if (mask_ip && mask_ip->ttl)
- return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV4;
key_size += sizeof(struct nfp_flower_ipv4);
break;
case cpu_to_be16(ETH_P_IPV6):
- if (mask_ip && mask_ip->tos)
- return -EOPNOTSUPP;
- if (mask_ip && mask_ip->ttl)
- return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV6;
key_size += sizeof(struct nfp_flower_ipv6);
break;
@@ -184,11 +232,6 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
case cpu_to_be16(ETH_P_ARP):
return -EOPNOTSUPP;
- /* Currently we do not offload MPLS. */
- case cpu_to_be16(ETH_P_MPLS_UC):
- case cpu_to_be16(ETH_P_MPLS_MC):
- return -EOPNOTSUPP;
-
/* Will be included in layer 2. */
case cpu_to_be16(ETH_P_8021Q):
break;
@@ -252,6 +295,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
if (!flow_pay->action_data)
goto err_free_mask;
+ flow_pay->nfp_tun_ipv4_addr = 0;
flow_pay->meta.flags = 0;
spin_lock_init(&flow_pay->lock);
@@ -271,6 +315,7 @@ err_free_flow:
* @app: Pointer to the APP handle
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure.
+ * @egress: NFP netdev is the egress.
*
* Adds a new flow to the repeated hash structure and action payload.
*
@@ -278,7 +323,7 @@ err_free_flow:
*/
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow)
+ struct tc_cls_flower_offload *flow, bool egress)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
@@ -289,7 +334,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (!key_layer)
return -ENOMEM;
- err = nfp_flower_calculate_key_layers(key_layer, flow);
+ err = nfp_flower_calculate_key_layers(key_layer, flow, egress);
if (err)
goto err_free_key_ls;
@@ -361,6 +406,9 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_free_flow;
+ if (nfp_flow->nfp_tun_ipv4_addr)
+ nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
+
err = nfp_flower_xmit_flow(netdev, nfp_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
if (err)
@@ -407,11 +455,15 @@ nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flower)
+ struct tc_cls_flower_offload *flower, bool egress)
{
+ if (!eth_proto_is_802_3(flower->common.protocol) ||
+ flower->common.chain_index)
+ return -EOPNOTSUPP;
+
switch (flower->command) {
case TC_CLSFLOWER_REPLACE:
- return nfp_flower_add_offload(app, netdev, flower);
+ return nfp_flower_add_offload(app, netdev, flower, egress);
case TC_CLSFLOWER_DESTROY:
return nfp_flower_del_offload(app, netdev, flower);
case TC_CLSFLOWER_STATS:
@@ -421,16 +473,70 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
-int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
- enum tc_setup_type type, void *type_data)
+int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct nfp_repr *repr = cb_priv;
+
+ if (!tc_can_offload(repr->netdev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return nfp_flower_repr_offload(repr->app, repr->netdev,
+ type_data, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
{
- struct tc_cls_flower_offload *cls_flower = type_data;
+ struct nfp_repr *repr = cb_priv;
- if (type != TC_SETUP_CLSFLOWER ||
- !is_classid_clsact_ingress(cls_flower->common.classid) ||
- !eth_proto_is_802_3(cls_flower->common.protocol) ||
- cls_flower->common.chain_index)
+ if (!tc_can_offload(repr->netdev))
return -EOPNOTSUPP;
- return nfp_flower_repr_offload(app, netdev, cls_flower);
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return nfp_flower_repr_offload(repr->app, repr->netdev,
+ type_data, false);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nfp_flower_setup_tc_block(struct net_device *netdev,
+ struct tc_block_offload *f)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block,
+ nfp_flower_setup_tc_block_cb,
+ repr, repr);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block,
+ nfp_flower_setup_tc_block_cb,
+ repr);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return nfp_flower_setup_tc_block(netdev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
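
The whitelist logic added in nfp_flower_calculate_key_layers() above requires that, once any tunnel dissector key is used, the mandatory subset (_R) is present in full. A standalone model of that check (bit values assumed for illustration, not the kernel's):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define KEY_ENC_CONTROL    (1u << 0)
#define KEY_ENC_KEYID      (1u << 1)
#define KEY_ENC_IPV4_ADDRS (1u << 2)
#define KEY_ENC_IPV6_ADDRS (1u << 3)
#define KEY_ENC_PORTS      (1u << 4)

#define TUN_DISSECTOR   (KEY_ENC_CONTROL | KEY_ENC_KEYID | \
                         KEY_ENC_IPV4_ADDRS | KEY_ENC_IPV6_ADDRS | \
                         KEY_ENC_PORTS)
#define TUN_DISSECTOR_R (KEY_ENC_CONTROL | KEY_ENC_IPV4_ADDRS | \
                         KEY_ENC_PORTS)

static bool tun_keys_ok(uint32_t used_keys)
{
	/* Any tunnel key present => the required subset must be complete. */
	return !(used_keys & TUN_DISSECTOR) ||
	       (used_keys & TUN_DISSECTOR_R) == TUN_DISSECTOR_R;
}

int main(void)
{
	assert(tun_keys_ok(0));                               /* no tunnel keys */
	assert(tun_keys_ok(TUN_DISSECTOR_R | KEY_ENC_KEYID)); /* full set + VNI */
	assert(!tun_keys_ok(KEY_ENC_KEYID));                  /* VNI alone */
	return 0;
}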
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
new file mode 100644
index 000000000000..b03f22f29612
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -0,0 +1,804 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <net/netevent.h>
+#include <linux/idr.h>
+#include <net/dst_metadata.h>
+#include <net/arp.h>
+
+#include "cmsg.h"
+#include "main.h"
+#include "../nfp_net_repr.h"
+#include "../nfp_net.h"
+
+#define NFP_FL_MAX_ROUTES 32
+
+/**
+ * struct nfp_tun_active_tuns - periodic message of active tunnels
+ * @seq: sequence number of the message
+ * @count: number of tunnels reported in message
+ * @flags: options part of the request
+ * @ipv4: dest IPv4 address of active route
+ * @egress_port: port the encapsulated packet egressed
+ * @extra: reserved for future use
+ * @tun_info: tunnels that have sent traffic in reported period
+ */
+struct nfp_tun_active_tuns {
+ __be32 seq;
+ __be32 count;
+ __be32 flags;
+ struct route_ip_info {
+ __be32 ipv4;
+ __be32 egress_port;
+ __be32 extra[2];
+ } tun_info[];
+};
+
+/**
+ * struct nfp_tun_neigh - neighbour/route entry on the NFP
+ * @dst_ipv4: destination IPv4 address
+ * @src_ipv4: source IPv4 address
+ * @dst_addr: destination MAC address
+ * @src_addr: source MAC address
+ * @port_id: NFP port to output packet on - associated with source IPv4
+ */
+struct nfp_tun_neigh {
+ __be32 dst_ipv4;
+ __be32 src_ipv4;
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+ __be32 port_id;
+};
+
+/**
+ * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
+ * @ingress_port: ingress port of packet that signalled request
+ * @ipv4_addr: destination ipv4 address for route
+ * @reserved: reserved for future use
+ */
+struct nfp_tun_req_route_ipv4 {
+ __be32 ingress_port;
+ __be32 ipv4_addr;
+ __be32 reserved[2];
+};
+
+/**
+ * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
+ * @ipv4_addr: destination of route
+ * @list: list pointer
+ */
+struct nfp_ipv4_route_entry {
+ __be32 ipv4_addr;
+ struct list_head list;
+};
+
+#define NFP_FL_IPV4_ADDRS_MAX 32
+
+/**
+ * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
+ * @count: number of IPs populated in the array
+ * @ipv4_addr: array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
+ */
+struct nfp_tun_ipv4_addr {
+ __be32 count;
+ __be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
+};
+
+/**
+ * struct nfp_ipv4_addr_entry - cached IPv4 addresses
+ * @ipv4_addr: IP address
+ * @ref_count: number of rules currently using this IP
+ * @list: list pointer
+ */
+struct nfp_ipv4_addr_entry {
+ __be32 ipv4_addr;
+ int ref_count;
+ struct list_head list;
+};
+
+/**
+ * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP
+ * @reserved: reserved for future use
+ * @count: number of MAC addresses in the message
+ * @index: index of MAC address in the lookup table
+ * @addr: interface MAC address
+ * @addresses: series of MACs to offload
+ */
+struct nfp_tun_mac_addr {
+ __be16 reserved;
+ __be16 count;
+ struct index_mac_addr {
+ __be16 index;
+ u8 addr[ETH_ALEN];
+ } addresses[];
+};
+
+/**
+ * struct nfp_tun_mac_offload_entry - list of MACs to offload
+ * @index: index of MAC address for offloading
+ * @addr: interface MAC address
+ * @list: list pointer
+ */
+struct nfp_tun_mac_offload_entry {
+ __be16 index;
+ u8 addr[ETH_ALEN];
+ struct list_head list;
+};
+
+#define NFP_MAX_MAC_INDEX 0xff
+
+/**
+ * struct nfp_tun_mac_non_nfp_idx - converts non NFP netdev ifindex to 8-bit id
+ * @ifindex: netdev ifindex of the device
+ * @index: index of netdevs mac on NFP
+ * @list: list pointer
+ */
+struct nfp_tun_mac_non_nfp_idx {
+ int ifindex;
+ u8 index;
+ struct list_head list;
+};
+
+void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_tun_active_tuns *payload;
+ struct net_device *netdev;
+ int count, i, pay_len;
+ struct neighbour *n;
+ __be32 ipv4_addr;
+ u32 port;
+
+ payload = nfp_flower_cmsg_get_data(skb);
+ count = be32_to_cpu(payload->count);
+ if (count > NFP_FL_MAX_ROUTES) {
+ nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
+ return;
+ }
+
+ pay_len = nfp_flower_cmsg_get_data_len(skb);
+ if (pay_len != sizeof(struct nfp_tun_active_tuns) +
+ sizeof(struct route_ip_info) * count) {
+ nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+ ipv4_addr = payload->tun_info[i].ipv4;
+ port = be32_to_cpu(payload->tun_info[i].egress_port);
+ netdev = nfp_app_repr_get(app, port);
+ if (!netdev)
+ continue;
+
+ n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
+ if (!n)
+ continue;
+
+ /* Update the used timestamp of neighbour */
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ }
+}
+
+static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
+{
+ if (!netdev->rtnl_link_ops)
+ return false;
+ if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
+ return true;
+ if (!strcmp(netdev->rtnl_link_ops->kind, "vxlan"))
+ return true;
+
+ return false;
+}
+
+static int
+nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
+ gfp_t flag)
+{
+ struct sk_buff *skb;
+ unsigned char *msg;
+
+ skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
+ if (!skb)
+ return -ENOMEM;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));
+
+ nfp_ctrl_tx(app->ctrl, skb);
+ return 0;
+}
+
+static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_route_entry *entry;
+ struct list_head *ptr, *storage;
+
+ spin_lock_bh(&priv->nfp_neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
+ if (entry->ipv4_addr == ipv4_addr) {
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ return true;
+ }
+ }
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ return false;
+}
+
+static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_route_entry *entry;
+ struct list_head *ptr, *storage;
+
+ spin_lock_bh(&priv->nfp_neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
+ if (entry->ipv4_addr == ipv4_addr) {
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ return;
+ }
+ }
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry) {
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
+ return;
+ }
+
+ entry->ipv4_addr = ipv4_addr;
+ list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+}
+
+static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_route_entry *entry;
+ struct list_head *ptr, *storage;
+
+ spin_lock_bh(&priv->nfp_neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
+ if (entry->ipv4_addr == ipv4_addr) {
+ list_del(&entry->list);
+ kfree(entry);
+ break;
+ }
+ }
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+}
+
+static void
+nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
+ struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
+{
+ struct nfp_tun_neigh payload;
+
+ /* Only offload representor IPv4s for now. */
+ if (!nfp_netdev_is_nfp_repr(netdev))
+ return;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_neigh));
+ payload.dst_ipv4 = flow->daddr;
+
+ /* If the entry has expired, send the dst IP with all other fields 0. */
+ if (!(neigh->nud_state & NUD_VALID)) {
+ nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
+ /* Trigger ARP to verify invalid neighbour state. */
+ neigh_event_send(neigh, NULL);
+ goto send_msg;
+ }
+
+ /* Have a valid neighbour so populate rest of entry. */
+ payload.src_ipv4 = flow->saddr;
+ ether_addr_copy(payload.src_addr, netdev->dev_addr);
+ neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
+ payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
+ /* Add destination of new route to NFP cache. */
+ nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
+
+send_msg:
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
+ sizeof(struct nfp_tun_neigh),
+ (unsigned char *)&payload, flag);
+}
+
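+/* netevent notifier: react to neighbour updates and redirects, but only
+ * for representor netdevs and destinations already offloaded. Runs in
+ * atomic context, hence the GFP_ATOMIC writes.
+ */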
+static int
+nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct nfp_flower_priv *app_priv;
+ struct netevent_redirect *redir;
+ struct flowi4 flow = {};
+ struct neighbour *n;
+ struct nfp_app *app;
+ struct rtable *rt;
+ int err;
+
+ switch (event) {
+ case NETEVENT_REDIRECT:
+ redir = (struct netevent_redirect *)ptr;
+ n = redir->neigh;
+ break;
+ case NETEVENT_NEIGH_UPDATE:
+ n = (struct neighbour *)ptr;
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ flow.daddr = *(__be32 *)n->primary_key;
+
+ /* Only concerned with route changes for representors. */
+ if (!nfp_netdev_is_nfp_repr(n->dev))
+ return NOTIFY_DONE;
+
+ app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_neigh_nb);
+ app = app_priv->app;
+
+ /* Only concerned with changes to routes already added to NFP. */
+ if (!nfp_tun_has_route(app, flow.daddr))
+ return NOTIFY_DONE;
+
+#if IS_ENABLED(CONFIG_INET)
+ /* Do a route lookup to populate flow data. */
+ rt = ip_route_output_key(dev_net(n->dev), &flow);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (err)
+ return NOTIFY_DONE;
+
+ /* The lookup only fills in flow.saddr; release the route itself or
+ * its refcount is leaked.
+ */
+ ip_rt_put(rt);
+#else
+ return NOTIFY_DONE;
+#endif
+
+ flow.flowi4_proto = IPPROTO_UDP;
+ nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
+
+ return NOTIFY_OK;
+}
+
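+/* Handle a route request cmsg from the firmware: resolve a route and
+ * neighbour for the requested IP in the namespace of the ingress port
+ * and answer with a tunnel neighbour cmsg.
+ */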
+void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_tun_req_route_ipv4 *payload;
+ struct net_device *netdev;
+ struct flowi4 flow = {};
+ struct neighbour *n;
+ struct rtable *rt;
+ int err;
+
+ payload = nfp_flower_cmsg_get_data(skb);
+
+ netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
+ if (!netdev)
+ goto route_fail_warning;
+
+ flow.daddr = payload->ipv4_addr;
+ flow.flowi4_proto = IPPROTO_UDP;
+
+#if IS_ENABLED(CONFIG_INET)
+ /* Do a route lookup on same namespace as ingress port. */
+ rt = ip_route_output_key(dev_net(netdev), &flow);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (err)
+ goto route_fail_warning;
+#else
+ goto route_fail_warning;
+#endif
+
+ /* Get the neighbour entry for the route's next hop. */
+ n = dst_neigh_lookup(&rt->dst, &flow.daddr);
+ ip_rt_put(rt);
+ if (!n)
+ goto route_fail_warning;
+ nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
+ neigh_release(n);
+ return;
+
+route_fail_warning:
+ nfp_flower_cmsg_warn(app, "Requested route not found.\n");
+}
+
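+/* Push the list of offloaded tunnel endpoint IPv4 addresses to the
+ * firmware. The cmsg carries a full snapshot of the list, not a delta,
+ * so it is re-sent in its entirety on every change.
+ */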
+static void nfp_tun_write_ipv4_list(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_addr_entry *entry;
+ struct nfp_tun_ipv4_addr payload;
+ struct list_head *ptr, *storage;
+ int count;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
+ mutex_lock(&priv->nfp_ipv4_off_lock);
+ count = 0;
+ list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ if (count >= NFP_FL_IPV4_ADDRS_MAX) {
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+ nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
+ return;
+ }
+ entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+ payload.ipv4_addr[count++] = entry->ipv4_addr;
+ }
+ payload.count = cpu_to_be32(count);
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
+ sizeof(struct nfp_tun_ipv4_addr),
+ &payload, GFP_KERNEL);
+}
+
+void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_addr_entry *entry;
+ struct list_head *ptr, *storage;
+
+ mutex_lock(&priv->nfp_ipv4_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+ if (entry->ipv4_addr == ipv4) {
+ entry->ref_count++;
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+ return;
+ }
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+ nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
+ return;
+ }
+ entry->ipv4_addr = ipv4;
+ entry->ref_count = 1;
+ list_add_tail(&entry->list, &priv->nfp_ipv4_off_list);
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+ nfp_tun_write_ipv4_list(app);
+}
+
+void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_addr_entry *entry;
+ struct list_head *ptr, *storage;
+
+ mutex_lock(&priv->nfp_ipv4_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+ if (entry->ipv4_addr == ipv4) {
+ entry->ref_count--;
+ if (!entry->ref_count) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ break;
+ }
+ }
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+ nfp_tun_write_ipv4_list(app);
+}
+
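+/* Write the pending list of MAC addresses to the firmware. On success
+ * the list is flushed; on failure it is retained so a later call can
+ * retry the write.
+ */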
+void nfp_tunnel_write_macs(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_mac_offload_entry *entry;
+ struct nfp_tun_mac_addr *payload;
+ struct list_head *ptr, *storage;
+ int mac_count, err, pay_size;
+
+ mutex_lock(&priv->nfp_mac_off_lock);
+ if (!priv->nfp_mac_off_count) {
+ mutex_unlock(&priv->nfp_mac_off_lock);
+ return;
+ }
+
+ pay_size = sizeof(struct nfp_tun_mac_addr) +
+ sizeof(struct index_mac_addr) * priv->nfp_mac_off_count;
+
+ payload = kzalloc(pay_size, GFP_KERNEL);
+ if (!payload) {
+ mutex_unlock(&priv->nfp_mac_off_lock);
+ return;
+ }
+
+ payload->count = cpu_to_be16(priv->nfp_mac_off_count);
+
+ mac_count = 0;
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
+ entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
+ list);
+ payload->addresses[mac_count].index = entry->index;
+ ether_addr_copy(payload->addresses[mac_count].addr,
+ entry->addr);
+ mac_count++;
+ }
+
+ err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
+ pay_size, payload, GFP_KERNEL);
+
+ kfree(payload);
+
+ if (err) {
+ mutex_unlock(&priv->nfp_mac_off_lock);
+ /* Write failed so retain list for future retry. */
+ return;
+ }
+
+ /* If list was successfully offloaded, flush it. */
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
+ entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
+ list);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+
+ priv->nfp_mac_off_count = 0;
+ mutex_unlock(&priv->nfp_mac_off_lock);
+}
+
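+/* Look up, or allocate from the IDA, the unique index identifying a
+ * non-repr netdev's MAC to the firmware. Entries are keyed on ifindex
+ * so repeat lookups return the same index.
+ */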
+static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_mac_non_nfp_idx *entry;
+ struct list_head *ptr, *storage;
+ int idx;
+
+ mutex_lock(&priv->nfp_mac_index_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
+ entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
+ if (entry->ifindex == ifindex) {
+ idx = entry->index;
+ mutex_unlock(&priv->nfp_mac_index_lock);
+ return idx;
+ }
+ }
+
+ idx = ida_simple_get(&priv->nfp_mac_off_ids, 0,
+ NFP_MAX_MAC_INDEX, GFP_KERNEL);
+ if (idx < 0) {
+ mutex_unlock(&priv->nfp_mac_index_lock);
+ return idx;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ mutex_unlock(&priv->nfp_mac_index_lock);
+ return -ENOMEM;
+ }
+ entry->ifindex = ifindex;
+ entry->index = idx;
+ list_add_tail(&entry->list, &priv->nfp_mac_index_list);
+ mutex_unlock(&priv->nfp_mac_index_lock);
+
+ return idx;
+}
+
+static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_mac_non_nfp_idx *entry;
+ struct list_head *ptr, *storage;
+
+ mutex_lock(&priv->nfp_mac_index_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
+ entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
+ if (entry->ifindex == ifindex) {
+ ida_simple_remove(&priv->nfp_mac_off_ids,
+ entry->index);
+ list_del(&entry->list);
+ kfree(entry);
+ break;
+ }
+ }
+ mutex_unlock(&priv->nfp_mac_index_lock);
+}
+
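+/* Queue a netdev's MAC for offload. Repr MACs are indexed by port ID;
+ * other offloadable netdevs get an 8-bit index from the IDA. Entries are
+ * only queued here; nfp_tunnel_write_macs() performs the write.
+ */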
+static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
+ struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_mac_offload_entry *entry;
+ u16 nfp_mac_idx;
+ int port = 0;
+
+ /* Check if MAC should be offloaded. */
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ return;
+
+ if (nfp_netdev_is_nfp_repr(netdev))
+ port = nfp_repr_get_port_id(netdev);
+ else if (!nfp_tun_is_netdev_to_offload(netdev))
+ return;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ nfp_flower_cmsg_warn(app, "Mem fail when offloading MAC.\n");
+ return;
+ }
+
+ if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
+ NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) {
+ nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
+ } else if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
+ NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT) {
+ port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port);
+ nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT;
+ } else {
+ /* Must assign our own unique 8-bit index. */
+ int idx = nfp_tun_get_mac_idx(app, netdev->ifindex);
+
+ if (idx < 0) {
+ nfp_flower_cmsg_warn(app, "Can't assign non-repr MAC index.\n");
+ kfree(entry);
+ return;
+ }
+ nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
+ }
+
+ entry->index = cpu_to_be16(nfp_mac_idx);
+ ether_addr_copy(entry->addr, netdev->dev_addr);
+
+ mutex_lock(&priv->nfp_mac_off_lock);
+ priv->nfp_mac_off_count++;
+ list_add_tail(&entry->list, &priv->nfp_mac_off_list);
+ mutex_unlock(&priv->nfp_mac_off_lock);
+}
+
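+/* netdev notifier: keep the MAC offload list in sync as netdevs are
+ * registered, unregistered or change address.
+ */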
+static int nfp_tun_mac_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct nfp_flower_priv *app_priv;
+ struct net_device *netdev;
+ struct nfp_app *app;
+
+ app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_mac_nb);
+ app = app_priv->app;
+ netdev = netdev_notifier_info_to_dev(ptr);
+
+ if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
+ /* If non-NFP netdev then free its offload index. */
+ if (nfp_tun_is_netdev_to_offload(netdev))
+ nfp_tun_del_mac_idx(app, netdev->ifindex);
+ } else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
+ event == NETDEV_REGISTER) {
+ nfp_tun_add_to_mac_offload_list(netdev, app);
+
+ /* Force a list write to keep NFP up to date. */
+ nfp_tunnel_write_macs(app);
+ }
+ return NOTIFY_OK;
+}
+
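+/* Initialise tunnel offload state: the MAC, IPv4 and neighbour caches
+ * and their locks, the netdev and netevent notifiers, and a walk of the
+ * already-registered netdevs so pre-existing MACs are offloaded too.
+ */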
+int nfp_tunnel_config_start(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct net_device *netdev;
+ int err;
+
+ /* Initialise priv data for MAC offloading. */
+ priv->nfp_mac_off_count = 0;
+ mutex_init(&priv->nfp_mac_off_lock);
+ INIT_LIST_HEAD(&priv->nfp_mac_off_list);
+ priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler;
+ mutex_init(&priv->nfp_mac_index_lock);
+ INIT_LIST_HEAD(&priv->nfp_mac_index_list);
+ ida_init(&priv->nfp_mac_off_ids);
+
+ /* Initialise priv data for IPv4 offloading. */
+ mutex_init(&priv->nfp_ipv4_off_lock);
+ INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);
+
+ /* Initialise priv data for neighbour offloading. */
+ spin_lock_init(&priv->nfp_neigh_off_lock);
+ INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
+ priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
+
+ err = register_netdevice_notifier(&priv->nfp_tun_mac_nb);
+ if (err)
+ goto err_free_mac_ida;
+
+ err = register_netevent_notifier(&priv->nfp_tun_neigh_nb);
+ if (err)
+ goto err_unreg_mac_nb;
+
+ /* Parse netdevs already registered for MACs that need to be offloaded. */
+ rtnl_lock();
+ for_each_netdev(&init_net, netdev)
+ nfp_tun_add_to_mac_offload_list(netdev, app);
+ rtnl_unlock();
+
+ return 0;
+
+err_unreg_mac_nb:
+ unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
+err_free_mac_ida:
+ ida_destroy(&priv->nfp_mac_off_ids);
+ return err;
+}
+
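+/* Tear down tunnel offload state: unregister the notifiers and free all
+ * cached MAC, MAC index, IPv4 and route entries.
+ */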
+void nfp_tunnel_config_stop(struct nfp_app *app)
+{
+ struct nfp_tun_mac_offload_entry *mac_entry;
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_route_entry *route_entry;
+ struct nfp_tun_mac_non_nfp_idx *mac_idx;
+ struct nfp_ipv4_addr_entry *ip_entry;
+ struct list_head *ptr, *storage;
+
+ unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
+ unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);
+
+ /* Free any memory that may be occupied by MAC list. */
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
+ mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
+ list);
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+
+ /* Free any memory that may be occupied by MAC index list. */
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
+ mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
+ list);
+ list_del(&mac_idx->list);
+ kfree(mac_idx);
+ }
+
+ ida_destroy(&priv->nfp_mac_off_ids);
+
+ /* Free any memory that may be occupied by ipv4 list. */
+ list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+ list_del(&ip_entry->list);
+ kfree(ip_entry);
+ }
+
+ /* Free any memory that may be occupied by the route list. */
+ list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
+ list);
+ list_del(&route_entry->list);
+ kfree(route_entry);
+ }
+}