Diffstat (limited to 'drivers/net/ethernet/netronome/nfp')
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/cls.c               |  14
-rw-r--r--  drivers/net/ethernet/netronome/nfp/ccm.h                   |   1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/crypto.h         |  15
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/fw.h             |   8
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/tls.c            |  89
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c         |  65
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.c           |  11
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h           | 106
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h           |  38
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c          | 260
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c        | 144
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c    | 498
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h               |   6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c        |  35
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c          |  48
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h          |  25
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c       | 116
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c        |   8
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c  |   6
19 files changed, 1209 insertions(+), 284 deletions(-)
diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
index 9f8a1f69c0c4..23ebddfb9532 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/cls.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
@@ -176,10 +176,8 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
u8 mask, val;
int err;
- if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack)) {
- err = -EOPNOTSUPP;
+ if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack))
goto err_delete;
- }
tos_off = proto == htons(ETH_P_IP) ? 16 : 20;
@@ -200,18 +198,14 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
if ((iter->val & cmask) == (val & cmask) &&
iter->band != knode->res->classid) {
NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
- err = -EOPNOTSUPP;
goto err_delete;
}
}
if (!match) {
match = kzalloc(sizeof(*match), GFP_KERNEL);
- if (!match) {
- err = -ENOMEM;
- goto err_delete;
- }
-
+ if (!match)
+ return -ENOMEM;
list_add(&match->list, &alink->dscp_map);
}
match->handle = knode->handle;
@@ -227,7 +221,7 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
err_delete:
nfp_abm_u32_knode_delete(alink, knode);
- return err;
+ return -EOPNOTSUPP;
}
static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
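The abm/cls.c hunks above consolidate duplicated error handling: every failure that must undo a partially installed knode jumps to the single err_delete label, which now returns -EOPNOTSUPP unconditionally, while the allocation failure, which has nothing to undo, returns -ENOMEM directly. A minimal sketch of the resulting shape, with hypothetical names:

    /* Hypothetical sketch of the consolidated error-path pattern;
     * not driver code. */
    static int offload_filter(struct filter *f)
    {
        void *match;

        if (!filter_supported(f))
            goto err_delete;

        match = kzalloc(sizeof(struct match_entry), GFP_KERNEL);
        if (!match)
            return -ENOMEM;     /* nothing installed yet, return directly */

        /* ... program the hardware ... */
        return 0;

    err_delete:
        filter_delete(f);       /* undo the partial install */
        return -EOPNOTSUPP;     /* one errno for all cleanup paths */
    }
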
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.h b/drivers/net/ethernet/netronome/nfp/ccm.h
index a460c75522be..d81d450be50e 100644
--- a/drivers/net/ethernet/netronome/nfp/ccm.h
+++ b/drivers/net/ethernet/netronome/nfp/ccm.h
@@ -26,6 +26,7 @@ enum nfp_ccm_type {
NFP_CCM_TYPE_CRYPTO_ADD = 10,
NFP_CCM_TYPE_CRYPTO_DEL = 11,
NFP_CCM_TYPE_CRYPTO_UPDATE = 12,
+ NFP_CCM_TYPE_CRYPTO_RESYNC = 13,
__NFP_CCM_TYPE_MAX,
};
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
index 60372ddf69f0..bffe58bb2f27 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
+++ b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
@@ -4,6 +4,10 @@
#ifndef NFP_CRYPTO_H
#define NFP_CRYPTO_H 1
+struct net_device;
+struct nfp_net;
+struct nfp_net_tls_resync_req;
+
struct nfp_net_tls_offload_ctx {
__be32 fw_handle[2];
@@ -17,11 +21,22 @@ struct nfp_net_tls_offload_ctx {
#ifdef CONFIG_TLS_DEVICE
int nfp_net_tls_init(struct nfp_net *nn);
+int nfp_net_tls_rx_resync_req(struct net_device *netdev,
+ struct nfp_net_tls_resync_req *req,
+ void *pkt, unsigned int pkt_len);
#else
static inline int nfp_net_tls_init(struct nfp_net *nn)
{
return 0;
}
+
+static inline int
+nfp_net_tls_rx_resync_req(struct net_device *netdev,
+ struct nfp_net_tls_resync_req *req,
+ void *pkt, unsigned int pkt_len)
+{
+ return -EOPNOTSUPP;
+}
#endif
#endif
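crypto.h follows the usual kernel pattern for optional features: when CONFIG_TLS_DEVICE is off, a static inline stub stands in for the real function so callers compile unconditionally and the dead -EOPNOTSUPP branch folds away. A generic sketch of that pattern (hypothetical names):

    struct device_ctx;          /* forward declaration is enough */

    #ifdef CONFIG_FEATURE_X
    int feature_x_handle(struct device_ctx *ctx);   /* real implementation */
    #else
    static inline int feature_x_handle(struct device_ctx *ctx)
    {
        return -EOPNOTSUPP;     /* feature compiled out: caller sees an errno */
    }
    #endif
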
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/fw.h b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
index 67413d946c4a..8d1458896bcb 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
@@ -9,6 +9,14 @@
#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC 0
#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC 1
+struct nfp_net_tls_resync_req {
+ __be32 fw_handle[2];
+ __be32 tcp_seq;
+ u8 l3_offset;
+ u8 l4_offset;
+ u8 resv[2];
+};
+
struct nfp_crypto_reply_simple {
struct nfp_ccm_hdr hdr;
__be32 error;
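struct nfp_net_tls_resync_req is a fixed firmware-ABI layout: 8 bytes of handle, a 4-byte TCP sequence number, two 1-byte offsets and 2 reserved bytes, i.e. 16 bytes with no implicit padding since every member is naturally aligned. A build-time check in this style could pin that down (a sketch, not part of the patch):

    static_assert(sizeof(struct nfp_net_tls_resync_req) == 16,
                  "resync request must stay 16 bytes to match the firmware ABI");
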
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index 96a96b35c0ca..7c50e3dfb9d5 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -5,6 +5,7 @@
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/string.h>
+#include <net/inet6_hashtables.h>
#include <net/tls.h>
#include "../ccm.h"
@@ -391,8 +392,9 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
if (direction == TLS_OFFLOAD_CTX_DIR_TX)
return 0;
- tls_offload_rx_resync_set_type(sk,
- TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
+ if (!nn->tlv_caps.tls_resync_ss)
+ tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
+
return 0;
err_fw_remove:
@@ -424,6 +426,7 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_tls_offload_ctx *ntls;
struct nfp_crypto_req_update *req;
+ enum nfp_ccm_type type;
struct sk_buff *skb;
gfp_t flags;
int err;
@@ -442,15 +445,18 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
req->tcp_seq = cpu_to_be32(seq);
memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));
+ type = NFP_CCM_TYPE_CRYPTO_UPDATE;
if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
- err = nfp_net_tls_communicate_simple(nn, skb, "sync",
- NFP_CCM_TYPE_CRYPTO_UPDATE);
+ err = nfp_net_tls_communicate_simple(nn, skb, "sync", type);
if (err)
return err;
ntls->next_seq = seq;
} else {
- nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
+ if (nn->tlv_caps.tls_resync_ss)
+ type = NFP_CCM_TYPE_CRYPTO_RESYNC;
+ nfp_ccm_mbox_post(nn, skb, type,
sizeof(struct nfp_crypto_reply_simple));
+ atomic_inc(&nn->ktls_rx_resync_sent);
}
return 0;
@@ -462,6 +468,79 @@ static const struct tlsdev_ops nfp_net_tls_ops = {
.tls_dev_resync = nfp_net_tls_resync,
};
+int nfp_net_tls_rx_resync_req(struct net_device *netdev,
+ struct nfp_net_tls_resync_req *req,
+ void *pkt, unsigned int pkt_len)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+ struct iphdr *iph;
+ struct sock *sk;
+ __be32 tcp_seq;
+ int err;
+
+ iph = pkt + req->l3_offset;
+ ipv6h = pkt + req->l3_offset;
+ th = pkt + req->l4_offset;
+
+ if ((u8 *)&th[1] > (u8 *)pkt + pkt_len) {
+ netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu pkt_len: %u)\n",
+ req->l3_offset, req->l4_offset, pkt_len);
+ err = -EINVAL;
+ goto err_cnt_ign;
+ }
+
+ switch (iph->version) {
+ case 4:
+ sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ iph->saddr, th->source, iph->daddr,
+ th->dest, netdev->ifindex);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case 6:
+ sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ &ipv6h->saddr, th->source,
+ &ipv6h->daddr, ntohs(th->dest),
+ netdev->ifindex, 0);
+ break;
+#endif
+ default:
+ netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu ipver: %u)\n",
+ req->l3_offset, req->l4_offset, iph->version);
+ err = -EINVAL;
+ goto err_cnt_ign;
+ }
+
+ err = 0;
+ if (!sk)
+ goto err_cnt_ign;
+ if (!tls_is_sk_rx_device_offloaded(sk) ||
+ sk->sk_shutdown & RCV_SHUTDOWN)
+ goto err_put_sock;
+
+ ntls = tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_RX);
+ /* some FW versions can't report the handle and report 0s */
+ if (memchr_inv(&req->fw_handle, 0, sizeof(req->fw_handle)) &&
+ memcmp(&req->fw_handle, &ntls->fw_handle, sizeof(ntls->fw_handle)))
+ goto err_put_sock;
+
+ /* copy to ensure alignment */
+ memcpy(&tcp_seq, &req->tcp_seq, sizeof(tcp_seq));
+ tls_offload_rx_resync_request(sk, tcp_seq);
+ atomic_inc(&nn->ktls_rx_resync_req);
+
+ sock_gen_put(sk);
+ return 0;
+
+err_put_sock:
+ sock_gen_put(sk);
+err_cnt_ign:
+ atomic_inc(&nn->ktls_rx_resync_ign);
+ return err;
+}
+
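nfp_net_tls_rx_resync_req() is invoked from the RX datapath (the nfp_net_common.c hunks carrying that hook are not shown in this section): the firmware prepends the request to the frame and the driver hands over the request plus the packet bytes its offsets describe. Roughly, under that assumption (hypothetical surrounding names):

    /* Hypothetical RX-side caller; the real hook lives in
     * nfp_net_common.c. The firmware-prepended request is stripped
     * and the remaining bytes are passed as the packet. */
    static void rx_handle_tls_resync(struct net_device *netdev,
                                     void *frame, unsigned int frame_len)
    {
        struct nfp_net_tls_resync_req *req = frame;

        if (frame_len < sizeof(*req))
            return;     /* runt frame, nothing to parse */

        /* ignored requests are counted via ktls_rx_resync_ign inside */
        nfp_net_tls_rx_resync_req(netdev, req, (void *)(req + 1),
                                  frame_len - sizeof(*req));
    }
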
static int nfp_net_tls_reset(struct nfp_net *nn)
{
struct nfp_crypto_req_reset *req;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 1b019fdfcd97..c06600fb47ff 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -22,8 +22,9 @@
#define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800)
-#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS IP_TUNNEL_INFO_TX
-#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \
+#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS (IP_TUNNEL_INFO_TX | \
+ IP_TUNNEL_INFO_IPV6)
+#define NFP_FL_SUPPORTED_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \
NFP_FL_TUNNEL_KEY | \
NFP_FL_TUNNEL_GENEVE_OPT)
@@ -394,19 +395,26 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
}
static int
-nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
- const struct flow_action_entry *act,
- struct nfp_fl_pre_tunnel *pre_tun,
- enum nfp_flower_tun_type tun_type,
- struct net_device *netdev, struct netlink_ext_ack *extack)
+nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
+ const struct flow_action_entry *act,
+ struct nfp_fl_pre_tunnel *pre_tun,
+ enum nfp_flower_tun_type tun_type,
+ struct net_device *netdev, struct netlink_ext_ack *extack)
{
- size_t act_size = sizeof(struct nfp_fl_set_ipv4_tun);
const struct ip_tunnel_info *ip_tun = act->tunnel;
+ bool ipv6 = ip_tunnel_info_af(ip_tun) == AF_INET6;
+ size_t act_size = sizeof(struct nfp_fl_set_tun);
struct nfp_flower_priv *priv = app->priv;
u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
int pretun_idx = 0;
+ if (!IS_ENABLED(CONFIG_IPV6) && ipv6)
+ return -EOPNOTSUPP;
+
+ if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN))
+ return -EOPNOTSUPP;
+
BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
@@ -417,19 +425,35 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
return -EOPNOTSUPP;
}
- set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+ set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TUNNEL;
set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
/* Set tunnel type and pre-tunnel index. */
tmp_set_ip_tun_type_index |=
- FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
- FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
+ FIELD_PREP(NFP_FL_TUNNEL_TYPE, tun_type) |
+ FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
set_tun->tun_id = ip_tun->key.tun_id;
if (ip_tun->key.ttl) {
set_tun->ttl = ip_tun->key.ttl;
+#ifdef CONFIG_IPV6
+ } else if (ipv6) {
+ struct net *net = dev_net(netdev);
+ struct flowi6 flow = {};
+ struct dst_entry *dst;
+
+ flow.daddr = ip_tun->key.u.ipv6.dst;
+ flow.flowi4_proto = IPPROTO_UDP;
+ dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &flow, NULL);
+ if (!IS_ERR(dst)) {
+ set_tun->ttl = ip6_dst_hoplimit(dst);
+ dst_release(dst);
+ } else {
+ set_tun->ttl = net->ipv6.devconf_all->hop_limit;
+ }
+#endif
} else {
struct net *net = dev_net(netdev);
struct flowi4 flow = {};
@@ -455,7 +479,7 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
set_tun->tos = ip_tun->key.tos;
if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
- ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) {
+ ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
return -EOPNOTSUPP;
}
@@ -467,7 +491,12 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
}
/* Complete pre_tunnel action. */
- pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
+ if (ipv6) {
+ pre_tun->flags |= cpu_to_be16(NFP_FL_PRE_TUN_IPV6);
+ pre_tun->ipv6_dst = ip_tun->key.u.ipv6.dst;
+ } else {
+ pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
+ }
return 0;
}
@@ -956,8 +985,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
struct netlink_ext_ack *extack, int act_idx)
{
- struct nfp_fl_set_ipv4_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
+ struct nfp_fl_set_tun *set_tun;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_push_mpls *psh_m;
struct nfp_fl_pop_vlan *pop_v;
@@ -1032,7 +1061,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
* If none, the packet falls back before applying other actions.
*/
if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
- sizeof(struct nfp_fl_set_ipv4_tun) > NFP_FL_MAX_A_SIZ) {
+ sizeof(struct nfp_fl_set_tun) > NFP_FL_MAX_A_SIZ) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
return -EOPNOTSUPP;
}
@@ -1046,11 +1075,11 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
return err;
set_tun = (void *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_set_ipv4_tun(app, set_tun, act, pre_tun,
- *tun_type, netdev, extack);
+ err = nfp_fl_set_tun(app, set_tun, act, pre_tun, *tun_type,
+ netdev, extack);
if (err)
return err;
- *a_len += sizeof(struct nfp_fl_set_ipv4_tun);
+ *a_len += sizeof(struct nfp_fl_set_tun);
}
break;
case FLOW_ACTION_TUNNEL_DECAP:
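Note that the pre-tunnel action stays the same size either way: ipv4_dst and ipv6_dst share storage in the union added to struct nfp_fl_pre_tunnel (see the cmsg.h hunks below, where the old __be32 extra[3] was already reserved for IPv6), and only the NFP_FL_PRE_TUN_IPV6 flag tells the firmware which interpretation to use. Condensed from the hunk above:

    if (ipv6) {
        pre_tun->flags |= cpu_to_be16(NFP_FL_PRE_TUN_IPV6);
        pre_tun->ipv6_dst = ip_tun->key.u.ipv6.dst;     /* 16-byte member */
    } else {
        pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;     /* aliases ipv6_dst */
    }
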
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index 05981b54eaab..a595ddb92bff 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -270,11 +270,17 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
}
goto err_default;
case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
- nfp_tunnel_request_route(app, skb);
+ nfp_tunnel_request_route_v4(app, skb);
+ break;
+ case NFP_FLOWER_CMSG_TYPE_NO_NEIGH_V6:
+ nfp_tunnel_request_route_v6(app, skb);
break;
case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
nfp_tunnel_keep_alive(app, skb);
break;
+ case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS_V6:
+ nfp_tunnel_keep_alive_v6(app, skb);
+ break;
case NFP_FLOWER_CMSG_TYPE_QOS_STATS:
nfp_flower_stats_rlim_reply(app, skb);
break;
@@ -361,7 +367,8 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
nfp_flower_process_mtu_ack(app, skb)) {
/* Handle MTU acks outside wq to prevent RTNL conflict. */
dev_consume_skb_any(skb);
- } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
+ } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
+ cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6) {
/* Acks from the NFP that the route is added - ignore. */
dev_consume_skb_any(skb);
} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 7eb2ec8969c3..9b50d76bbc09 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -26,6 +26,7 @@
#define NFP_FLOWER_LAYER2_GRE BIT(0)
#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
+#define NFP_FLOWER_LAYER2_TUN_IPV6 BIT(7)
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12)
@@ -63,6 +64,7 @@
#define NFP_FL_MAX_GENEVE_OPT_ACT 32
#define NFP_FL_MAX_GENEVE_OPT_CNT 64
#define NFP_FL_MAX_GENEVE_OPT_KEY 32
+#define NFP_FL_MAX_GENEVE_OPT_KEY_V6 8
/* Action opcodes */
#define NFP_FL_ACTION_OPCODE_OUTPUT 0
@@ -70,7 +72,7 @@
#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
#define NFP_FL_ACTION_OPCODE_PUSH_MPLS 3
#define NFP_FL_ACTION_OPCODE_POP_MPLS 4
-#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6
+#define NFP_FL_ACTION_OPCODE_SET_TUNNEL 6
#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
#define NFP_FL_ACTION_OPCODE_SET_MPLS 8
#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
@@ -99,8 +101,8 @@
/* Tunnel ports */
#define NFP_FL_PORT_TYPE_TUN 0x50000000
-#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4)
-#define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0)
+#define NFP_FL_TUNNEL_TYPE GENMASK(7, 4)
+#define NFP_FL_PRE_TUN_INDEX GENMASK(2, 0)
#define NFP_FLOWER_WORKQ_MAX_SKBS 30000
@@ -206,13 +208,16 @@ struct nfp_fl_pre_lag {
struct nfp_fl_pre_tunnel {
struct nfp_fl_act_head head;
- __be16 reserved;
- __be32 ipv4_dst;
- /* reserved for use with IPv6 addresses */
- __be32 extra[3];
+ __be16 flags;
+ union {
+ __be32 ipv4_dst;
+ struct in6_addr ipv6_dst;
+ };
};
-struct nfp_fl_set_ipv4_tun {
+#define NFP_FL_PRE_TUN_IPV6 BIT(0)
+
+struct nfp_fl_set_tun {
struct nfp_fl_act_head head;
__be16 reserved;
__be64 tun_id __packed;
@@ -387,6 +392,11 @@ struct nfp_flower_tun_ipv4 {
__be32 dst;
};
+struct nfp_flower_tun_ipv6 {
+ struct in6_addr src;
+ struct in6_addr dst;
+};
+
struct nfp_flower_tun_ip_ext {
u8 tos;
u8 ttl;
@@ -416,6 +426,42 @@ struct nfp_flower_ipv4_udp_tun {
__be32 tun_id;
};
+/* Flow Frame IPv6 UDP TUNNEL --> Tunnel details (11W/44B)
+ * -----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | tos | ttl |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VNI | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ipv6_udp_tun {
+ struct nfp_flower_tun_ipv6 ipv6;
+ __be16 reserved1;
+ struct nfp_flower_tun_ip_ext ip_ext;
+ __be32 reserved2;
+ __be32 tun_id;
+};
+
/* Flow Frame GRE TUNNEL --> Tunnel details (6W/24B)
* -----------------------------------------------------------------
* 3 2 1
@@ -445,6 +491,46 @@ struct nfp_flower_ipv4_gre_tun {
__be32 reserved2;
};
+/* Flow Frame GRE TUNNEL V6 --> Tunnel details (12W/48B)
+ * -----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | tun_flags | tos | ttl |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | Ethertype |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Key |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ipv6_gre_tun {
+ struct nfp_flower_tun_ipv6 ipv6;
+ __be16 tun_flags;
+ struct nfp_flower_tun_ip_ext ip_ext;
+ __be16 reserved1;
+ __be16 ethertype;
+ __be32 tun_key;
+ __be32 reserved2;
+};
+
struct nfp_flower_geneve_options {
u8 data[NFP_FL_MAX_GENEVE_OPT_KEY];
};
@@ -485,6 +571,10 @@ enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19,
NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20,
NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE = 21,
+ NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6 = 22,
+ NFP_FLOWER_CMSG_TYPE_NO_NEIGH_V6 = 23,
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 = 24,
+ NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS_V6 = 25,
NFP_FLOWER_CMSG_TYPE_MAX = 32,
};
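The ASCII layouts above promise 11 words/44 bytes for the IPv6 UDP tunnel match and 12 words/48 bytes for the GRE variant, and the member lists add up (two 16-byte addresses plus the trailing half-words and words, with no implicit padding). Build-time checks in this style could keep diagram and struct in sync (a sketch, not in the patch):

    static_assert(sizeof(struct nfp_flower_ipv6_udp_tun) == 44,
                  "IPv6 UDP tunnel match must be 11 words");
    static_assert(sizeof(struct nfp_flower_ipv6_gre_tun) == 48,
                  "IPv6 GRE tunnel match must be 12 words");
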
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index e0c985fcaec1..d55d0d33bc45 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -43,6 +43,7 @@ struct nfp_app;
#define NFP_FL_FEATS_VF_RLIM BIT(4)
#define NFP_FL_FEATS_FLOW_MOD BIT(5)
#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6)
+#define NFP_FL_FEATS_IPV6_TUN BIT(7)
#define NFP_FL_FEATS_FLOW_MERGE BIT(30)
#define NFP_FL_FEATS_LAG BIT(31)
@@ -62,18 +63,26 @@ struct nfp_fl_stats_id {
* struct nfp_fl_tunnel_offloads - priv data for tunnel offloads
* @offloaded_macs: Hashtable of the offloaded MAC addresses
* @ipv4_off_list: List of IPv4 addresses to offload
- * @neigh_off_list: List of neighbour offloads
+ * @ipv6_off_list: List of IPv6 addresses to offload
+ * @neigh_off_list_v4: List of IPv4 neighbour offloads
+ * @neigh_off_list_v6: List of IPv6 neighbour offloads
* @ipv4_off_lock: Lock for the IPv4 address list
- * @neigh_off_lock: Lock for the neighbour address list
+ * @ipv6_off_lock: Lock for the IPv6 address list
+ * @neigh_off_lock_v4: Lock for the IPv4 neighbour address list
+ * @neigh_off_lock_v6: Lock for the IPv6 neighbour address list
* @mac_off_ids: IDA to manage id assignment for offloaded MACs
* @neigh_nb: Notifier to monitor neighbour state
*/
struct nfp_fl_tunnel_offloads {
struct rhashtable offloaded_macs;
struct list_head ipv4_off_list;
- struct list_head neigh_off_list;
+ struct list_head ipv6_off_list;
+ struct list_head neigh_off_list_v4;
+ struct list_head neigh_off_list_v6;
struct mutex ipv4_off_lock;
- spinlock_t neigh_off_lock;
+ struct mutex ipv6_off_lock;
+ spinlock_t neigh_off_lock_v4;
+ spinlock_t neigh_off_lock_v6;
struct ida mac_off_ids;
struct notifier_block neigh_nb;
};
@@ -273,12 +282,25 @@ struct nfp_fl_stats {
u64 used;
};
+/**
+ * struct nfp_ipv6_addr_entry - cached IPv6 addresses
+ * @ipv6_addr: IP address
+ * @ref_count: number of rules currently using this IP
+ * @list: list pointer
+ */
+struct nfp_ipv6_addr_entry {
+ struct in6_addr ipv6_addr;
+ int ref_count;
+ struct list_head list;
+};
+
struct nfp_fl_payload {
struct nfp_fl_rule_metadata meta;
unsigned long tc_flower_cookie;
struct rhash_head fl_node;
struct rcu_head rcu;
__be32 nfp_tun_ipv4_addr;
+ struct nfp_ipv6_addr_entry *nfp_tun_ipv6;
struct net_device *ingress_dev;
char *unmasked_data;
char *mask_data;
@@ -396,8 +418,14 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app,
unsigned long event, void *ptr);
void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
-void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
+void
+nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry);
+struct nfp_ipv6_addr_entry *
+nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6);
+void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb);
+void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
+void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb);
void nfp_flower_lag_init(struct nfp_fl_lag *lag);
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
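The new nfp_ipv6_addr_entry is reference counted rather than offloaded once per flow: nfp_tunnel_add_ipv6_off() is intended to return an existing entry with ref_count incremented (or allocate and offload a new one), and nfp_tunnel_put_ipv6_off() drops the reference so the address list on the NFP only changes when the first user appears or the last one goes away. The usage, condensed from the match.c and offload.c hunks below:

    entry = nfp_tunnel_add_ipv6_off(app, &dst);     /* take a reference */
    if (!entry)
        return -EOPNOTSUPP;
    nfp_flow->nfp_tun_ipv6 = entry;

    /* ... later, on flow delete or on an error path ... */
    if (nfp_flow->nfp_tun_ipv6)
        nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);  /* drop it */
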
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 9cc3ba17ff69..546bc01d507d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -10,9 +10,8 @@
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
struct nfp_flower_meta_tci *msk,
- struct flow_cls_offload *flow, u8 key_type)
+ struct flow_rule *rule, u8 key_type)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
u16 tmp_tci;
memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
@@ -77,11 +76,8 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
- struct nfp_flower_mac_mpls *msk,
- struct flow_cls_offload *flow)
+ struct nfp_flower_mac_mpls *msk, struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
@@ -130,10 +126,8 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
struct nfp_flower_tp_ports *msk,
- struct flow_cls_offload *flow)
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
@@ -150,11 +144,8 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
- struct nfp_flower_ip_ext *msk,
- struct flow_cls_offload *flow)
+ struct nfp_flower_ip_ext *msk, struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -224,10 +215,8 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
- struct nfp_flower_ipv4 *msk,
- struct flow_cls_offload *flow)
+ struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_match_ipv4_addrs match;
memset(ext, 0, sizeof(struct nfp_flower_ipv4));
@@ -241,16 +230,13 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
msk->ipv4_dst = match.mask->dst;
}
- nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+ nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
- struct nfp_flower_ipv6 *msk,
- struct flow_cls_offload *flow)
+ struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
memset(ext, 0, sizeof(struct nfp_flower_ipv6));
memset(msk, 0, sizeof(struct nfp_flower_ipv6));
@@ -264,16 +250,15 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
msk->ipv6_dst = match.mask->dst;
}
- nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+ nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
static int
-nfp_flower_compile_geneve_opt(void *ext, void *msk,
- struct flow_cls_offload *flow)
+nfp_flower_compile_geneve_opt(void *ext, void *msk, struct flow_rule *rule)
{
struct flow_match_enc_opts match;
- flow_rule_match_enc_opts(flow->rule, &match);
+ flow_rule_match_enc_opts(rule, &match);
memcpy(ext, match.key->data, match.key->len);
memcpy(msk, match.mask->data, match.mask->len);
@@ -283,10 +268,8 @@ nfp_flower_compile_geneve_opt(void *ext, void *msk,
static void
nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
struct nfp_flower_tun_ipv4 *msk,
- struct flow_cls_offload *flow)
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
struct flow_match_ipv4_addrs match;
@@ -299,12 +282,26 @@ nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
}
static void
+nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
+ struct nfp_flower_tun_ipv6 *msk,
+ struct flow_rule *rule)
+{
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_enc_ipv6_addrs(rule, &match);
+ ext->src = match.key->src;
+ ext->dst = match.key->dst;
+ msk->src = match.mask->src;
+ msk->dst = match.mask->dst;
+ }
+}
+
+static void
nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
struct nfp_flower_tun_ip_ext *msk,
- struct flow_cls_offload *flow)
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
struct flow_match_ip match;
@@ -317,57 +314,97 @@ nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
}
static void
-nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
- struct nfp_flower_ipv4_gre_tun *msk,
- struct flow_cls_offload *flow)
+nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
- memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
- memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
+ u32 vni;
- /* NVGRE is the only supported GRE tunnel type */
- ext->ethertype = cpu_to_be16(ETH_P_TEB);
- msk->ethertype = cpu_to_be16(~0);
+ flow_rule_match_enc_keyid(rule, &match);
+ vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
+ *key = cpu_to_be32(vni);
+ vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
+ *key_msk = cpu_to_be32(vni);
+ }
+}
+
+static void
+nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
+ __be16 *flags_msk, struct flow_rule *rule)
+{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
struct flow_match_enc_keyid match;
flow_rule_match_enc_keyid(rule, &match);
- ext->tun_key = match.key->keyid;
- msk->tun_key = match.mask->keyid;
+ *key = match.key->keyid;
+ *key_msk = match.mask->keyid;
- ext->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
- msk->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+ *flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+ *flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
}
+}
+
+static void
+nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
+ struct nfp_flower_ipv4_gre_tun *msk,
+ struct flow_rule *rule)
+{
+ memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+
+ /* NVGRE is the only supported GRE tunnel type */
+ ext->ethertype = cpu_to_be16(ETH_P_TEB);
+ msk->ethertype = cpu_to_be16(~0);
- nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
- nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+ nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+ nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
+ &ext->tun_flags, &msk->tun_flags, rule);
}
static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
struct nfp_flower_ipv4_udp_tun *msk,
- struct flow_cls_offload *flow)
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_match_enc_keyid match;
- u32 temp_vni;
+ nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+ nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
+}
- flow_rule_match_enc_keyid(rule, &match);
- temp_vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
- ext->tun_id = cpu_to_be32(temp_vni);
- temp_vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
- msk->tun_id = cpu_to_be32(temp_vni);
- }
+static void
+nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
+ struct nfp_flower_ipv6_udp_tun *msk,
+ struct flow_rule *rule)
+{
+ memset(ext, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
+
+ nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+ nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
+}
+
+static void
+nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
+ struct nfp_flower_ipv6_gre_tun *msk,
+ struct flow_rule *rule)
+{
+ memset(ext, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
+
+ /* NVGRE is the only supported GRE tunnel type */
+ ext->ethertype = cpu_to_be16(ETH_P_TEB);
+ msk->ethertype = cpu_to_be16(~0);
- nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
- nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+ nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+ nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
+ &ext->tun_flags, &msk->tun_flags, rule);
}
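nfp_flower_compile_tun_udp_key() above shifts the 24-bit VNI into the upper bytes of the 32-bit tun_id word before converting back to big endian. Assuming NFP_FL_TUN_VNI_OFFSET is 8 (its value elsewhere in cmsg.h), a keyid of 0x42 lands as follows:

    u32 vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
    *key = cpu_to_be32(vni);    /* VNI 0x42 -> wire bytes 00 00 42 00 */
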
int nfp_flower_compile_flow_match(struct nfp_app *app,
@@ -378,6 +415,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
enum nfp_flower_tun_type tun_type,
struct netlink_ext_ack *extack)
{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
u32 port_id;
int err;
u8 *ext;
@@ -393,7 +431,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
(struct nfp_flower_meta_tci *)msk,
- flow, key_ls->key_layer);
+ rule, key_ls->key_layer);
ext += sizeof(struct nfp_flower_meta_tci);
msk += sizeof(struct nfp_flower_meta_tci);
@@ -425,7 +463,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
(struct nfp_flower_mac_mpls *)msk,
- flow);
+ rule);
ext += sizeof(struct nfp_flower_mac_mpls);
msk += sizeof(struct nfp_flower_mac_mpls);
}
@@ -433,7 +471,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
(struct nfp_flower_tp_ports *)msk,
- flow);
+ rule);
ext += sizeof(struct nfp_flower_tp_ports);
msk += sizeof(struct nfp_flower_tp_ports);
}
@@ -441,7 +479,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
(struct nfp_flower_ipv4 *)msk,
- flow);
+ rule);
ext += sizeof(struct nfp_flower_ipv4);
msk += sizeof(struct nfp_flower_ipv4);
}
@@ -449,43 +487,83 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
(struct nfp_flower_ipv6 *)msk,
- flow);
+ rule);
ext += sizeof(struct nfp_flower_ipv6);
msk += sizeof(struct nfp_flower_ipv6);
}
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
- __be32 tun_dst;
-
- nfp_flower_compile_ipv4_gre_tun((void *)ext, (void *)msk, flow);
- tun_dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
- ext += sizeof(struct nfp_flower_ipv4_gre_tun);
- msk += sizeof(struct nfp_flower_ipv4_gre_tun);
-
- /* Store the tunnel destination in the rule data.
- * This must be present and be an exact match.
- */
- nfp_flow->nfp_tun_ipv4_addr = tun_dst;
- nfp_tunnel_add_ipv4_off(app, tun_dst);
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+ struct nfp_flower_ipv6_gre_tun *gre_match;
+ struct nfp_ipv6_addr_entry *entry;
+ struct in6_addr *dst;
+
+ nfp_flower_compile_ipv6_gre_tun((void *)ext,
+ (void *)msk, rule);
+ gre_match = (struct nfp_flower_ipv6_gre_tun *)ext;
+ dst = &gre_match->ipv6.dst;
+ ext += sizeof(struct nfp_flower_ipv6_gre_tun);
+ msk += sizeof(struct nfp_flower_ipv6_gre_tun);
+
+ entry = nfp_tunnel_add_ipv6_off(app, dst);
+ if (!entry)
+ return -EOPNOTSUPP;
+
+ nfp_flow->nfp_tun_ipv6 = entry;
+ } else {
+ __be32 dst;
+
+ nfp_flower_compile_ipv4_gre_tun((void *)ext,
+ (void *)msk, rule);
+ dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
+ ext += sizeof(struct nfp_flower_ipv4_gre_tun);
+ msk += sizeof(struct nfp_flower_ipv4_gre_tun);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = dst;
+ nfp_tunnel_add_ipv4_off(app, dst);
+ }
}
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
- __be32 tun_dst;
-
- nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow);
- tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
- ext += sizeof(struct nfp_flower_ipv4_udp_tun);
- msk += sizeof(struct nfp_flower_ipv4_udp_tun);
-
- /* Store the tunnel destination in the rule data.
- * This must be present and be an exact match.
- */
- nfp_flow->nfp_tun_ipv4_addr = tun_dst;
- nfp_tunnel_add_ipv4_off(app, tun_dst);
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+ struct nfp_flower_ipv6_udp_tun *udp_match;
+ struct nfp_ipv6_addr_entry *entry;
+ struct in6_addr *dst;
+
+ nfp_flower_compile_ipv6_udp_tun((void *)ext,
+ (void *)msk, rule);
+ udp_match = (struct nfp_flower_ipv6_udp_tun *)ext;
+ dst = &udp_match->ipv6.dst;
+ ext += sizeof(struct nfp_flower_ipv6_udp_tun);
+ msk += sizeof(struct nfp_flower_ipv6_udp_tun);
+
+ entry = nfp_tunnel_add_ipv6_off(app, dst);
+ if (!entry)
+ return -EOPNOTSUPP;
+
+ nfp_flow->nfp_tun_ipv6 = entry;
+ } else {
+ __be32 dst;
+
+ nfp_flower_compile_ipv4_udp_tun((void *)ext,
+ (void *)msk, rule);
+ dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
+ ext += sizeof(struct nfp_flower_ipv4_udp_tun);
+ msk += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = dst;
+ nfp_tunnel_add_ipv4_off(app, dst);
+ }
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
- err = nfp_flower_compile_geneve_opt(ext, msk, flow);
+ err = nfp_flower_compile_geneve_opt(ext, msk, rule);
if (err)
return err;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 987ae221f6be..7ca5c1becfcf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -54,6 +54,10 @@
(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))
+#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
+ (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))
+
#define NFP_FLOWER_MERGE_FIELDS \
(NFP_FLOWER_LAYER_PORT | \
NFP_FLOWER_LAYER_MAC | \
@@ -64,7 +68,8 @@
#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
(NFP_FLOWER_LAYER_PORT | \
NFP_FLOWER_LAYER_MAC | \
- NFP_FLOWER_LAYER_IPV4)
+ NFP_FLOWER_LAYER_IPV4 | \
+ NFP_FLOWER_LAYER_IPV6)
struct nfp_flower_merge_check {
union {
@@ -146,10 +151,11 @@ static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
- u32 *key_layer_two, int *key_size,
+ u32 *key_layer_two, int *key_size, bool ipv6,
struct netlink_ext_ack *extack)
{
- if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
+ if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
+ (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
return -EOPNOTSUPP;
}
@@ -167,7 +173,7 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
struct flow_dissector_key_enc_opts *enc_op,
u32 *key_layer_two, u8 *key_layer, int *key_size,
struct nfp_flower_priv *priv,
- enum nfp_flower_tun_type *tun_type,
+ enum nfp_flower_tun_type *tun_type, bool ipv6,
struct netlink_ext_ack *extack)
{
int err;
@@ -176,7 +182,15 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
case htons(IANA_VXLAN_UDP_PORT):
*tun_type = NFP_FL_TUNNEL_VXLAN;
*key_layer |= NFP_FLOWER_LAYER_VXLAN;
- *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (ipv6) {
+ *key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ *key_size += sizeof(struct nfp_flower_ext_meta);
+ *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+ *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
+ } else {
+ *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ }
if (enc_op) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
@@ -192,7 +206,13 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
*key_layer |= NFP_FLOWER_LAYER_EXT_META;
*key_size += sizeof(struct nfp_flower_ext_meta);
*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
- *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (ipv6) {
+ *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+ *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
+ } else {
+ *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ }
if (!enc_op)
break;
@@ -200,8 +220,8 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
return -EOPNOTSUPP;
}
- err = nfp_flower_calc_opt_layer(enc_op, key_layer_two,
- key_size, extack);
+ err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
+ ipv6, extack);
if (err)
return err;
break;
@@ -237,6 +257,8 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
/* If any tun dissector is used then the required set must be used. */
if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
+ (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
+ != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
(dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
!= NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
@@ -268,8 +290,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
struct flow_match_enc_opts enc_op = { NULL, NULL };
struct flow_match_ipv4_addrs ipv4_addrs;
+ struct flow_match_ipv6_addrs ipv6_addrs;
struct flow_match_control enc_ctl;
struct flow_match_ports enc_ports;
+ bool ipv6_tun = false;
flow_rule_match_enc_control(rule, &enc_ctl);
@@ -277,38 +301,62 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
return -EOPNOTSUPP;
}
- if (enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only IPv4 tunnels are supported");
+
+ ipv6_tun = enc_ctl.key->addr_type ==
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ if (ipv6_tun &&
+ !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
return -EOPNOTSUPP;
}
- /* These fields are already verified as used. */
- flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
- if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
+ if (!ipv6_tun &&
+ enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
return -EOPNOTSUPP;
}
+ if (ipv6_tun) {
+ flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
+ if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
+ sizeof(ipv6_addrs.mask->dst))) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
+ if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
flow_rule_match_enc_opts(rule, &enc_op);
-
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
/* check if GRE, which has no enc_ports */
- if (netif_is_gretap(netdev)) {
- *tun_type = NFP_FL_TUNNEL_GRE;
- key_layer |= NFP_FLOWER_LAYER_EXT_META;
- key_size += sizeof(struct nfp_flower_ext_meta);
- key_layer_two |= NFP_FLOWER_LAYER2_GRE;
- key_size +=
- sizeof(struct nfp_flower_ipv4_gre_tun);
+ if (!netif_is_gretap(netdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
+ return -EOPNOTSUPP;
+ }
- if (enc_op.key) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
- return -EOPNOTSUPP;
- }
+ *tun_type = NFP_FL_TUNNEL_GRE;
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_layer_two |= NFP_FLOWER_LAYER2_GRE;
+
+ if (ipv6_tun) {
+ key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+ key_size +=
+ sizeof(struct nfp_flower_ipv6_udp_tun);
} else {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
+ key_size +=
+ sizeof(struct nfp_flower_ipv4_udp_tun);
+ }
+
+ if (enc_op.key) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
return -EOPNOTSUPP;
}
} else {
@@ -323,7 +371,8 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
&key_layer_two,
&key_layer,
&key_size, priv,
- tun_type, extack);
+ tun_type, ipv6_tun,
+ extack);
if (err)
return err;
@@ -491,6 +540,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
goto err_free_mask;
flow_pay->nfp_tun_ipv4_addr = 0;
+ flow_pay->nfp_tun_ipv6 = NULL;
flow_pay->meta.flags = 0;
INIT_LIST_HEAD(&flow_pay->linked_flows);
flow_pay->in_hw = false;
@@ -517,10 +567,12 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
struct nfp_fl_set_ip4_addrs *ipv4_add;
struct nfp_fl_set_ipv6_addr *ipv6_add;
struct nfp_fl_push_vlan *push_vlan;
+ struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_set_tport *tport;
struct nfp_fl_set_eth *eth;
struct nfp_fl_act_head *a;
unsigned int act_off = 0;
+ bool ipv6_tun = false;
u8 act_id = 0;
u8 *ports;
int i;
@@ -542,14 +594,18 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
case NFP_FL_ACTION_OPCODE_POP_VLAN:
merge->tci = cpu_to_be16(0);
break;
- case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
+ case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
/* New tunnel header means l2 to l4 can be matched. */
eth_broadcast_addr(&merge->l2.mac_dst[0]);
eth_broadcast_addr(&merge->l2.mac_src[0]);
memset(&merge->l4, 0xff,
sizeof(struct nfp_flower_tp_ports));
- memset(&merge->ipv4, 0xff,
- sizeof(struct nfp_flower_ipv4));
+ if (ipv6_tun)
+ memset(&merge->ipv6, 0xff,
+ sizeof(struct nfp_flower_ipv6));
+ else
+ memset(&merge->ipv4, 0xff,
+ sizeof(struct nfp_flower_ipv4));
break;
case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
eth = (struct nfp_fl_set_eth *)a;
@@ -597,6 +653,10 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
ports[i] |= tport->tp_port_mask[i];
break;
case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
+ pre_tun = (struct nfp_fl_pre_tunnel *)a;
+ ipv6_tun = be16_to_cpu(pre_tun->flags) &
+ NFP_FL_PRE_TUN_IPV6;
+ break;
case NFP_FL_ACTION_OPCODE_PRE_LAG:
case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
break;
@@ -765,15 +825,15 @@ nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
static int
nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
{
- struct nfp_fl_set_ipv4_tun *tun;
+ struct nfp_fl_set_tun *tun;
struct nfp_fl_act_head *a;
unsigned int act_off = 0;
while (act_off < len) {
a = (struct nfp_fl_act_head *)&acts[act_off];
- if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL) {
- tun = (struct nfp_fl_set_ipv4_tun *)a;
+ if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) {
+ tun = (struct nfp_fl_set_tun *)a;
tun->outer_vlan_tpid = vlan->vlan_tpid;
tun->outer_vlan_tci = vlan->vlan_tci;
@@ -1058,15 +1118,22 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
return -EOPNOTSUPP;
}
- if (key_layer & NFP_FLOWER_LAYER_IPV4) {
+ if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
+ key_layer & NFP_FLOWER_LAYER_IPV6) {
+ /* Flags and proto fields have same offset in IPv4 and IPv6. */
int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
+ int size;
int i;
+ size = key_layer & NFP_FLOWER_LAYER_IPV4 ?
+ sizeof(struct nfp_flower_ipv4) :
+ sizeof(struct nfp_flower_ipv6);
+
mask += sizeof(struct nfp_flower_mac_mpls);
/* Ensure proto and flags are the only IP layer fields. */
- for (i = 0; i < sizeof(struct nfp_flower_ipv4); i++)
+ for (i = 0; i < size; i++)
if (mask[i] && i != ip_flags && i != ip_proto) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
return -EOPNOTSUPP;
@@ -1195,6 +1262,8 @@ err_remove_rhash:
err_release_metadata:
nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
+ if (flow_pay->nfp_tun_ipv6)
+ nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
kfree(flow_pay->action_data);
kfree(flow_pay->mask_data);
kfree(flow_pay->unmasked_data);
@@ -1311,6 +1380,9 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
if (nfp_flow->nfp_tun_ipv4_addr)
nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
+ if (nfp_flow->nfp_tun_ipv6)
+ nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);
+
if (!nfp_flow->in_hw) {
err = 0;
goto err_free_merge_flow;
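One knock-on effect of IPv6 shows up in nfp_flower_calc_opt_layer(): the IPv6 addresses take 24 more bytes of match-key space than their IPv4 counterparts (32 versus 8), so the room left for geneve option data shrinks, presumably against a fixed overall key budget, and the cap drops from NFP_FL_MAX_GENEVE_OPT_KEY (32) to NFP_FL_MAX_GENEVE_OPT_KEY_V6 (8). Condensed from the hunk above:

    if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
        (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6))
        return -EOPNOTSUPP;     /* options no longer fit in the key */
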
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 2600ce476d6b..2df3deedf9fd 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -55,6 +55,25 @@ struct nfp_tun_active_tuns {
};
/**
+ * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
+ * @seq: sequence number of the message
+ * @count: number of tunnels report in message
+ * @flags: options part of the request
+ * @tun_info.ipv6: dest IPv6 address of active route
+ * @tun_info.egress_port: port the encapsulated packet egressed
+ * @tun_info: tunnels that have sent traffic in reported period
+ */
+struct nfp_tun_active_tuns_v6 {
+ __be32 seq;
+ __be32 count;
+ __be32 flags;
+ struct route_ip_info_v6 {
+ struct in6_addr ipv6;
+ __be32 egress_port;
+ } tun_info[];
+};
+
+/**
* struct nfp_tun_neigh - neighbour/route entry on the NFP
* @dst_ipv4: destination IPv4 address
* @src_ipv4: source IPv4 address
@@ -71,6 +90,22 @@ struct nfp_tun_neigh {
};
/**
+ * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP
+ * @dst_ipv6: destination IPv6 address
+ * @src_ipv6: source IPv6 address
+ * @dst_addr: destination MAC address
+ * @src_addr: source MAC address
+ * @port_id: NFP port to output packet on - associated with source IPv6
+ */
+struct nfp_tun_neigh_v6 {
+ struct in6_addr dst_ipv6;
+ struct in6_addr src_ipv6;
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+ __be32 port_id;
+};
+
+/**
* struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
* @ingress_port: ingress port of packet that signalled request
* @ipv4_addr: destination ipv4 address for route
@@ -83,13 +118,23 @@ struct nfp_tun_req_route_ipv4 {
};
/**
- * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
- * @ipv4_addr: destination of route
+ * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
+ * @ingress_port: ingress port of packet that signalled request
+ * @ipv6_addr: destination ipv6 address for route
+ */
+struct nfp_tun_req_route_ipv6 {
+ __be32 ingress_port;
+ struct in6_addr ipv6_addr;
+};
+
+/**
+ * struct nfp_offloaded_route - routes that are offloaded to the NFP
* @list: list pointer
+ * @ip_add: destination of route - can be IPv4 or IPv6
*/
-struct nfp_ipv4_route_entry {
- __be32 ipv4_addr;
+struct nfp_offloaded_route {
struct list_head list;
+ u8 ip_add[];
};
#define NFP_FL_IPV4_ADDRS_MAX 32
@@ -116,6 +161,18 @@ struct nfp_ipv4_addr_entry {
struct list_head list;
};
+#define NFP_FL_IPV6_ADDRS_MAX 4
+
+/**
+ * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
+ * @count: number of IPs populated in the array
+ * @ipv6_addr: array of IPV6_ADDRS_MAX 128 bit IPv6 addresses
+ */
+struct nfp_tun_ipv6_addr {
+ __be32 count;
+ struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
+};
+
#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG 0x2
/**
@@ -206,6 +263,49 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
rcu_read_unlock();
}
+void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct nfp_tun_active_tuns_v6 *payload;
+ struct net_device *netdev;
+ int count, i, pay_len;
+ struct neighbour *n;
+ void *ipv6_add;
+ u32 port;
+
+ payload = nfp_flower_cmsg_get_data(skb);
+ count = be32_to_cpu(payload->count);
+ if (count > NFP_FL_IPV6_ADDRS_MAX) {
+ nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n");
+ return;
+ }
+
+ pay_len = nfp_flower_cmsg_get_data_len(skb);
+ if (pay_len != struct_size(payload, tun_info, count)) {
+ nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
+ return;
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < count; i++) {
+ ipv6_add = &payload->tun_info[i].ipv6;
+ port = be32_to_cpu(payload->tun_info[i].egress_port);
+ netdev = nfp_app_dev_get(app, port, NULL);
+ if (!netdev)
+ continue;
+
+ n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
+ if (!n)
+ continue;
+
+ /* Update the used timestamp of neighbour */
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ }
+ rcu_read_unlock();
+#endif
+}
+
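The length test in nfp_tunnel_keep_alive_v6() requires the control message to carry exactly `count` flexible-array entries and nothing else: a 12-byte fixed header plus 20 bytes per reported tunnel. Modulo the overflow protection struct_size() adds, the check expands to (sketch):

    size_t expect = sizeof(struct nfp_tun_active_tuns_v6)       /* 12B header */
                  + count * sizeof(struct route_ip_info_v6);    /* 20B each   */

    if (pay_len != expect)
        return;     /* truncated or padded message: drop it */
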
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
gfp_t flag)
@@ -224,71 +324,126 @@ nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
return 0;
}
-static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
+static bool
+__nfp_tun_has_route(struct list_head *route_list, spinlock_t *list_lock,
+ void *add, int add_len)
{
- struct nfp_flower_priv *priv = app->priv;
- struct nfp_ipv4_route_entry *entry;
- struct list_head *ptr, *storage;
+ struct nfp_offloaded_route *entry;
- spin_lock_bh(&priv->tun.neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
- entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
- if (entry->ipv4_addr == ipv4_addr) {
- spin_unlock_bh(&priv->tun.neigh_off_lock);
+ spin_lock_bh(list_lock);
+ list_for_each_entry(entry, route_list, list)
+ if (!memcmp(entry->ip_add, add, add_len)) {
+ spin_unlock_bh(list_lock);
return true;
}
- }
- spin_unlock_bh(&priv->tun.neigh_off_lock);
+ spin_unlock_bh(list_lock);
return false;
}
-static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
+static int
+__nfp_tun_add_route_to_cache(struct list_head *route_list,
+ spinlock_t *list_lock, void *add, int add_len)
{
- struct nfp_flower_priv *priv = app->priv;
- struct nfp_ipv4_route_entry *entry;
- struct list_head *ptr, *storage;
+ struct nfp_offloaded_route *entry;
- spin_lock_bh(&priv->tun.neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
- entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
- if (entry->ipv4_addr == ipv4_addr) {
- spin_unlock_bh(&priv->tun.neigh_off_lock);
- return;
+ spin_lock_bh(list_lock);
+ list_for_each_entry(entry, route_list, list)
+ if (!memcmp(entry->ip_add, add, add_len)) {
+ spin_unlock_bh(list_lock);
+ return 0;
}
- }
- entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+
+ entry = kmalloc(sizeof(*entry) + add_len, GFP_ATOMIC);
if (!entry) {
- spin_unlock_bh(&priv->tun.neigh_off_lock);
- nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
- return;
+ spin_unlock_bh(list_lock);
+ return -ENOMEM;
}
- entry->ipv4_addr = ipv4_addr;
- list_add_tail(&entry->list, &priv->tun.neigh_off_list);
- spin_unlock_bh(&priv->tun.neigh_off_lock);
+ memcpy(entry->ip_add, add, add_len);
+ list_add_tail(&entry->list, route_list);
+ spin_unlock_bh(list_lock);
+
+ return 0;
}
-static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
+static void
+__nfp_tun_del_route_from_cache(struct list_head *route_list,
+ spinlock_t *list_lock, void *add, int add_len)
{
- struct nfp_flower_priv *priv = app->priv;
- struct nfp_ipv4_route_entry *entry;
- struct list_head *ptr, *storage;
+ struct nfp_offloaded_route *entry;
- spin_lock_bh(&priv->tun.neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
- entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
- if (entry->ipv4_addr == ipv4_addr) {
+ spin_lock_bh(list_lock);
+ list_for_each_entry(entry, route_list, list)
+ if (!memcmp(entry->ip_add, add, add_len)) {
list_del(&entry->list);
kfree(entry);
break;
}
- }
- spin_unlock_bh(&priv->tun.neigh_off_lock);
+ spin_unlock_bh(list_lock);
+}
+
+static bool nfp_tun_has_route_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ return __nfp_tun_has_route(&priv->tun.neigh_off_list_v4,
+ &priv->tun.neigh_off_lock_v4, ipv4_addr,
+ sizeof(*ipv4_addr));
+}
+
+static bool
+nfp_tun_has_route_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ return __nfp_tun_has_route(&priv->tun.neigh_off_list_v6,
+ &priv->tun.neigh_off_lock_v6, ipv6_addr,
+ sizeof(*ipv6_addr));
+}
+
+static void
+nfp_tun_add_route_to_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v4,
+ &priv->tun.neigh_off_lock_v4, ipv4_addr,
+ sizeof(*ipv4_addr));
+}
+
+static void
+nfp_tun_add_route_to_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v6,
+ &priv->tun.neigh_off_lock_v6, ipv6_addr,
+ sizeof(*ipv6_addr));
}
static void
-nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
- struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
+nfp_tun_del_route_from_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v4,
+ &priv->tun.neigh_off_lock_v4, ipv4_addr,
+ sizeof(*ipv4_addr));
+}
+
+static void
+nfp_tun_del_route_from_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v6,
+ &priv->tun.neigh_off_lock_v6, ipv6_addr,
+ sizeof(*ipv6_addr));
+}
+
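/* Editorial sketch (not part of the patch): the generic helpers above
 * compare raw address bytes with memcmp(), so one entry type backs both
 * caches.  The kmalloc(sizeof(*entry) + add_len) plus
 * memcpy(entry->ip_add, ...) pattern implies a flexible-array entry of
 * roughly this shape (assumed; the real definition lives in the flower
 * headers):
 */
struct nfp_offloaded_route {
	struct list_head list;
	u8 ip_add[];		/* 4B for IPv4, 16B for IPv6 */
};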
+static void
+nfp_tun_write_neigh_v4(struct net_device *netdev, struct nfp_app *app,
+ struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
struct nfp_tun_neigh payload;
u32 port_id;
@@ -302,7 +457,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
/* If entry has expired send dst IP with all other fields 0. */
if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
- nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
+ nfp_tun_del_route_from_cache_v4(app, &payload.dst_ipv4);
/* Trigger ARP to verify invalid neighbour state. */
neigh_event_send(neigh, NULL);
goto send_msg;
@@ -314,7 +469,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
payload.port_id = cpu_to_be32(port_id);
/* Add destination of new route to NFP cache. */
- nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
+ nfp_tun_add_route_to_cache_v4(app, &payload.dst_ipv4);
send_msg:
nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
@@ -322,16 +477,54 @@ send_msg:
(unsigned char *)&payload, flag);
}
+static void
+nfp_tun_write_neigh_v6(struct net_device *netdev, struct nfp_app *app,
+ struct flowi6 *flow, struct neighbour *neigh, gfp_t flag)
+{
+ struct nfp_tun_neigh_v6 payload;
+ u32 port_id;
+
+ port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
+ if (!port_id)
+ return;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_neigh_v6));
+ payload.dst_ipv6 = flow->daddr;
+
+ /* If entry has expired send dst IP with all other fields 0. */
+ if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
+ nfp_tun_del_route_from_cache_v6(app, &payload.dst_ipv6);
+ /* Trigger probe to verify invalid neighbour state. */
+ neigh_event_send(neigh, NULL);
+ goto send_msg;
+ }
+
+ /* Have a valid neighbour so populate rest of entry. */
+ payload.src_ipv6 = flow->saddr;
+ ether_addr_copy(payload.src_addr, netdev->dev_addr);
+ neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
+ payload.port_id = cpu_to_be32(port_id);
+ /* Add destination of new route to NFP cache. */
+ nfp_tun_add_route_to_cache_v6(app, &payload.dst_ipv6);
+
+send_msg:
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
+ sizeof(struct nfp_tun_neigh_v6),
+ (unsigned char *)&payload, flag);
+}
+
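/* Editorial note (not part of the patch): both write_neigh variants use
 * the same cmsg convention -- destination address populated, every other
 * field zero -- to delete an entry from the FW neighbour table.
 * nfp_tunnel_config_stop() below reuses the same convention to flush any
 * remaining entries on shutdown.
 */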
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
void *ptr)
{
struct nfp_flower_priv *app_priv;
struct netevent_redirect *redir;
- struct flowi4 flow = {};
+ struct flowi4 flow4 = {};
+ struct flowi6 flow6 = {};
struct neighbour *n;
struct nfp_app *app;
struct rtable *rt;
+ bool ipv6 = false;
int err;
switch (event) {
@@ -346,7 +539,13 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
return NOTIFY_DONE;
}
- flow.daddr = *(__be32 *)n->primary_key;
+ if (n->tbl->family == AF_INET6)
+ ipv6 = true;
+
+ if (ipv6)
+ flow6.daddr = *(struct in6_addr *)n->primary_key;
+ else
+ flow4.daddr = *(__be32 *)n->primary_key;
app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
app = app_priv->app;
@@ -356,28 +555,46 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
return NOTIFY_DONE;
/* Only concerned with changes to routes already added to NFP. */
- if (!nfp_tun_has_route(app, flow.daddr))
+ if ((ipv6 && !nfp_tun_has_route_v6(app, &flow6.daddr)) ||
+ (!ipv6 && !nfp_tun_has_route_v4(app, &flow4.daddr)))
return NOTIFY_DONE;
#if IS_ENABLED(CONFIG_INET)
- /* Do a route lookup to populate flow data. */
- rt = ip_route_output_key(dev_net(n->dev), &flow);
- err = PTR_ERR_OR_ZERO(rt);
- if (err)
+ if (ipv6) {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct dst_entry *dst;
+
+ dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(n->dev), NULL,
+ &flow6, NULL);
+ if (IS_ERR(dst))
+ return NOTIFY_DONE;
+
+ dst_release(dst);
+ flow6.flowi6_proto = IPPROTO_UDP;
+ nfp_tun_write_neigh_v6(n->dev, app, &flow6, n, GFP_ATOMIC);
+#else
return NOTIFY_DONE;
+#endif /* CONFIG_IPV6 */
+ } else {
+ /* Do a route lookup to populate flow data. */
+ rt = ip_route_output_key(dev_net(n->dev), &flow4);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (err)
+ return NOTIFY_DONE;
- ip_rt_put(rt);
+ ip_rt_put(rt);
+
+ flow4.flowi4_proto = IPPROTO_UDP;
+ nfp_tun_write_neigh_v4(n->dev, app, &flow4, n, GFP_ATOMIC);
+ }
#else
return NOTIFY_DONE;
-#endif
-
- flow.flowi4_proto = IPPROTO_UDP;
- nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
+#endif /* CONFIG_INET */
return NOTIFY_OK;
}
-void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
+void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_tun_req_route_ipv4 *payload;
struct net_device *netdev;
@@ -411,7 +628,7 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
ip_rt_put(rt);
if (!n)
goto fail_rcu_unlock;
- nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
+ nfp_tun_write_neigh_v4(n->dev, app, &flow, n, GFP_ATOMIC);
neigh_release(n);
rcu_read_unlock();
return;
@@ -421,6 +638,48 @@ fail_rcu_unlock:
nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}
+void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_tun_req_route_ipv6 *payload;
+ struct net_device *netdev;
+ struct flowi6 flow = {};
+ struct dst_entry *dst;
+ struct neighbour *n;
+
+ payload = nfp_flower_cmsg_get_data(skb);
+
+ rcu_read_lock();
+ netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
+ if (!netdev)
+ goto fail_rcu_unlock;
+
+ flow.daddr = payload->ipv6_addr;
+ flow.flowi6_proto = IPPROTO_UDP;
+
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow,
+ NULL);
+ if (IS_ERR(dst))
+ goto fail_rcu_unlock;
+#else
+ goto fail_rcu_unlock;
+#endif
+
+ n = dst_neigh_lookup(dst, &flow.daddr);
+ dst_release(dst);
+ if (!n)
+ goto fail_rcu_unlock;
+
+ nfp_tun_write_neigh_v6(n->dev, app, &flow, n, GFP_ATOMIC);
+ neigh_release(n);
+ rcu_read_unlock();
+ return;
+
+fail_rcu_unlock:
+ rcu_read_unlock();
+ nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
+}
+
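/* Editorial sketch (not part of the patch): the handler above touches
 * only two fields of the request cmsg, so its layout is at least the
 * following (assumed shape; the real definition lives in flower/cmsg.h):
 */
struct nfp_tun_req_route_ipv6 {
	__be32 ingress_port;		/* port the route is resolved from */
	struct in6_addr ipv6_addr;	/* tunnel destination to resolve */
};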
static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
@@ -502,6 +761,78 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
nfp_tun_write_ipv4_list(app);
}
+static void nfp_tun_write_ipv6_list(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv6_addr_entry *entry;
+ struct nfp_tun_ipv6_addr payload;
+ int count = 0;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr));
+ mutex_lock(&priv->tun.ipv6_off_lock);
+ list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) {
+ if (count >= NFP_FL_IPV6_ADDRS_MAX) {
+ nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n");
+ break;
+ }
+ payload.ipv6_addr[count++] = entry->ipv6_addr;
+ }
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+ payload.count = cpu_to_be32(count);
+
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6,
+ sizeof(struct nfp_tun_ipv6_addr),
+ &payload, GFP_KERNEL);
+}
+
+struct nfp_ipv6_addr_entry *
+nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv6_addr_entry *entry;
+
+ mutex_lock(&priv->tun.ipv6_off_lock);
+ list_for_each_entry(entry, &priv->tun.ipv6_off_list, list)
+ if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) {
+ entry->ref_count++;
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+ return entry;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+ nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
+ return NULL;
+ }
+ entry->ipv6_addr = *ipv6;
+ entry->ref_count = 1;
+ list_add_tail(&entry->list, &priv->tun.ipv6_off_list);
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+
+ nfp_tun_write_ipv6_list(app);
+
+ return entry;
+}
+
+void
+nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ bool freed = false;
+
+ mutex_lock(&priv->tun.ipv6_off_lock);
+ if (!--entry->ref_count) {
+ list_del(&entry->list);
+ kfree(entry);
+ freed = true;
+ }
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+
+ if (freed)
+ nfp_tun_write_ipv6_list(app);
+}
+
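/* Editorial sketch (not part of the patch): the add/put pair above is
 * reference counted, so a caller offloading a tunnel endpoint address
 * (hypothetical example_* names) would pair them across the flow's
 * lifetime:
 */
static int example_flow_add(struct nfp_app *app, struct in6_addr *src)
{
	struct nfp_ipv6_addr_entry *entry;

	entry = nfp_tunnel_add_ipv6_off(app, src);	/* ref++, FW list synced */
	if (!entry)
		return -ENOMEM;
	/* ... store 'entry' with the offloaded flow ... */
	return 0;
}

static void example_flow_del(struct nfp_app *app,
			     struct nfp_ipv6_addr_entry *entry)
{
	nfp_tunnel_put_ipv6_off(app, entry);	/* ref--, freed and re-synced at 0 */
}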
static int
__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
{
@@ -1013,13 +1344,17 @@ int nfp_tunnel_config_start(struct nfp_app *app)
ida_init(&priv->tun.mac_off_ids);
- /* Initialise priv data for IPv4 offloading. */
+ /* Initialise priv data for IPv4/v6 offloading. */
mutex_init(&priv->tun.ipv4_off_lock);
INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
+ mutex_init(&priv->tun.ipv6_off_lock);
+ INIT_LIST_HEAD(&priv->tun.ipv6_off_list);
/* Initialise priv data for neighbour offloading. */
- spin_lock_init(&priv->tun.neigh_off_lock);
- INIT_LIST_HEAD(&priv->tun.neigh_off_list);
+ spin_lock_init(&priv->tun.neigh_off_lock_v4);
+ INIT_LIST_HEAD(&priv->tun.neigh_off_list_v4);
+ spin_lock_init(&priv->tun.neigh_off_lock_v6);
+ INIT_LIST_HEAD(&priv->tun.neigh_off_list_v6);
priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
err = register_netevent_notifier(&priv->tun.neigh_nb);
@@ -1034,9 +1369,11 @@ int nfp_tunnel_config_start(struct nfp_app *app)
void nfp_tunnel_config_stop(struct nfp_app *app)
{
+ struct nfp_offloaded_route *route_entry, *temp;
struct nfp_flower_priv *priv = app->priv;
- struct nfp_ipv4_route_entry *route_entry;
struct nfp_ipv4_addr_entry *ip_entry;
+ struct nfp_tun_neigh_v6 ipv6_route;
+ struct nfp_tun_neigh ipv4_route;
struct list_head *ptr, *storage;
unregister_netevent_notifier(&priv->tun.neigh_nb);
@@ -1050,12 +1387,35 @@ void nfp_tunnel_config_stop(struct nfp_app *app)
kfree(ip_entry);
}
- /* Free any memory that may be occupied by the route list. */
- list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
- route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
- list);
+ mutex_destroy(&priv->tun.ipv6_off_lock);
+
+ /* Free memory in the route list and remove entries from fw cache. */
+ list_for_each_entry_safe(route_entry, temp,
+ &priv->tun.neigh_off_list_v4, list) {
+ memset(&ipv4_route, 0, sizeof(ipv4_route));
+ memcpy(&ipv4_route.dst_ipv4, &route_entry->ip_add,
+ sizeof(ipv4_route.dst_ipv4));
list_del(&route_entry->list);
kfree(route_entry);
+
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
+ sizeof(struct nfp_tun_neigh),
+ (unsigned char *)&ipv4_route,
+ GFP_KERNEL);
+ }
+
+ list_for_each_entry_safe(route_entry, temp,
+ &priv->tun.neigh_off_list_v6, list) {
+ memset(&ipv6_route, 0, sizeof(ipv6_route));
+ memcpy(&ipv6_route.dst_ipv6, &route_entry->ip_add,
+ sizeof(ipv6_route.dst_ipv6));
+ list_del(&route_entry->list);
+ kfree(route_entry);
+
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
+ sizeof(struct nfp_tun_neigh_v6),
+ (unsigned char *)&ipv6_route,
+ GFP_KERNEL);
}
/* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 250f510b1d21..ff4438478ea9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -586,6 +586,9 @@ struct nfp_net_dp {
* @ktls_conn_id_gen: Trivial generator for kTLS connection ids (for TX)
* @ktls_no_space: Counter of firmware rejecting kTLS connection due to
* lack of space
+ * @ktls_rx_resync_req: Counter of TLS RX resync requests handled
+ * @ktls_rx_resync_ign: Counter of TLS RX resync requests ignored
+ * @ktls_rx_resync_sent: Counter of TLS RX resyncs sent to FW
* @mbox_cmsg: Common Control Message via vNIC mailbox state
* @mbox_cmsg.queue: CCM mbox queue of pending messages
* @mbox_cmsg.wq: CCM mbox wait queue of waiting processes
@@ -674,6 +677,9 @@ struct nfp_net {
atomic64_t ktls_conn_id_gen;
atomic_t ktls_no_space;
+ atomic_t ktls_rx_resync_req;
+ atomic_t ktls_rx_resync_ign;
+ atomic_t ktls_rx_resync_sent;
struct {
struct sk_buff_head queue;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index bcdcd6de7dea..9bfb3b077bc1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -47,6 +47,7 @@
#include "nfp_net_sriov.h"
#include "nfp_port.h"
#include "crypto/crypto.h"
+#include "crypto/fw.h"
/**
* nfp_net_get_fw_version() - Read and parse the FW version
@@ -1321,17 +1322,11 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
netdev_tx_reset_queue(nd_q);
}
-static void nfp_net_tx_timeout(struct net_device *netdev)
+static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct nfp_net *nn = netdev_priv(netdev);
- int i;
- for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
- if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
- continue;
- nn_warn(nn, "TX timeout on ring: %d\n", i);
- }
- nn_warn(nn, "TX watchdog timeout\n");
+ nn_warn(nn, "TX watchdog timeout on ring: %u\n", txqueue);
}
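/* Editorial note (not part of the patch): this follows the core netdev
 * change that passes the stalled queue to the handler, i.e.
 *
 *   void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
 *
 * which makes the old per-queue scan redundant.
 */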
/* Receive processing
@@ -1667,9 +1662,9 @@ nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
&rx_hash->hash);
}
-static void *
+static bool
nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
- void *data, int meta_len)
+ void *data, void *pkt, unsigned int pkt_len, int meta_len)
{
u32 meta_info;
@@ -1699,14 +1694,20 @@ nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
(__force __wsum)__get_unaligned_cpu32(data);
data += 4;
break;
+ case NFP_NET_META_RESYNC_INFO:
+ if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
+ pkt_len))
+ return false;
+ data += sizeof(struct nfp_net_tls_resync_req);
+ break;
default:
- return NULL;
+ return true;
}
meta_info >>= NFP_NET_META_FIELD_SIZE;
}
- return data;
+ return data != pkt;
}
static void
@@ -1891,12 +1892,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_net_set_hash_desc(dp->netdev, &meta,
rxbuf->frag + meta_off, rxd);
} else if (meta_len) {
- void *end;
-
- end = nfp_net_parse_meta(dp->netdev, &meta,
- rxbuf->frag + meta_off,
- meta_len);
- if (unlikely(end != rxbuf->frag + pkt_off)) {
+ if (unlikely(nfp_net_parse_meta(dp->netdev, &meta,
+ rxbuf->frag + meta_off,
+ rxbuf->frag + pkt_off,
+ pkt_len, meta_len))) {
nn_dp_warn(dp, "invalid RX packet metadata\n");
nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
NULL);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
index d835c14b7257..c3a763134e79 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -17,6 +17,30 @@ static void nfp_net_tlv_caps_reset(struct nfp_net_tlv_caps *caps)
caps->mbox_len = NFP_NET_CFG_MBOX_VAL_MAX_SZ;
}
+static bool
+nfp_net_tls_parse_crypto_ops(struct device *dev, struct nfp_net_tlv_caps *caps,
+ u8 __iomem *ctrl_mem, u8 __iomem *data,
+ unsigned int length, unsigned int offset,
+ bool rx_stream_scan)
+{
+ /* Ignore the legacy TLV if new one was already parsed */
+ if (caps->tls_resync_ss && !rx_stream_scan)
+ return true;
+
+ if (length < 32) {
+ dev_err(dev,
+ "CRYPTO OPS TLV should be at least 32B, is %dB offset:%u\n",
+ length, offset);
+ return false;
+ }
+
+ caps->crypto_ops = readl(data);
+ caps->crypto_enable_off = data - ctrl_mem + 16;
+ caps->tls_resync_ss = rx_stream_scan;
+
+ return true;
+}
+
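/* Editorial note (not part of the patch): both the legacy CRYPTO_OPS TLV
 * and the new CRYPTO_OPS_RX_SCAN TLV funnel into this helper, and the
 * caps->tls_resync_ss guard makes the stream-scan variant win no matter
 * which order the device lists them in:
 *
 *   legacy, then RX_SCAN -> RX_SCAN re-parses and sets tls_resync_ss
 *   RX_SCAN, then legacy -> legacy is ignored (early "return true")
 */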
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
struct nfp_net_tlv_caps *caps)
{
@@ -104,15 +128,25 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
caps->mbox_cmsg_types = readl(data);
break;
case NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS:
- if (length < 32) {
- dev_err(dev,
- "CRYPTO OPS TLV should be at least 32B, is %dB offset:%u\n",
- length, offset);
+ if (!nfp_net_tls_parse_crypto_ops(dev, caps, ctrl_mem,
+ data, length, offset,
+ false))
return -EINVAL;
+ break;
+ case NFP_NET_CFG_TLV_TYPE_VNIC_STATS:
+ if ((data - ctrl_mem) % 8) {
+ dev_warn(dev, "VNIC STATS TLV misaligned, ignoring offset:%u len:%u\n",
+ offset, length);
+ break;
}
-
- caps->crypto_ops = readl(data);
- caps->crypto_enable_off = data - ctrl_mem + 16;
+ caps->vnic_stats_off = data - ctrl_mem;
+ caps->vnic_stats_cnt = length / 10;
+ break;
+ case NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN:
+ if (!nfp_net_tls_parse_crypto_ops(dev, caps, ctrl_mem,
+ data, length, offset,
+ true))
+ return -EINVAL;
break;
default:
if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index ee6b24e4eacd..3d61a8cb60b0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -45,6 +45,7 @@
#define NFP_NET_META_PORTID 5
#define NFP_NET_META_CSUM 6 /* checksum complete type */
#define NFP_NET_META_CONN_HANDLE 7
+#define NFP_NET_META_RESYNC_INFO 8 /* RX resync info request */
#define NFP_META_PORT_ID_CTRL ~0U
@@ -479,6 +480,22 @@
* 8 words, bitmaps of supported and enabled crypto operations.
* First 16B (4 words) contains a bitmap of supported crypto operations,
* and next 16B contain the enabled operations.
+ * This capability is made obsolete by TLVs with better resync methods.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_VNIC_STATS:
+ * Variable, per-vNIC statistics; data should be 8B aligned (FW should insert
+ * a zero-length RESERVED TLV to pad).
+ * TLV data has two sections. The first is an array of statistics' IDs
+ * (2B each); the second holds the 8B statistics themselves. Statistics
+ * are 8B aligned, meaning there may be padding between the sections.
+ * The number of statistics can be determined as floor(tlv.length / (2 + 8)).
+ * This TLV supersedes the %NFP_NET_CFG_STATS_* values (statistics in this
+ * TLV duplicate the old ones, so the driver should be careful not to render
+ * both unnecessarily).
+ *
+ * %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN:
+ * Same as %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS, but for TLS the device performs
+ * stream-scan RX resync, rather than kernel-assisted resync.
*/
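/* Editorial sketch (not part of the patch): worked example of the
 * VNIC_STATS layout documented above, for a TLV with length == 58
 * ('length' and 'roundup' as used by the parser):
 *
 *   cnt       = 58 / 10 = 5 statistics (IDs occupy 2B * 5 = 10B)
 *   stats_off = roundup(2 * 5, 8) = 16B into the TLV data
 *   data end  = 16 + 5 * 8 = 56B (the trailing 2B is padding)
 */
unsigned int cnt = length / 10;			/* floor(len / (2 + 8)) */
unsigned int stats_off = roundup(2 * cnt, 8);	/* values are 8B aligned */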
#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0
#define NFP_NET_CFG_TLV_TYPE_RESERVED 1
@@ -490,6 +507,8 @@
#define NFP_NET_CFG_TLV_TYPE_REPR_CAP 7
#define NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES 10
#define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS 11 /* see crypto/fw.h */
+#define NFP_NET_CFG_TLV_TYPE_VNIC_STATS 12
+#define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN 13
struct device;
@@ -502,6 +521,9 @@ struct device;
* @mbox_cmsg_types: cmsgs which can be passed through the mailbox
* @crypto_ops: supported crypto operations
* @crypto_enable_off: offset of crypto ops enable region
+ * @vnic_stats_off: offset of vNIC stats area
+ * @vnic_stats_cnt: number of vNIC stats
+ * @tls_resync_ss: TLS resync will be performed via stream scan
*/
struct nfp_net_tlv_caps {
u32 me_freq_mhz;
@@ -511,6 +533,9 @@ struct nfp_net_tlv_caps {
u32 mbox_cmsg_types;
u32 crypto_ops;
unsigned int crypto_enable_off;
+ unsigned int vnic_stats_off;
+ unsigned int vnic_stats_cnt;
+ unsigned int tls_resync_ss:1;
};
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 1b840ee47339..d648e32c0520 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -148,11 +148,33 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
{ "tx_pause_frames_class7", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7, },
};
+static const char nfp_tlv_stat_names[][ETH_GSTRING_LEN] = {
+ [1] = "dev_rx_discards",
+ [2] = "dev_rx_errors",
+ [3] = "dev_rx_bytes",
+ [4] = "dev_rx_uc_bytes",
+ [5] = "dev_rx_mc_bytes",
+ [6] = "dev_rx_bc_bytes",
+ [7] = "dev_rx_pkts",
+ [8] = "dev_rx_mc_pkts",
+ [9] = "dev_rx_bc_pkts",
+
+ [10] = "dev_tx_discards",
+ [11] = "dev_tx_errors",
+ [12] = "dev_tx_bytes",
+ [13] = "dev_tx_uc_bytes",
+ [14] = "dev_tx_mc_bytes",
+ [15] = "dev_tx_bc_bytes",
+ [16] = "dev_tx_pkts",
+ [17] = "dev_tx_mc_pkts",
+ [18] = "dev_tx_bc_pkts",
+};
+
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
#define NN_RVEC_GATHER_STATS 13
#define NN_RVEC_PER_Q_STATS 3
-#define NN_CTRL_PATH_STATS 1
+#define NN_CTRL_PATH_STATS 4
#define SFP_SFF_REV_COMPLIANCE 1
@@ -454,6 +476,9 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
data = nfp_pr_et(data, "tx_tls_drop_no_sync_data");
data = nfp_pr_et(data, "hw_tls_no_space");
+ data = nfp_pr_et(data, "rx_tls_resync_req_ok");
+ data = nfp_pr_et(data, "rx_tls_resync_req_ign");
+ data = nfp_pr_et(data, "rx_tls_resync_sent");
return data;
}
@@ -502,6 +527,9 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
*data++ = gathered_stats[j];
*data++ = atomic_read(&nn->ktls_no_space);
+ *data++ = atomic_read(&nn->ktls_rx_resync_req);
+ *data++ = atomic_read(&nn->ktls_rx_resync_ign);
+ *data++ = atomic_read(&nn->ktls_rx_resync_sent);
return data;
}
@@ -560,6 +588,65 @@ nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs)
return data;
}
+static unsigned int nfp_vnic_get_tlv_stats_count(struct nfp_net *nn)
+{
+ return nn->tlv_caps.vnic_stats_cnt + nn->max_r_vecs * 4;
+}
+
+static u8 *nfp_vnic_get_tlv_stats_strings(struct nfp_net *nn, u8 *data)
+{
+ unsigned int i, id;
+ u8 __iomem *mem;
+ u64 id_word = 0;
+
+ mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
+ for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++) {
+ if (!(i % 4))
+ id_word = readq(mem + i * 2);
+
+ id = (u16)id_word;
+ id_word >>= 16;
+
+ if (id < ARRAY_SIZE(nfp_tlv_stat_names) &&
+ nfp_tlv_stat_names[id][0]) {
+ memcpy(data, nfp_tlv_stat_names[id], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ } else {
+ data = nfp_pr_et(data, "dev_unknown_stat%u", id);
+ }
+ }
+
+ for (i = 0; i < nn->max_r_vecs; i++) {
+ data = nfp_pr_et(data, "rxq_%u_pkts", i);
+ data = nfp_pr_et(data, "rxq_%u_bytes", i);
+ data = nfp_pr_et(data, "txq_%u_pkts", i);
+ data = nfp_pr_et(data, "txq_%u_bytes", i);
+ }
+
+ return data;
+}
+
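/* Editorial note (not part of the patch): each readq() above fetches
 * four 2B stat IDs at once.  On a little-endian layout, IDs 1, 2, 3, 4
 * stored in memory order read back as id_word == 0x0004000300020001,
 * and the repeated "id = (u16)id_word; id_word >>= 16;" steps yield
 * 1, 2, 3 and 4 in turn.
 */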
+static u64 *nfp_vnic_get_tlv_stats(struct nfp_net *nn, u64 *data)
+{
+ u8 __iomem *mem;
+ unsigned int i;
+
+ mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
+ mem += roundup(2 * nn->tlv_caps.vnic_stats_cnt, 8);
+ for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++)
+ *data++ = readq(mem + i * 8);
+
+ mem = nn->dp.ctrl_bar;
+ for (i = 0; i < nn->max_r_vecs; i++) {
+ *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
+ *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
+ *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
+ *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
+ }
+
+ return data;
+}
+
static unsigned int nfp_mac_get_stats_count(struct net_device *netdev)
{
struct nfp_port *port;
@@ -609,8 +696,12 @@ static void nfp_net_get_strings(struct net_device *netdev,
switch (stringset) {
case ETH_SS_STATS:
data = nfp_vnic_get_sw_stats_strings(netdev, data);
- data = nfp_vnic_get_hw_stats_strings(data, nn->max_r_vecs,
- false);
+ if (!nn->tlv_caps.vnic_stats_off)
+ data = nfp_vnic_get_hw_stats_strings(data,
+ nn->max_r_vecs,
+ false);
+ else
+ data = nfp_vnic_get_tlv_stats_strings(nn, data);
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(nn->port, data);
break;
@@ -624,7 +715,11 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
struct nfp_net *nn = netdev_priv(netdev);
data = nfp_vnic_get_sw_stats(netdev, data);
- data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->max_r_vecs);
+ if (!nn->tlv_caps.vnic_stats_off)
+ data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
+ nn->max_r_vecs);
+ else
+ data = nfp_vnic_get_tlv_stats(nn, data);
data = nfp_mac_get_stats(netdev, data);
data = nfp_app_port_get_stats(nn->port, data);
}
@@ -632,13 +727,18 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
{
struct nfp_net *nn = netdev_priv(netdev);
+ unsigned int cnt;
switch (sset) {
case ETH_SS_STATS:
- return nfp_vnic_get_sw_stats_count(netdev) +
- nfp_vnic_get_hw_stats_count(nn->max_r_vecs) +
- nfp_mac_get_stats_count(netdev) +
- nfp_app_port_get_stats_count(nn->port);
+ cnt = nfp_vnic_get_sw_stats_count(netdev);
+ if (!nn->tlv_caps.vnic_stats_off)
+ cnt += nfp_vnic_get_hw_stats_count(nn->max_r_vecs);
+ else
+ cnt += nfp_vnic_get_tlv_stats_count(nn);
+ cnt += nfp_mac_get_stats_count(netdev);
+ cnt += nfp_app_port_get_stats_count(nn->port);
+ return cnt;
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index e4977cdf7678..c0e2f4394aef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -106,7 +106,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
* first NFP_NET_CFG_BAR_SZ of the BAR. This keeps the code
* identical for PF and VF drivers.
*/
- ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
+ ctrl_bar = ioremap(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
NFP_NET_CFG_BAR_SZ);
if (!ctrl_bar) {
dev_err(&pdev->dev,
@@ -200,7 +200,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
bar_sz = (rx_bar_off + rx_bar_sz) - bar_off;
map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off;
- vf->q_bar = ioremap_nocache(map_addr, bar_sz);
+ vf->q_bar = ioremap(map_addr, bar_sz);
if (!vf->q_bar) {
nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
err = -EIO;
@@ -216,7 +216,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
/* TX queues */
map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off;
- nn->tx_bar = ioremap_nocache(map_addr, tx_bar_sz);
+ nn->tx_bar = ioremap(map_addr, tx_bar_sz);
if (!nn->tx_bar) {
nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
err = -EIO;
@@ -225,7 +225,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
/* RX queues */
map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off;
- nn->rx_bar = ioremap_nocache(map_addr, rx_bar_sz);
+ nn->rx_bar = ioremap(map_addr, rx_bar_sz);
if (!nn->rx_bar) {
nn_err(nn, "Failed to map resource %d\n", rx_bar_no);
err = -EIO;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 85d46f206b3c..b454db283aef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -611,7 +611,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
/* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */
bar = &nfp->bar[0];
if (nfp_bar_resource_len(bar) >= NFP_PCI_MIN_MAP_SIZE)
- bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
+ bar->iomem = ioremap(nfp_bar_resource_start(bar),
nfp_bar_resource_len(bar));
if (bar->iomem) {
int pf;
@@ -677,7 +677,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
}
bar = &nfp->bar[4 + i];
- bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
+ bar->iomem = ioremap(nfp_bar_resource_start(bar),
nfp_bar_resource_len(bar));
if (bar->iomem) {
msg += snprintf(msg, end - msg,
@@ -858,7 +858,7 @@ static int nfp6000_area_acquire(struct nfp_cpp_area *area)
priv->iomem = priv->bar->iomem + priv->bar_offset;
else
/* Must have been too big. Sub-allocate. */
- priv->iomem = ioremap_nocache(priv->phys, priv->size);
+ priv->iomem = ioremap(priv->phys, priv->size);
if (IS_ERR_OR_NULL(priv->iomem)) {
dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n",