Diffstat (limited to 'drivers/net/ethernet/netronome')
-rw-r--r--  drivers/net/ethernet/netronome/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/Makefile | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/cls.c | 22
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/main.h | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c | 115
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.c | 30
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.h | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 12
-rw-r--r--  drivers/net/ethernet/netronome/nfp/ccm.c | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/ccm.h | 60
-rw-r--r--  drivers/net/ethernet/netronome/nfp/ccm_mbox.c | 743
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/crypto.h | 27
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/fw.h | 84
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/tls.c | 522
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 260
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 57
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/lag_conf.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h | 18
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 149
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/metadata.c | 30
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 350
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 73
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 213
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c | 15
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 21
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 26
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 7
29 files changed, 2478 insertions(+), 381 deletions(-)
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index 4ad5109059e0..bac5be4d4f43 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -20,6 +20,7 @@ config NFP
tristate "Netronome(R) NFP4000/NFP6000 NIC driver"
depends on PCI && PCI_MSI
depends on VXLAN || VXLAN=n
+ depends on TLS && TLS_DEVICE || TLS_DEVICE=n
select NET_DEVLINK
---help---
This driver supports the Netronome(R) NFP4000/NFP6000 based
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 87bf784f8e8f..2805641965f3 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -16,6 +16,7 @@ nfp-objs := \
nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \
ccm.o \
+ ccm_mbox.o \
nfp_asm.o \
nfp_app.o \
nfp_app_nic.o \
@@ -34,6 +35,11 @@ nfp-objs := \
nfp_shared_buf.o \
nic/main.o
+ifeq ($(CONFIG_TLS_DEVICE),y)
+nfp-objs += \
+ crypto/tls.o
+endif
+
ifeq ($(CONFIG_NFP_APP_FLOWER),y)
nfp-objs += \
flower/action.o \
diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
index ff3913085665..23ebddfb9532 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/cls.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
@@ -262,22 +262,12 @@ static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
}
}
+static LIST_HEAD(nfp_abm_block_cb_list);
+
int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
- struct tc_block_offload *f)
+ struct flow_block_offload *f)
{
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block,
- nfp_abm_setup_tc_block_cb,
- repr, repr, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block, nfp_abm_setup_tc_block_cb,
- repr);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
+ return flow_block_cb_setup_simple(f, &nfp_abm_block_cb_list,
+ nfp_abm_setup_tc_block_cb,
+ repr, repr, true);
}
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h
index 49749c60885e..48746c9c6224 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.h
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.h
@@ -247,7 +247,7 @@ int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
struct tc_gred_qopt_offload *opt);
int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
- struct tc_block_offload *opt);
+ struct flow_block_offload *opt);
int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index d4bf0e694541..4054b70d7719 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -623,6 +623,13 @@ static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
}
static void
+wrp_zext(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst)
+{
+ if (meta->flags & FLAG_INSN_DO_ZEXT)
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+}
+
+static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
enum nfp_relo_type relo)
{
@@ -858,7 +865,8 @@ static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
}
static int
-data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
+data_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, swreg offset,
+ u8 dst_gpr, int size)
{
unsigned int i;
u16 shift, sz;
@@ -881,14 +889,15 @@ data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
if (i < 2)
- wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
return 0;
}
static int
-data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
- swreg lreg, swreg rreg, int size, enum cmd_mode mode)
+data_ld_host_order(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 dst_gpr, swreg lreg, swreg rreg, int size,
+ enum cmd_mode mode)
{
unsigned int i;
u8 mask, sz;
@@ -911,33 +920,34 @@ data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
if (i < 2)
- wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
return 0;
}
static int
-data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
- u8 dst_gpr, u8 size)
+data_ld_host_order_addr32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 src_gpr, swreg offset, u8 dst_gpr, u8 size)
{
- return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
- size, CMD_MODE_32b);
+ return data_ld_host_order(nfp_prog, meta, dst_gpr, reg_a(src_gpr),
+ offset, size, CMD_MODE_32b);
}
static int
-data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
- u8 dst_gpr, u8 size)
+data_ld_host_order_addr40(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 src_gpr, swreg offset, u8 dst_gpr, u8 size)
{
swreg rega, regb;
addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);
- return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
+ return data_ld_host_order(nfp_prog, meta, dst_gpr, rega, regb,
size, CMD_MODE_40b_BA);
}
static int
-construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
+construct_data_ind_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u16 offset, u16 src, u8 size)
{
swreg tmp_reg;
@@ -953,10 +963,12 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
/* Load data */
- return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
+ return data_ld(nfp_prog, meta, imm_b(nfp_prog), 0, size);
}
-static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
+static int
+construct_data_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u16 offset, u8 size)
{
swreg tmp_reg;
@@ -967,7 +979,7 @@ static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
/* Load data */
tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
- return data_ld(nfp_prog, tmp_reg, 0, size);
+ return data_ld(nfp_prog, meta, tmp_reg, 0, size);
}
static int
@@ -1204,7 +1216,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
}
if (clr_gpr && size < 8)
- wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
+ wrp_zext(nfp_prog, meta, gpr);
while (size) {
u32 slice_end;
@@ -1305,9 +1317,10 @@ wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
enum alu_op alu_op)
{
const struct bpf_insn *insn = &meta->insn;
+ u8 dst = insn->dst_reg * 2;
- wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
- wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+ wrp_alu_imm(nfp_prog, dst, alu_op, insn->imm);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -1319,7 +1332,7 @@ wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
- wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2396,12 +2409,14 @@ static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u8 dst = meta->insn.dst_reg * 2;
emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst));
- wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
-static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+static int
+__ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst,
+ u8 shift_amt)
{
if (shift_amt) {
/* Set signedness bit (MSB of result). */
@@ -2410,7 +2425,7 @@ static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
reg_b(dst), SHF_SC_R_SHF, shift_amt);
}
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2425,7 +2440,7 @@ static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
umin = meta->umin_src;
umax = meta->umax_src;
if (umin == umax)
- return __ashr_imm(nfp_prog, dst, umin);
+ return __ashr_imm(nfp_prog, meta, dst, umin);
src = insn->src_reg * 2;
/* NOTE: the first insn will set both indirect shift amount (source A)
@@ -2434,7 +2449,7 @@ static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst));
emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
reg_b(dst), SHF_SC_R_SHF);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2444,15 +2459,17 @@ static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
const struct bpf_insn *insn = &meta->insn;
u8 dst = insn->dst_reg * 2;
- return __ashr_imm(nfp_prog, dst, insn->imm);
+ return __ashr_imm(nfp_prog, meta, dst, insn->imm);
}
-static int __shr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+static int
+__shr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst,
+ u8 shift_amt)
{
if (shift_amt)
emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
reg_b(dst), SHF_SC_R_SHF, shift_amt);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2461,7 +2478,7 @@ static int shr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
const struct bpf_insn *insn = &meta->insn;
u8 dst = insn->dst_reg * 2;
- return __shr_imm(nfp_prog, dst, insn->imm);
+ return __shr_imm(nfp_prog, meta, dst, insn->imm);
}
static int shr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2474,22 +2491,24 @@ static int shr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
umin = meta->umin_src;
umax = meta->umax_src;
if (umin == umax)
- return __shr_imm(nfp_prog, dst, umin);
+ return __shr_imm(nfp_prog, meta, dst, umin);
src = insn->src_reg * 2;
emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
reg_b(dst), SHF_SC_R_SHF);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
-static int __shl_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+static int
+__shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst,
+ u8 shift_amt)
{
if (shift_amt)
emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
reg_b(dst), SHF_SC_L_SHF, shift_amt);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2498,7 +2517,7 @@ static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
const struct bpf_insn *insn = &meta->insn;
u8 dst = insn->dst_reg * 2;
- return __shl_imm(nfp_prog, dst, insn->imm);
+ return __shl_imm(nfp_prog, meta, dst, insn->imm);
}
static int shl_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2511,11 +2530,11 @@ static int shl_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
umin = meta->umin_src;
umax = meta->umax_src;
if (umin == umax)
- return __shl_imm(nfp_prog, dst, umin);
+ return __shl_imm(nfp_prog, meta, dst, umin);
src = insn->src_reg * 2;
shl_reg64_lt32_low(nfp_prog, dst, src);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2577,34 +2596,34 @@ static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ld(nfp_prog, meta->insn.imm, 1);
+ return construct_data_ld(nfp_prog, meta, meta->insn.imm, 1);
}
static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ld(nfp_prog, meta->insn.imm, 2);
+ return construct_data_ld(nfp_prog, meta, meta->insn.imm, 2);
}
static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ld(nfp_prog, meta->insn.imm, 4);
+ return construct_data_ld(nfp_prog, meta, meta->insn.imm, 4);
}
static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ return construct_data_ind_ld(nfp_prog, meta, meta->insn.imm,
meta->insn.src_reg * 2, 1);
}
static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ return construct_data_ind_ld(nfp_prog, meta, meta->insn.imm,
meta->insn.src_reg * 2, 2);
}
static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ return construct_data_ind_ld(nfp_prog, meta, meta->insn.imm,
meta->insn.src_reg * 2, 4);
}
@@ -2682,7 +2701,7 @@ mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
- return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
+ return data_ld_host_order_addr32(nfp_prog, meta, meta->insn.src_reg * 2,
tmp_reg, meta->insn.dst_reg * 2, size);
}
@@ -2694,7 +2713,7 @@ mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
- return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
+ return data_ld_host_order_addr40(nfp_prog, meta, meta->insn.src_reg * 2,
tmp_reg, meta->insn.dst_reg * 2, size);
}
@@ -2755,7 +2774,7 @@ mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog,
wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off);
if (!len_mid) {
- wrp_immed(nfp_prog, dst_hi, 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
return 0;
}
@@ -2763,7 +2782,7 @@ mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog,
if (size <= REG_WIDTH) {
wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo);
- wrp_immed(nfp_prog, dst_hi, 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
} else {
swreg src_hi = reg_xfer(idx + 2);
@@ -2794,10 +2813,10 @@ mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog,
if (size < REG_WIDTH) {
wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0);
- wrp_immed(nfp_prog, dst_hi, 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
} else if (size == REG_WIDTH) {
wrp_mov(nfp_prog, dst_lo, src_lo);
- wrp_immed(nfp_prog, dst_hi, 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
} else {
swreg src_hi = reg_xfer(idx + 1);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 9c136da25221..1c9fb11470df 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -160,35 +160,19 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
return 0;
}
-static int nfp_bpf_setup_tc_block(struct net_device *netdev,
- struct tc_block_offload *f)
-{
- struct nfp_net *nn = netdev_priv(netdev);
-
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block,
- nfp_bpf_setup_tc_block_cb,
- nn, nn, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block,
- nfp_bpf_setup_tc_block_cb,
- nn);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
+static LIST_HEAD(nfp_bpf_block_cb_list);
static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data)
{
+ struct nfp_net *nn = netdev_priv(netdev);
+
switch (type) {
case TC_SETUP_BLOCK:
- return nfp_bpf_setup_tc_block(netdev, type_data);
+ return flow_block_cb_setup_simple(type_data,
+ &nfp_bpf_block_cb_list,
+ nfp_bpf_setup_tc_block_cb,
+ nn, nn, true);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index e54d1ac84df2..57d6ff51e980 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -238,6 +238,8 @@ struct nfp_bpf_reg_state {
#define FLAG_INSN_SKIP_PREC_DEPENDENT BIT(4)
/* Instruction is optimized by the verifier */
#define FLAG_INSN_SKIP_VERIFIER_OPT BIT(5)
+/* Instruction needs to zero extend to high 32-bit */
+#define FLAG_INSN_DO_ZEXT BIT(6)
#define FLAG_INSN_SKIP_MASK (FLAG_INSN_SKIP_NOOP | \
FLAG_INSN_SKIP_PREC_DEPENDENT | \
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 36f56eb4cbe2..e92ee510fd52 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -744,6 +744,17 @@ continue_subprog:
goto continue_subprog;
}
+static void nfp_bpf_insn_flag_zext(struct nfp_prog *nfp_prog,
+ struct bpf_insn_aux_data *aux)
+{
+ struct nfp_insn_meta *meta;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ if (aux[meta->n].zext_dst)
+ meta->flags |= FLAG_INSN_DO_ZEXT;
+ }
+}
+
int nfp_bpf_finalize(struct bpf_verifier_env *env)
{
struct bpf_subprog_info *info;
@@ -784,6 +795,7 @@ int nfp_bpf_finalize(struct bpf_verifier_env *env)
return -EOPNOTSUPP;
}
+ nfp_bpf_insn_flag_zext(nfp_prog, env->insn_aux_data);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.c b/drivers/net/ethernet/netronome/nfp/ccm.c
index 94476e41e261..71afd111bae3 100644
--- a/drivers/net/ethernet/netronome/nfp/ccm.c
+++ b/drivers/net/ethernet/netronome/nfp/ccm.c
@@ -7,9 +7,6 @@
#include "nfp_app.h"
#include "nfp_net.h"
-#define NFP_CCM_TYPE_REPLY_BIT 7
-#define __NFP_CCM_REPLY(req) (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req))
-
#define ccm_warn(app, msg...) nn_dp_warn(&(app)->ctrl->dp, msg)
#define NFP_CCM_TAG_ALLOC_SPAN (U16_MAX / 4)
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.h b/drivers/net/ethernet/netronome/nfp/ccm.h
index ac963b128203..a460c75522be 100644
--- a/drivers/net/ethernet/netronome/nfp/ccm.h
+++ b/drivers/net/ethernet/netronome/nfp/ccm.h
@@ -9,6 +9,7 @@
#include <linux/wait.h>
struct nfp_app;
+struct nfp_net;
/* Firmware ABI */
@@ -21,15 +22,27 @@ enum nfp_ccm_type {
NFP_CCM_TYPE_BPF_MAP_GETNEXT = 6,
NFP_CCM_TYPE_BPF_MAP_GETFIRST = 7,
NFP_CCM_TYPE_BPF_BPF_EVENT = 8,
+ NFP_CCM_TYPE_CRYPTO_RESET = 9,
+ NFP_CCM_TYPE_CRYPTO_ADD = 10,
+ NFP_CCM_TYPE_CRYPTO_DEL = 11,
+ NFP_CCM_TYPE_CRYPTO_UPDATE = 12,
__NFP_CCM_TYPE_MAX,
};
#define NFP_CCM_ABI_VERSION 1
+#define NFP_CCM_TYPE_REPLY_BIT 7
+#define __NFP_CCM_REPLY(req) (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req))
+
struct nfp_ccm_hdr {
- u8 type;
- u8 ver;
- __be16 tag;
+ union {
+ struct {
+ u8 type;
+ u8 ver;
+ __be16 tag;
+ };
+ __be32 raw;
+ };
};
static inline u8 nfp_ccm_get_type(struct sk_buff *skb)
@@ -41,15 +54,31 @@ static inline u8 nfp_ccm_get_type(struct sk_buff *skb)
return hdr->type;
}
-static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb)
+static inline __be16 __nfp_ccm_get_tag(struct sk_buff *skb)
{
struct nfp_ccm_hdr *hdr;
hdr = (struct nfp_ccm_hdr *)skb->data;
- return be16_to_cpu(hdr->tag);
+ return hdr->tag;
+}
+
+static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb)
+{
+ return be16_to_cpu(__nfp_ccm_get_tag(skb));
}
+#define NFP_NET_MBOX_TLV_TYPE GENMASK(31, 16)
+#define NFP_NET_MBOX_TLV_LEN GENMASK(15, 0)
+
+enum nfp_ccm_mbox_tlv_type {
+ NFP_NET_MBOX_TLV_TYPE_UNKNOWN = 0,
+ NFP_NET_MBOX_TLV_TYPE_END = 1,
+ NFP_NET_MBOX_TLV_TYPE_MSG = 2,
+ NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP = 3,
+ NFP_NET_MBOX_TLV_TYPE_RESV = 4,
+};
+
/* Implementation */
/**
@@ -71,7 +100,7 @@ struct nfp_ccm {
u16 tag_alloc_last;
struct sk_buff_head replies;
- struct wait_queue_head wq;
+ wait_queue_head_t wq;
};
int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app);
@@ -80,4 +109,23 @@ void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb);
struct sk_buff *
nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
enum nfp_ccm_type type, unsigned int reply_size);
+
+int nfp_ccm_mbox_alloc(struct nfp_net *nn);
+void nfp_ccm_mbox_free(struct nfp_net *nn);
+int nfp_ccm_mbox_init(struct nfp_net *nn);
+void nfp_ccm_mbox_clean(struct nfp_net *nn);
+bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size);
+struct sk_buff *
+nfp_ccm_mbox_msg_alloc(struct nfp_net *nn, unsigned int req_size,
+ unsigned int reply_size, gfp_t flags);
+int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size,
+ unsigned int max_reply_size, bool critical);
+int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size,
+ unsigned int max_reply_size);
+int nfp_ccm_mbox_post(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type, unsigned int max_reply_size);
#endif
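
The mailbox TLV header added above packs the message type into bits 31:16 and the payload length into bits 15:0 (NFP_NET_MBOX_TLV_TYPE / NFP_NET_MBOX_TLV_LEN). A minimal standalone sketch of composing and parsing one such header word with the <linux/bitfield.h> helpers — the same approach the new ccm_mbox.c code takes in nfp_ccm_mbox_write_tlv(); the example_* helper names here are illustrative and not part of the patch, and ccm.h is assumed to be included for the mask definitions:

#include <linux/bitfield.h>
#include <linux/types.h>

#include "ccm.h"

/* Compose a TLV header word for a payload of 'len' bytes. */
static u32 example_mbox_tlv_hdr(u32 type, u32 len)
{
	return FIELD_PREP(NFP_NET_MBOX_TLV_TYPE, type) |
	       FIELD_PREP(NFP_NET_MBOX_TLV_LEN, len);
}

/* Split a TLV header word read back from the mailbox area. */
static void example_mbox_tlv_parse(u32 hdr, u32 *type, u32 *len)
{
	*type = FIELD_GET(NFP_NET_MBOX_TLV_TYPE, hdr);
	*len = FIELD_GET(NFP_NET_MBOX_TLV_LEN, hdr);
}
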
diff --git a/drivers/net/ethernet/netronome/nfp/ccm_mbox.c b/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
new file mode 100644
index 000000000000..f0783aa9e66e
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
@@ -0,0 +1,743 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/skbuff.h>
+
+#include "ccm.h"
+#include "nfp_net.h"
+
+/* CCM messages via the mailbox. CMSGs get wrapped into simple TLVs
+ * and copied into the mailbox. Multiple messages can be copied to
+ * form a batch. Threads come in with a CMSG formed in an skb, then
+ * enqueue that skb onto the request queue. If the thread's skb is first
+ * in the queue, that thread handles the mailbox operation. It copies
+ * up to 64 messages into the mailbox (making sure that both requests
+ * and replies will fit). After the FW is done processing the batch it
+ * copies the data out and wakes the waiting threads.
+ * A waiting thread either gets its message completed
+ * (the response is copied into the same skb as the request, overwriting
+ * it), or becomes the first in the queue.
+ * Completions and next-to-run are signaled via the control buffer
+ * to limit potential cache line bounces.
+ */
+
+#define NFP_CCM_MBOX_BATCH_LIMIT 64
+#define NFP_CCM_TIMEOUT (NFP_NET_POLL_TIMEOUT * 1000)
+#define NFP_CCM_MAX_QLEN 1024
+
+enum nfp_net_mbox_cmsg_state {
+ NFP_NET_MBOX_CMSG_STATE_QUEUED,
+ NFP_NET_MBOX_CMSG_STATE_NEXT,
+ NFP_NET_MBOX_CMSG_STATE_BUSY,
+ NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND,
+ NFP_NET_MBOX_CMSG_STATE_DONE,
+};
+
+/**
+ * struct nfp_ccm_mbox_skb_cb - CCM mailbox specific info
+ * @state: processing state (/stage) of the message
+ * @err: error encountered during processing if any
+ * @max_len: max(request_len, reply_len)
+ * @exp_reply: expected reply length (0 means don't validate)
+ * @posted: the message was posted and nobody waits for the reply
+ */
+struct nfp_ccm_mbox_cmsg_cb {
+ enum nfp_net_mbox_cmsg_state state;
+ int err;
+ unsigned int max_len;
+ unsigned int exp_reply;
+ bool posted;
+};
+
+static u32 nfp_ccm_mbox_max_msg(struct nfp_net *nn)
+{
+ return round_down(nn->tlv_caps.mbox_len, 4) -
+ NFP_NET_CFG_MBOX_SIMPLE_VAL - /* common mbox command header */
+ 4 * 2; /* Msg TLV plus End TLV headers */
+}
+
+static void
+nfp_ccm_mbox_msg_init(struct sk_buff *skb, unsigned int exp_reply, int max_len)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ cb->state = NFP_NET_MBOX_CMSG_STATE_QUEUED;
+ cb->err = 0;
+ cb->max_len = max_len;
+ cb->exp_reply = exp_reply;
+ cb->posted = false;
+}
+
+static int nfp_ccm_mbox_maxlen(const struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->max_len;
+}
+
+static bool nfp_ccm_mbox_done(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->state == NFP_NET_MBOX_CMSG_STATE_DONE;
+}
+
+static bool nfp_ccm_mbox_in_progress(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->state != NFP_NET_MBOX_CMSG_STATE_QUEUED &&
+ cb->state != NFP_NET_MBOX_CMSG_STATE_NEXT;
+}
+
+static void nfp_ccm_mbox_set_busy(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ cb->state = NFP_NET_MBOX_CMSG_STATE_BUSY;
+}
+
+static bool nfp_ccm_mbox_is_posted(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->posted;
+}
+
+static void nfp_ccm_mbox_mark_posted(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ cb->posted = true;
+}
+
+static bool nfp_ccm_mbox_is_first(struct nfp_net *nn, struct sk_buff *skb)
+{
+ return skb_queue_is_first(&nn->mbox_cmsg.queue, skb);
+}
+
+static bool nfp_ccm_mbox_should_run(struct nfp_net *nn, struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->state == NFP_NET_MBOX_CMSG_STATE_NEXT;
+}
+
+static void nfp_ccm_mbox_mark_next_runner(struct nfp_net *nn)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb;
+ struct sk_buff *skb;
+
+ skb = skb_peek(&nn->mbox_cmsg.queue);
+ if (!skb)
+ return;
+
+ cb = (void *)skb->cb;
+ cb->state = NFP_NET_MBOX_CMSG_STATE_NEXT;
+ if (cb->posted)
+ queue_work(nn->mbox_cmsg.workq, &nn->mbox_cmsg.runq_work);
+}
+
+static void
+nfp_ccm_mbox_write_tlv(struct nfp_net *nn, u32 off, u32 type, u32 len)
+{
+ nn_writel(nn, off,
+ FIELD_PREP(NFP_NET_MBOX_TLV_TYPE, type) |
+ FIELD_PREP(NFP_NET_MBOX_TLV_LEN, len));
+}
+
+static void nfp_ccm_mbox_copy_in(struct nfp_net *nn, struct sk_buff *last)
+{
+ struct sk_buff *skb;
+ int reserve, i, cnt;
+ __be32 *data;
+ u32 off, len;
+
+ off = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ skb = __skb_peek(&nn->mbox_cmsg.queue);
+ while (true) {
+ nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_MSG,
+ skb->len);
+ off += 4;
+
+ /* Write data word by word, skb->data should be aligned */
+ data = (__be32 *)skb->data;
+ cnt = skb->len / 4;
+ for (i = 0 ; i < cnt; i++) {
+ nn_writel(nn, off, be32_to_cpu(data[i]));
+ off += 4;
+ }
+ if (skb->len & 3) {
+ __be32 tmp = 0;
+
+ memcpy(&tmp, &data[i], skb->len & 3);
+ nn_writel(nn, off, be32_to_cpu(tmp));
+ off += 4;
+ }
+
+ /* Reserve space if reply is bigger */
+ len = round_up(skb->len, 4);
+ reserve = nfp_ccm_mbox_maxlen(skb) - len;
+ if (reserve > 0) {
+ nfp_ccm_mbox_write_tlv(nn, off,
+ NFP_NET_MBOX_TLV_TYPE_RESV,
+ reserve);
+ off += 4 + reserve;
+ }
+
+ if (skb == last)
+ break;
+ skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
+ }
+
+ nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_END, 0);
+}
+
+static struct sk_buff *
+nfp_ccm_mbox_find_req(struct nfp_net *nn, __be16 tag, struct sk_buff *last)
+{
+ struct sk_buff *skb;
+
+ skb = __skb_peek(&nn->mbox_cmsg.queue);
+ while (true) {
+ if (__nfp_ccm_get_tag(skb) == tag)
+ return skb;
+
+ if (skb == last)
+ return NULL;
+ skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
+ }
+}
+
+static void nfp_ccm_mbox_copy_out(struct nfp_net *nn, struct sk_buff *last)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb;
+ u8 __iomem *data, *end;
+ struct sk_buff *skb;
+
+ data = nn->dp.ctrl_bar + nn->tlv_caps.mbox_off +
+ NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ end = data + nn->tlv_caps.mbox_len;
+
+ while (true) {
+ unsigned int length, offset, type;
+ struct nfp_ccm_hdr hdr;
+ u32 tlv_hdr;
+
+ tlv_hdr = readl(data);
+ type = FIELD_GET(NFP_NET_MBOX_TLV_TYPE, tlv_hdr);
+ length = FIELD_GET(NFP_NET_MBOX_TLV_LEN, tlv_hdr);
+ offset = data - nn->dp.ctrl_bar;
+
+ /* Advance past the header */
+ data += 4;
+
+ if (data + length > end) {
+ nn_dp_warn(&nn->dp, "mailbox oversized TLV type:%d offset:%u len:%u\n",
+ type, offset, length);
+ break;
+ }
+
+ if (type == NFP_NET_MBOX_TLV_TYPE_END)
+ break;
+ if (type == NFP_NET_MBOX_TLV_TYPE_RESV)
+ goto next_tlv;
+ if (type != NFP_NET_MBOX_TLV_TYPE_MSG &&
+ type != NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) {
+ nn_dp_warn(&nn->dp, "mailbox unknown TLV type:%d offset:%u len:%u\n",
+ type, offset, length);
+ break;
+ }
+
+ if (length < 4) {
+ nn_dp_warn(&nn->dp, "mailbox msg too short to contain header TLV type:%d offset:%u len:%u\n",
+ type, offset, length);
+ break;
+ }
+
+ hdr.raw = cpu_to_be32(readl(data));
+
+ skb = nfp_ccm_mbox_find_req(nn, hdr.tag, last);
+ if (!skb) {
+ nn_dp_warn(&nn->dp, "mailbox request not found:%u\n",
+ be16_to_cpu(hdr.tag));
+ break;
+ }
+ cb = (void *)skb->cb;
+
+ if (type == NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) {
+ nn_dp_warn(&nn->dp,
+ "mailbox msg not supported type:%d\n",
+ nfp_ccm_get_type(skb));
+ cb->err = -EIO;
+ goto next_tlv;
+ }
+
+ if (hdr.type != __NFP_CCM_REPLY(nfp_ccm_get_type(skb))) {
+ nn_dp_warn(&nn->dp, "mailbox msg reply wrong type:%u expected:%lu\n",
+ hdr.type,
+ __NFP_CCM_REPLY(nfp_ccm_get_type(skb)));
+ cb->err = -EIO;
+ goto next_tlv;
+ }
+ if (cb->exp_reply && length != cb->exp_reply) {
+ nn_dp_warn(&nn->dp, "mailbox msg reply wrong size type:%u expected:%u have:%u\n",
+ hdr.type, length, cb->exp_reply);
+ cb->err = -EIO;
+ goto next_tlv;
+ }
+ if (length > cb->max_len) {
+ nn_dp_warn(&nn->dp, "mailbox msg oversized reply type:%u max:%u have:%u\n",
+ hdr.type, cb->max_len, length);
+ cb->err = -EIO;
+ goto next_tlv;
+ }
+
+ if (!cb->posted) {
+ __be32 *skb_data;
+ int i, cnt;
+
+ if (length <= skb->len)
+ __skb_trim(skb, length);
+ else
+ skb_put(skb, length - skb->len);
+
+ /* We overcopy here slightly, but that's okay,
+ * the skb is large enough, and the garbage will
+ * be ignored (beyond skb->len).
+ */
+ skb_data = (__be32 *)skb->data;
+ memcpy(skb_data, &hdr, 4);
+
+ cnt = DIV_ROUND_UP(length, 4);
+ for (i = 1 ; i < cnt; i++)
+ skb_data[i] = cpu_to_be32(readl(data + i * 4));
+ }
+
+ cb->state = NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND;
+next_tlv:
+ data += round_up(length, 4);
+ if (data + 4 > end) {
+ nn_dp_warn(&nn->dp,
+ "reached end of MBOX without END TLV\n");
+ break;
+ }
+ }
+
+ smp_wmb(); /* order the skb->data vs. cb->state */
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+ do {
+ skb = __skb_dequeue(&nn->mbox_cmsg.queue);
+ cb = (void *)skb->cb;
+
+ if (cb->state != NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND) {
+ cb->err = -ENOENT;
+ smp_wmb(); /* order the cb->err vs. cb->state */
+ }
+ cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;
+
+ if (cb->posted) {
+ if (cb->err)
+ nn_dp_warn(&nn->dp,
+ "mailbox posted msg failed type:%u err:%d\n",
+ nfp_ccm_get_type(skb), cb->err);
+ dev_consume_skb_any(skb);
+ }
+ } while (skb != last);
+
+ nfp_ccm_mbox_mark_next_runner(nn);
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+}
+
+static void
+nfp_ccm_mbox_mark_all_err(struct nfp_net *nn, struct sk_buff *last, int err)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb;
+ struct sk_buff *skb;
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+ do {
+ skb = __skb_dequeue(&nn->mbox_cmsg.queue);
+ cb = (void *)skb->cb;
+
+ cb->err = err;
+ smp_wmb(); /* order the cb->err vs. cb->state */
+ cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;
+ } while (skb != last);
+
+ nfp_ccm_mbox_mark_next_runner(nn);
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+}
+
+static void nfp_ccm_mbox_run_queue_unlock(struct nfp_net *nn)
+ __releases(&nn->mbox_cmsg.queue.lock)
+{
+ int space = nn->tlv_caps.mbox_len - NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ struct sk_buff *skb, *last;
+ int cnt, err;
+
+ space -= 4; /* for End TLV */
+
+ /* First skb must fit, because it's ours and we checked it fits */
+ cnt = 1;
+ last = skb = __skb_peek(&nn->mbox_cmsg.queue);
+ space -= 4 + nfp_ccm_mbox_maxlen(skb);
+
+ while (!skb_queue_is_last(&nn->mbox_cmsg.queue, last)) {
+ skb = skb_queue_next(&nn->mbox_cmsg.queue, last);
+ space -= 4 + nfp_ccm_mbox_maxlen(skb);
+ if (space < 0)
+ break;
+ last = skb;
+ nfp_ccm_mbox_set_busy(skb);
+ cnt++;
+ if (cnt == NFP_CCM_MBOX_BATCH_LIMIT)
+ break;
+ }
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ /* Now we own all skbs marked in progress; new requests may arrive
+ * at the end of the queue.
+ */
+
+ nn_ctrl_bar_lock(nn);
+
+ nfp_ccm_mbox_copy_in(nn, last);
+
+ err = nfp_net_mbox_reconfig(nn, NFP_NET_CFG_MBOX_CMD_TLV_CMSG);
+ if (!err)
+ nfp_ccm_mbox_copy_out(nn, last);
+ else
+ nfp_ccm_mbox_mark_all_err(nn, last, -EIO);
+
+ nn_ctrl_bar_unlock(nn);
+
+ wake_up_all(&nn->mbox_cmsg.wq);
+}
+
+static int nfp_ccm_mbox_skb_return(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ if (cb->err)
+ dev_kfree_skb_any(skb);
+ return cb->err;
+}
+
+/* If the wait timed out but the command is already in progress we have
+ * to wait until it finishes. The runner has ownership of the skbs marked
+ * as busy.
+ */
+static int
+nfp_ccm_mbox_unlink_unlock(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type)
+ __releases(&nn->mbox_cmsg.queue.lock)
+{
+ bool was_first;
+
+ if (nfp_ccm_mbox_in_progress(skb)) {
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ wait_event(nn->mbox_cmsg.wq, nfp_ccm_mbox_done(skb));
+ smp_rmb(); /* pairs with smp_wmb() after data is written */
+ return nfp_ccm_mbox_skb_return(skb);
+ }
+
+ was_first = nfp_ccm_mbox_should_run(nn, skb);
+ __skb_unlink(skb, &nn->mbox_cmsg.queue);
+ if (was_first)
+ nfp_ccm_mbox_mark_next_runner(nn);
+
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ if (was_first)
+ wake_up_all(&nn->mbox_cmsg.wq);
+
+ nn_dp_warn(&nn->dp, "time out waiting for mbox response to 0x%02x\n",
+ type);
+ return -ETIMEDOUT;
+}
+
+static int
+nfp_ccm_mbox_msg_prepare(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size, unsigned int max_reply_size,
+ gfp_t flags)
+{
+ const unsigned int mbox_max = nfp_ccm_mbox_max_msg(nn);
+ unsigned int max_len;
+ ssize_t undersize;
+ int err;
+
+ if (unlikely(!(nn->tlv_caps.mbox_cmsg_types & BIT(type)))) {
+ nn_dp_warn(&nn->dp,
+ "message type %d not supported by mailbox\n", type);
+ return -EINVAL;
+ }
+
+ /* If the reply size is unknown assume it will take the entire
+ * mailbox; callers should do their best to ensure this never
+ * happens.
+ */
+ if (!max_reply_size)
+ max_reply_size = mbox_max;
+ max_reply_size = round_up(max_reply_size, 4);
+
+ /* Make sure we can fit the entire reply into the skb,
+ * and that we don't have to slow down the mbox handler
+ * with allocations.
+ */
+ undersize = max_reply_size - (skb_end_pointer(skb) - skb->data);
+ if (undersize > 0) {
+ err = pskb_expand_head(skb, 0, undersize, flags);
+ if (err) {
+ nn_dp_warn(&nn->dp,
+ "can't allocate reply buffer for mailbox\n");
+ return err;
+ }
+ }
+
+ /* Make sure that request and response both fit into the mailbox */
+ max_len = max(max_reply_size, round_up(skb->len, 4));
+ if (max_len > mbox_max) {
+ nn_dp_warn(&nn->dp,
+ "message too big for tha mailbox: %u/%u vs %u\n",
+ skb->len, max_reply_size, mbox_max);
+ return -EMSGSIZE;
+ }
+
+ nfp_ccm_mbox_msg_init(skb, reply_size, max_len);
+
+ return 0;
+}
+
+static int
+nfp_ccm_mbox_msg_enqueue(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type, bool critical)
+{
+ struct nfp_ccm_hdr *hdr;
+
+ assert_spin_locked(&nn->mbox_cmsg.queue.lock);
+
+ if (!critical && nn->mbox_cmsg.queue.qlen >= NFP_CCM_MAX_QLEN) {
+ nn_dp_warn(&nn->dp, "mailbox request queue too long\n");
+ return -EBUSY;
+ }
+
+ hdr = (void *)skb->data;
+ hdr->ver = NFP_CCM_ABI_VERSION;
+ hdr->type = type;
+ hdr->tag = cpu_to_be16(nn->mbox_cmsg.tag++);
+
+ __skb_queue_tail(&nn->mbox_cmsg.queue, skb);
+
+ return 0;
+}
+
+int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size,
+ unsigned int max_reply_size, bool critical)
+{
+ int err;
+
+ err = nfp_ccm_mbox_msg_prepare(nn, skb, type, reply_size,
+ max_reply_size, GFP_KERNEL);
+ if (err)
+ goto err_free_skb;
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+
+ err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, critical);
+ if (err)
+ goto err_unlock;
+
+ /* First in queue takes the mailbox lock and processes the batch */
+ if (!nfp_ccm_mbox_is_first(nn, skb)) {
+ bool to;
+
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ to = !wait_event_timeout(nn->mbox_cmsg.wq,
+ nfp_ccm_mbox_done(skb) ||
+ nfp_ccm_mbox_should_run(nn, skb),
+ msecs_to_jiffies(NFP_CCM_TIMEOUT));
+
+ /* fast path for those completed by another thread */
+ if (nfp_ccm_mbox_done(skb)) {
+ smp_rmb(); /* pairs with wmb after data is written */
+ return nfp_ccm_mbox_skb_return(skb);
+ }
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+
+ if (!nfp_ccm_mbox_is_first(nn, skb)) {
+ WARN_ON(!to);
+
+ err = nfp_ccm_mbox_unlink_unlock(nn, skb, type);
+ if (err)
+ goto err_free_skb;
+ return 0;
+ }
+ }
+
+ /* run queue expects the lock held */
+ nfp_ccm_mbox_run_queue_unlock(nn);
+ return nfp_ccm_mbox_skb_return(skb);
+
+err_unlock:
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+err_free_skb:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size,
+ unsigned int max_reply_size)
+{
+ return __nfp_ccm_mbox_communicate(nn, skb, type, reply_size,
+ max_reply_size, false);
+}
+
+static void nfp_ccm_mbox_post_runq_work(struct work_struct *work)
+{
+ struct sk_buff *skb;
+ struct nfp_net *nn;
+
+ nn = container_of(work, struct nfp_net, mbox_cmsg.runq_work);
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+
+ skb = __skb_peek(&nn->mbox_cmsg.queue);
+ if (WARN_ON(!skb || !nfp_ccm_mbox_is_posted(skb) ||
+ !nfp_ccm_mbox_should_run(nn, skb))) {
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+ return;
+ }
+
+ nfp_ccm_mbox_run_queue_unlock(nn);
+}
+
+static void nfp_ccm_mbox_post_wait_work(struct work_struct *work)
+{
+ struct sk_buff *skb;
+ struct nfp_net *nn;
+ int err;
+
+ nn = container_of(work, struct nfp_net, mbox_cmsg.wait_work);
+
+ skb = skb_peek(&nn->mbox_cmsg.queue);
+ if (WARN_ON(!skb || !nfp_ccm_mbox_is_posted(skb)))
+ /* Should never happen so it's unclear what to do here.. */
+ goto exit_unlock_wake;
+
+ err = nfp_net_mbox_reconfig_wait_posted(nn);
+ if (!err)
+ nfp_ccm_mbox_copy_out(nn, skb);
+ else
+ nfp_ccm_mbox_mark_all_err(nn, skb, -EIO);
+exit_unlock_wake:
+ nn_ctrl_bar_unlock(nn);
+ wake_up_all(&nn->mbox_cmsg.wq);
+}
+
+int nfp_ccm_mbox_post(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type, unsigned int max_reply_size)
+{
+ int err;
+
+ err = nfp_ccm_mbox_msg_prepare(nn, skb, type, 0, max_reply_size,
+ GFP_ATOMIC);
+ if (err)
+ goto err_free_skb;
+
+ nfp_ccm_mbox_mark_posted(skb);
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+
+ err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, false);
+ if (err)
+ goto err_unlock;
+
+ if (nfp_ccm_mbox_is_first(nn, skb)) {
+ if (nn_ctrl_bar_trylock(nn)) {
+ nfp_ccm_mbox_copy_in(nn, skb);
+ nfp_net_mbox_reconfig_post(nn,
+ NFP_NET_CFG_MBOX_CMD_TLV_CMSG);
+ queue_work(nn->mbox_cmsg.workq,
+ &nn->mbox_cmsg.wait_work);
+ } else {
+ nfp_ccm_mbox_mark_next_runner(nn);
+ }
+ }
+
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ return 0;
+
+err_unlock:
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+err_free_skb:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+struct sk_buff *
+nfp_ccm_mbox_msg_alloc(struct nfp_net *nn, unsigned int req_size,
+ unsigned int reply_size, gfp_t flags)
+{
+ unsigned int max_size;
+ struct sk_buff *skb;
+
+ if (!reply_size)
+ max_size = nfp_ccm_mbox_max_msg(nn);
+ else
+ max_size = max(req_size, reply_size);
+ max_size = round_up(max_size, 4);
+
+ skb = alloc_skb(max_size, flags);
+ if (!skb)
+ return NULL;
+
+ skb_put(skb, req_size);
+
+ return skb;
+}
+
+bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size)
+{
+ return nfp_ccm_mbox_max_msg(nn) >= size;
+}
+
+int nfp_ccm_mbox_init(struct nfp_net *nn)
+{
+ return 0;
+}
+
+void nfp_ccm_mbox_clean(struct nfp_net *nn)
+{
+ drain_workqueue(nn->mbox_cmsg.workq);
+}
+
+int nfp_ccm_mbox_alloc(struct nfp_net *nn)
+{
+ skb_queue_head_init(&nn->mbox_cmsg.queue);
+ init_waitqueue_head(&nn->mbox_cmsg.wq);
+ INIT_WORK(&nn->mbox_cmsg.wait_work, nfp_ccm_mbox_post_wait_work);
+ INIT_WORK(&nn->mbox_cmsg.runq_work, nfp_ccm_mbox_post_runq_work);
+
+ nn->mbox_cmsg.workq = alloc_workqueue("nfp-ccm-mbox", WQ_UNBOUND, 0);
+ if (!nn->mbox_cmsg.workq)
+ return -ENOMEM;
+ return 0;
+}
+
+void nfp_ccm_mbox_free(struct nfp_net *nn)
+{
+ destroy_workqueue(nn->mbox_cmsg.workq);
+ WARN_ON(!skb_queue_empty(&nn->mbox_cmsg.queue));
+}
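
The header comment of ccm_mbox.c above describes the batching scheme: requesters queue their skbs, and the thread whose skb sits at the head of the queue becomes the runner for the whole batch while the others sleep until their entries are completed. A stripped-down, standalone sketch of that "first in queue runs the batch" pattern follows — the example_* names and the simplified locking and completion handling are assumptions for illustration, not the driver's actual code:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct example_req {
	struct list_head node;
	bool done;
	int err;
};

static LIST_HEAD(example_queue);
static DEFINE_SPINLOCK(example_lock);
static DECLARE_WAIT_QUEUE_HEAD(example_wq);

/* Stand-in for copying the batch into the mailbox, kicking the FW and
 * copying the replies back; here every queued request is simply completed.
 */
static void example_run_batch(void)
{
	struct example_req *r, *tmp;

	spin_lock_bh(&example_lock);
	list_for_each_entry_safe(r, tmp, &example_queue, node) {
		list_del(&r->node);
		r->done = true;
	}
	spin_unlock_bh(&example_lock);
}

static int example_submit(struct example_req *r)
{
	bool first;

	r->done = false;
	r->err = 0;

	spin_lock_bh(&example_lock);
	list_add_tail(&r->node, &example_queue);
	first = list_is_first(&r->node, &example_queue);
	spin_unlock_bh(&example_lock);

	if (first) {
		/* Head of the queue: process everything queued so far,
		 * completing other requesters' entries as well.
		 */
		example_run_batch();
		wake_up_all(&example_wq);
	} else {
		/* Not the runner: sleep until a runner completes this entry. */
		wait_event(example_wq, r->done);
	}
	return r->err;
}
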
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
new file mode 100644
index 000000000000..60372ddf69f0
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef NFP_CRYPTO_H
+#define NFP_CRYPTO_H 1
+
+struct nfp_net_tls_offload_ctx {
+ __be32 fw_handle[2];
+
+ u8 rx_end[0];
+ /* Tx only fields follow - Rx side does not have enough driver state
+ * to fit these
+ */
+
+ u32 next_seq;
+};
+
+#ifdef CONFIG_TLS_DEVICE
+int nfp_net_tls_init(struct nfp_net *nn);
+#else
+static inline int nfp_net_tls_init(struct nfp_net *nn)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/fw.h b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
new file mode 100644
index 000000000000..67413d946c4a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef NFP_CRYPTO_FW_H
+#define NFP_CRYPTO_FW_H 1
+
+#include "../ccm.h"
+
+#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC 0
+#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC 1
+
+struct nfp_crypto_reply_simple {
+ struct nfp_ccm_hdr hdr;
+ __be32 error;
+};
+
+struct nfp_crypto_req_reset {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+};
+
+#define NFP_NET_TLS_IPVER GENMASK(15, 12)
+#define NFP_NET_TLS_VLAN GENMASK(11, 0)
+#define NFP_NET_TLS_VLAN_UNUSED 4095
+
+struct nfp_crypto_req_add_front {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+ u8 resv[3];
+ u8 opcode;
+ u8 key_len;
+ __be16 ipver_vlan __packed;
+ u8 l4_proto;
+#define NFP_NET_TLS_NON_ADDR_KEY_LEN 8
+ u8 l3_addrs[0];
+};
+
+struct nfp_crypto_req_add_back {
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 key[8];
+ __be32 salt;
+ __be32 iv[2];
+ __be32 counter;
+ __be32 rec_no[2];
+ __be32 tcp_seq;
+};
+
+struct nfp_crypto_req_add_v4 {
+ struct nfp_crypto_req_add_front front;
+ __be32 src_ip;
+ __be32 dst_ip;
+ struct nfp_crypto_req_add_back back;
+};
+
+struct nfp_crypto_req_add_v6 {
+ struct nfp_crypto_req_add_front front;
+ __be32 src_ip[4];
+ __be32 dst_ip[4];
+ struct nfp_crypto_req_add_back back;
+};
+
+struct nfp_crypto_reply_add {
+ struct nfp_ccm_hdr hdr;
+ __be32 error;
+ __be32 handle[2];
+};
+
+struct nfp_crypto_req_del {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+ __be32 handle[2];
+};
+
+struct nfp_crypto_req_update {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+ u8 resv[3];
+ u8 opcode;
+ __be32 handle[2];
+ __be32 rec_no[2];
+ __be32 tcp_seq;
+};
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
new file mode 100644
index 000000000000..96a96b35c0ca
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/bitfield.h>
+#include <linux/ipv6.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <net/tls.h>
+
+#include "../ccm.h"
+#include "../nfp_net.h"
+#include "crypto.h"
+#include "fw.h"
+
+#define NFP_NET_TLS_CCM_MBOX_OPS_MASK \
+ (BIT(NFP_CCM_TYPE_CRYPTO_RESET) | \
+ BIT(NFP_CCM_TYPE_CRYPTO_ADD) | \
+ BIT(NFP_CCM_TYPE_CRYPTO_DEL) | \
+ BIT(NFP_CCM_TYPE_CRYPTO_UPDATE))
+
+#define NFP_NET_TLS_OPCODE_MASK_RX \
+ BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC)
+
+#define NFP_NET_TLS_OPCODE_MASK_TX \
+ BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC)
+
+#define NFP_NET_TLS_OPCODE_MASK \
+ (NFP_NET_TLS_OPCODE_MASK_RX | NFP_NET_TLS_OPCODE_MASK_TX)
+
+static void nfp_net_crypto_set_op(struct nfp_net *nn, u8 opcode, bool on)
+{
+ u32 off, val;
+
+ off = nn->tlv_caps.crypto_enable_off + round_down(opcode / 8, 4);
+
+ val = nn_readl(nn, off);
+ if (on)
+ val |= BIT(opcode & 31);
+ else
+ val &= ~BIT(opcode & 31);
+ nn_writel(nn, off, val);
+}
+
+static bool
+__nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
+ enum tls_offload_ctx_dir direction)
+{
+ u8 opcode;
+ int cnt;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
+ nn->ktls_tx_conn_cnt += add;
+ cnt = nn->ktls_tx_conn_cnt;
+ nn->dp.ktls_tx = !!nn->ktls_tx_conn_cnt;
+ } else {
+ opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
+ nn->ktls_rx_conn_cnt += add;
+ cnt = nn->ktls_rx_conn_cnt;
+ }
+
+ /* Care only about 0 -> 1 and 1 -> 0 transitions */
+ if (cnt > 1)
+ return false;
+
+ nfp_net_crypto_set_op(nn, opcode, cnt);
+ return true;
+}
+
+static int
+nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
+ enum tls_offload_ctx_dir direction)
+{
+ int ret = 0;
+
+ /* Use the BAR lock to protect the connection counts */
+ nn_ctrl_bar_lock(nn);
+ if (__nfp_net_tls_conn_cnt_changed(nn, add, direction)) {
+ ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
+ /* Undo the cnt adjustment if failed */
+ if (ret)
+ __nfp_net_tls_conn_cnt_changed(nn, -add, direction);
+ }
+ nn_ctrl_bar_unlock(nn);
+
+ return ret;
+}
+
+static int
+nfp_net_tls_conn_add(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
+{
+ return nfp_net_tls_conn_cnt_changed(nn, 1, direction);
+}
+
+static int
+nfp_net_tls_conn_remove(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
+{
+ return nfp_net_tls_conn_cnt_changed(nn, -1, direction);
+}
+
+static struct sk_buff *
+nfp_net_tls_alloc_simple(struct nfp_net *nn, size_t req_sz, gfp_t flags)
+{
+ return nfp_ccm_mbox_msg_alloc(nn, req_sz,
+ sizeof(struct nfp_crypto_reply_simple),
+ flags);
+}
+
+static int
+nfp_net_tls_communicate_simple(struct nfp_net *nn, struct sk_buff *skb,
+ const char *name, enum nfp_ccm_type type)
+{
+ struct nfp_crypto_reply_simple *reply;
+ int err;
+
+ err = __nfp_ccm_mbox_communicate(nn, skb, type,
+ sizeof(*reply), sizeof(*reply),
+ type == NFP_CCM_TYPE_CRYPTO_DEL);
+ if (err) {
+ nn_dp_warn(&nn->dp, "failed to %s TLS: %d\n", name, err);
+ return err;
+ }
+
+ reply = (void *)skb->data;
+ err = -be32_to_cpu(reply->error);
+ if (err)
+ nn_dp_warn(&nn->dp, "failed to %s TLS, fw replied: %d\n",
+ name, err);
+ dev_consume_skb_any(skb);
+
+ return err;
+}
+
+static void nfp_net_tls_del_fw(struct nfp_net *nn, __be32 *fw_handle)
+{
+ struct nfp_crypto_req_del *req;
+ struct sk_buff *skb;
+
+ skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return;
+
+ req = (void *)skb->data;
+ req->ep_id = 0;
+ memcpy(req->handle, fw_handle, sizeof(req->handle));
+
+ nfp_net_tls_communicate_simple(nn, skb, "delete",
+ NFP_CCM_TYPE_CRYPTO_DEL);
+}
+
+static void
+nfp_net_tls_set_ipver_vlan(struct nfp_crypto_req_add_front *front, u8 ipver)
+{
+ front->ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, ipver) |
+ FIELD_PREP(NFP_NET_TLS_VLAN,
+ NFP_NET_TLS_VLAN_UNUSED));
+}
+
+static void
+nfp_net_tls_assign_conn_id(struct nfp_net *nn,
+ struct nfp_crypto_req_add_front *front)
+{
+ u32 len;
+ u64 id;
+
+ id = atomic64_inc_return(&nn->ktls_conn_id_gen);
+ len = front->key_len - NFP_NET_TLS_NON_ADDR_KEY_LEN;
+
+ memcpy(front->l3_addrs, &id, sizeof(id));
+ memset(front->l3_addrs + sizeof(id), 0, len - sizeof(id));
+}
+
+static struct nfp_crypto_req_add_back *
+nfp_net_tls_set_ipv4(struct nfp_net *nn, struct nfp_crypto_req_add_v4 *req,
+ struct sock *sk, int direction)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ req->front.key_len += sizeof(__be32) * 2;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ nfp_net_tls_assign_conn_id(nn, &req->front);
+ } else {
+ req->src_ip = inet->inet_daddr;
+ req->dst_ip = inet->inet_saddr;
+ }
+
+ return &req->back;
+}
+
+static struct nfp_crypto_req_add_back *
+nfp_net_tls_set_ipv6(struct nfp_net *nn, struct nfp_crypto_req_add_v6 *req,
+ struct sock *sk, int direction)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ req->front.key_len += sizeof(struct in6_addr) * 2;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ nfp_net_tls_assign_conn_id(nn, &req->front);
+ } else {
+ memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip));
+ memcpy(req->dst_ip, &np->saddr, sizeof(req->dst_ip));
+ }
+
+#endif
+ return &req->back;
+}
+
+static void
+nfp_net_tls_set_l4(struct nfp_crypto_req_add_front *front,
+ struct nfp_crypto_req_add_back *back, struct sock *sk,
+ int direction)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ front->l4_proto = IPPROTO_TCP;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ back->src_port = 0;
+ back->dst_port = 0;
+ } else {
+ back->src_port = inet->inet_dport;
+ back->dst_port = inet->inet_sport;
+ }
+}
+
+static u8 nfp_tls_1_2_dir_to_opcode(enum tls_offload_ctx_dir direction)
+{
+ switch (direction) {
+ case TLS_OFFLOAD_CTX_DIR_TX:
+ return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
+ case TLS_OFFLOAD_CTX_DIR_RX:
+ return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static bool
+nfp_net_cipher_supported(struct nfp_net *nn, u16 cipher_type,
+ enum tls_offload_ctx_dir direction)
+{
+ u8 bit;
+
+ switch (cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+ bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
+ else
+ bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
+ break;
+ default:
+ return false;
+ }
+
+ return nn->tlv_caps.crypto_ops & BIT(bit);
+}
+
+static int
+nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
+{
+ struct tls12_crypto_info_aes_gcm_128 *tls_ci;
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_crypto_req_add_front *front;
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct nfp_crypto_req_add_back *back;
+ struct nfp_crypto_reply_add *reply;
+ struct sk_buff *skb;
+ size_t req_sz;
+ void *req;
+ bool ipv6;
+ int err;
+
+ BUILD_BUG_ON(sizeof(struct nfp_net_tls_offload_ctx) >
+ TLS_DRIVER_STATE_SIZE_TX);
+ BUILD_BUG_ON(offsetof(struct nfp_net_tls_offload_ctx, rx_end) >
+ TLS_DRIVER_STATE_SIZE_RX);
+
+ if (!nfp_net_cipher_supported(nn, crypto_info->cipher_type, direction))
+ return -EOPNOTSUPP;
+
+ switch (sk->sk_family) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ if (sk->sk_ipv6only ||
+ ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
+ req_sz = sizeof(struct nfp_crypto_req_add_v6);
+ ipv6 = true;
+ break;
+ }
+#endif
+ /* fall through */
+ case AF_INET:
+ req_sz = sizeof(struct nfp_crypto_req_add_v4);
+ ipv6 = false;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = nfp_net_tls_conn_add(nn, direction);
+ if (err)
+ return err;
+
+ skb = nfp_ccm_mbox_msg_alloc(nn, req_sz, sizeof(*reply), GFP_KERNEL);
+ if (!skb) {
+ err = -ENOMEM;
+ goto err_conn_remove;
+ }
+
+ front = (void *)skb->data;
+ front->ep_id = 0;
+ front->key_len = NFP_NET_TLS_NON_ADDR_KEY_LEN;
+ front->opcode = nfp_tls_1_2_dir_to_opcode(direction);
+ memset(front->resv, 0, sizeof(front->resv));
+
+ nfp_net_tls_set_ipver_vlan(front, ipv6 ? 6 : 4);
+
+ req = (void *)skb->data;
+ if (ipv6)
+ back = nfp_net_tls_set_ipv6(nn, req, sk, direction);
+ else
+ back = nfp_net_tls_set_ipv4(nn, req, sk, direction);
+
+ nfp_net_tls_set_l4(front, back, sk, direction);
+
+ back->counter = 0;
+ back->tcp_seq = cpu_to_be32(start_offload_tcp_sn);
+
+ tls_ci = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ memcpy(back->key, tls_ci->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+ memset(&back->key[TLS_CIPHER_AES_GCM_128_KEY_SIZE / 4], 0,
+ sizeof(back->key) - TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+ memcpy(back->iv, tls_ci->iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
+ memcpy(&back->salt, tls_ci->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+ memcpy(back->rec_no, tls_ci->rec_seq, sizeof(tls_ci->rec_seq));
+
+ /* Get an extra ref on the skb so we can wipe the key after */
+ skb_get(skb);
+
+ err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD,
+ sizeof(*reply), sizeof(*reply));
+ reply = (void *)skb->data;
+
+ /* We depend on CCM MBOX code not reallocating skb we sent
+ * so we can clear the key material out of the memory.
+ */
+ if (!WARN_ON_ONCE((u8 *)back < skb->head ||
+ (u8 *)back > skb_end_pointer(skb)) &&
+ !WARN_ON_ONCE((u8 *)&reply[1] > (u8 *)back))
+ memzero_explicit(back, sizeof(*back));
+ dev_consume_skb_any(skb); /* the extra ref from skb_get() above */
+
+ if (err) {
+ nn_dp_warn(&nn->dp, "failed to add TLS: %d (%d)\n",
+ err, direction == TLS_OFFLOAD_CTX_DIR_TX);
+ /* communicate frees skb on error */
+ goto err_conn_remove;
+ }
+
+ err = -be32_to_cpu(reply->error);
+ if (err) {
+ if (err == -ENOSPC) {
+ if (!atomic_fetch_inc(&nn->ktls_no_space))
+ nn_info(nn, "HW TLS table full\n");
+ } else {
+ nn_dp_warn(&nn->dp,
+ "failed to add TLS, FW replied: %d\n", err);
+ }
+ goto err_free_skb;
+ }
+
+ if (!reply->handle[0] && !reply->handle[1]) {
+ nn_dp_warn(&nn->dp, "FW returned NULL handle\n");
+ err = -EINVAL;
+ goto err_fw_remove;
+ }
+
+ ntls = tls_driver_ctx(sk, direction);
+ memcpy(ntls->fw_handle, reply->handle, sizeof(ntls->fw_handle));
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+ ntls->next_seq = start_offload_tcp_sn;
+ dev_consume_skb_any(skb);
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+ return 0;
+
+ tls_offload_rx_resync_set_type(sk,
+ TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
+ return 0;
+
+err_fw_remove:
+ nfp_net_tls_del_fw(nn, reply->handle);
+err_free_skb:
+ dev_consume_skb_any(skb);
+err_conn_remove:
+ nfp_net_tls_conn_remove(nn, direction);
+ return err;
+}
+
+static void
+nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_tls_offload_ctx *ntls;
+
+ nfp_net_tls_conn_remove(nn, direction);
+
+ ntls = __tls_driver_ctx(tls_ctx, direction);
+ nfp_net_tls_del_fw(nn, ntls->fw_handle);
+}
+
+static int
+nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
+ u8 *rcd_sn, enum tls_offload_ctx_dir direction)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct nfp_crypto_req_update *req;
+ struct sk_buff *skb;
+ gfp_t flags;
+ int err;
+
+ flags = direction == TLS_OFFLOAD_CTX_DIR_TX ? GFP_KERNEL : GFP_ATOMIC;
+ skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
+ if (!skb)
+ return -ENOMEM;
+
+ ntls = tls_driver_ctx(sk, direction);
+ req = (void *)skb->data;
+ req->ep_id = 0;
+ req->opcode = nfp_tls_1_2_dir_to_opcode(direction);
+ memset(req->resv, 0, sizeof(req->resv));
+ memcpy(req->handle, ntls->fw_handle, sizeof(ntls->fw_handle));
+ req->tcp_seq = cpu_to_be32(seq);
+ memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ err = nfp_net_tls_communicate_simple(nn, skb, "sync",
+ NFP_CCM_TYPE_CRYPTO_UPDATE);
+ if (err)
+ return err;
+ ntls->next_seq = seq;
+ } else {
+ nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
+ sizeof(struct nfp_crypto_reply_simple));
+ }
+
+ return 0;
+}
+
+static const struct tlsdev_ops nfp_net_tls_ops = {
+ .tls_dev_add = nfp_net_tls_add,
+ .tls_dev_del = nfp_net_tls_del,
+ .tls_dev_resync = nfp_net_tls_resync,
+};
+
+static int nfp_net_tls_reset(struct nfp_net *nn)
+{
+ struct nfp_crypto_req_reset *req;
+ struct sk_buff *skb;
+
+ skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (void *)skb->data;
+ req->ep_id = 0;
+
+ return nfp_net_tls_communicate_simple(nn, skb, "reset",
+ NFP_CCM_TYPE_CRYPTO_RESET);
+}
+
+int nfp_net_tls_init(struct nfp_net *nn)
+{
+ struct net_device *netdev = nn->dp.netdev;
+ int err;
+
+ if (!(nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK))
+ return 0;
+
+ if ((nn->tlv_caps.mbox_cmsg_types & NFP_NET_TLS_CCM_MBOX_OPS_MASK) !=
+ NFP_NET_TLS_CCM_MBOX_OPS_MASK)
+ return 0;
+
+ if (!nfp_ccm_mbox_fits(nn, sizeof(struct nfp_crypto_req_add_v6))) {
+ nn_warn(nn, "disabling TLS offload - mbox too small: %d\n",
+ nn->tlv_caps.mbox_len);
+ return 0;
+ }
+
+ err = nfp_net_tls_reset(nn);
+ if (err)
+ return err;
+
+ nn_ctrl_bar_lock(nn);
+ nn_writel(nn, nn->tlv_caps.crypto_enable_off, 0);
+ err = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
+ nn_ctrl_bar_unlock(nn);
+ if (err)
+ return err;
+
+ if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_RX) {
+ netdev->hw_features |= NETIF_F_HW_TLS_RX;
+ netdev->features |= NETIF_F_HW_TLS_RX;
+ }
+ if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_TX) {
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ }
+
+ netdev->tlsdev_ops = &nfp_net_tls_ops;
+
+ return 0;
+}
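
[Editor's note] The tls.c hunk above takes an extra reference on the command skb so that, once the firmware reply arrives, the AES-GCM key material can be scrubbed from the request buffer with memzero_explicit() rather than a plain memset(), which the compiler may drop as a dead store. Below is a minimal userspace sketch of that wipe pattern; the wipe() helper and the volatile-pointer loop are illustrative stand-ins for the kernel's memzero_explicit(), not driver code.

/* Userspace analogue of the key-wipe step above: a plain memset() on a
 * buffer that is never read again can be optimized away, so the zeroing
 * goes through a volatile pointer that the compiler must honour.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void wipe(void *buf, size_t len)
{
	volatile uint8_t *p = buf;

	while (len--)
		*p++ = 0;
}

int main(void)
{
	uint8_t key[16];

	memset(key, 0xA5, sizeof(key));	/* stand-in for an AES-GCM-128 key */
	/* ... key handed to the device / firmware here ... */
	wipe(key, sizeof(key));		/* scrub the local copy afterwards */
	printf("key[0] after wipe: %u\n", key[0]);
	return 0;
}
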
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index c56e31d9f8a4..5a54fe848de4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -54,7 +54,8 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
static int
nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
- struct nfp_fl_payload *nfp_flow, int act_len)
+ struct nfp_fl_payload *nfp_flow, int act_len,
+ struct netlink_ext_ack *extack)
{
size_t act_size = sizeof(struct nfp_fl_pre_lag);
struct nfp_fl_pre_lag *pre_lag;
@@ -65,8 +66,10 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
if (!out_dev || !netif_is_lag_master(out_dev))
return 0;
- if (act_len + act_size > NFP_FL_MAX_A_SIZ)
+ if (act_len + act_size > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at LAG action");
return -EOPNOTSUPP;
+ }
/* Pre_lag action must be first on action list.
* If other actions already exist they need pushed forward.
@@ -76,7 +79,7 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
nfp_flow->action_data, act_len);
pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
- err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
+ err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag, extack);
if (err)
return err;
@@ -93,7 +96,8 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
const struct flow_action_entry *act,
struct nfp_fl_payload *nfp_flow,
bool last, struct net_device *in_dev,
- enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
+ enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
+ struct netlink_ext_ack *extack)
{
size_t act_size = sizeof(struct nfp_fl_output);
struct nfp_flower_priv *priv = app->priv;
@@ -104,18 +108,24 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
output->head.len_lw = act_size >> NFP_FL_LW_SIZ;
out_dev = act->dev;
- if (!out_dev)
+ if (!out_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid egress interface for mirred action");
return -EOPNOTSUPP;
+ }
tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;
if (tun_type) {
/* Verify the egress netdev matches the tunnel type. */
- if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
+ if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface does not match the required tunnel type");
return -EOPNOTSUPP;
+ }
- if (*tun_out_cnt)
+ if (*tun_out_cnt) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot offload more than one tunnel mirred output per filter");
return -EOPNOTSUPP;
+ }
(*tun_out_cnt)++;
output->flags = cpu_to_be16(tmp_flags |
@@ -127,8 +137,10 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
output->flags = cpu_to_be16(tmp_flags);
gid = nfp_flower_lag_get_output_id(app, out_dev);
- if (gid < 0)
+ if (gid < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot find group id for LAG action");
return gid;
+ }
output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
} else {
/* Set action output parameters. */
@@ -136,29 +148,58 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
if (nfp_netdev_is_nfp_repr(in_dev)) {
/* Confirm ingress and egress are on same device. */
- if (!netdev_port_same_parent_id(in_dev, out_dev))
+ if (!netdev_port_same_parent_id(in_dev, out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress and egress interfaces are on different devices");
return -EOPNOTSUPP;
+ }
}
- if (!nfp_netdev_is_nfp_repr(out_dev))
+ if (!nfp_netdev_is_nfp_repr(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface is not an nfp port");
return -EOPNOTSUPP;
+ }
output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
- if (!output->port)
+ if (!output->port) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid port id for egress interface");
return -EOPNOTSUPP;
+ }
}
nfp_flow->meta.shortcut = output->port;
return 0;
}
+static bool
+nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
+{
+ struct flow_action_entry *act = flow->rule->action.entries;
+ int num_act = flow->rule->action.num_entries;
+ int act_idx;
+
+ /* Preparse action list for next mirred or redirect action */
+ for (act_idx = start_idx + 1; act_idx < num_act; act_idx++)
+ if (act[act_idx].id == FLOW_ACTION_REDIRECT ||
+ act[act_idx].id == FLOW_ACTION_MIRRED)
+ return netif_is_gretap(act[act_idx].dev);
+
+ return false;
+}
+
static enum nfp_flower_tun_type
-nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
- const struct flow_action_entry *act)
+nfp_fl_get_tun_from_act(struct nfp_app *app,
+ struct flow_cls_offload *flow,
+ const struct flow_action_entry *act, int act_idx)
{
const struct ip_tunnel_info *tun = act->tunnel;
struct nfp_flower_priv *priv = app->priv;
+ /* Determine the tunnel type based on the egress netdev
+ * in the mirred action for tunnels without l4.
+ */
+ if (nfp_flower_tun_is_gre(flow, act_idx))
+ return NFP_FL_TUNNEL_GRE;
+
switch (tun->key.tp_dst) {
case htons(IANA_VXLAN_UDP_PORT):
return NFP_FL_TUNNEL_VXLAN;
@@ -194,7 +235,8 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
- const struct flow_action_entry *act)
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
{
struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel;
int opt_len, opt_cnt, act_start, tot_push_len;
@@ -212,20 +254,26 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
struct geneve_opt *opt = (struct geneve_opt *)src;
opt_cnt++;
- if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
+ if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed number of geneve options exceeded");
return -EOPNOTSUPP;
+ }
tot_push_len += sizeof(struct nfp_fl_push_geneve) +
opt->length * 4;
- if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
+ if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
return -EOPNOTSUPP;
+ }
opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
src += sizeof(struct geneve_opt) + opt->length * 4;
}
- if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
+ if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
return -EOPNOTSUPP;
+ }
act_start = *list_len;
*list_len += tot_push_len;
@@ -256,14 +304,13 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
}
static int
-nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
- struct nfp_fl_set_ipv4_udp_tun *set_tun,
- const struct flow_action_entry *act,
- struct nfp_fl_pre_tunnel *pre_tun,
- enum nfp_flower_tun_type tun_type,
- struct net_device *netdev)
+nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
+ const struct flow_action_entry *act,
+ struct nfp_fl_pre_tunnel *pre_tun,
+ enum nfp_flower_tun_type tun_type,
+ struct net_device *netdev, struct netlink_ext_ack *extack)
{
- size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
+ size_t act_size = sizeof(struct nfp_fl_set_ipv4_tun);
const struct ip_tunnel_info *ip_tun = act->tunnel;
struct nfp_flower_priv *priv = app->priv;
u32 tmp_set_ip_tun_type_index = 0;
@@ -275,8 +322,10 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
if (ip_tun->options_len &&
(tun_type != NFP_FL_TUNNEL_GENEVE ||
- !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
+ !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve options offload");
return -EOPNOTSUPP;
+ }
set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
@@ -316,8 +365,10 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
set_tun->tos = ip_tun->key.tos;
if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
- ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
+ ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
return -EOPNOTSUPP;
+ }
set_tun->tun_flags = ip_tun->key.tun_flags;
if (tun_type == NFP_FL_TUNNEL_GENEVE) {
@@ -345,18 +396,22 @@ static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
static int
nfp_fl_set_eth(const struct flow_action_entry *act, u32 off,
- struct nfp_fl_set_eth *set_eth)
+ struct nfp_fl_set_eth *set_eth, struct netlink_ext_ack *extack)
{
u32 exact, mask;
- if (off + 4 > ETH_ALEN * 2)
+ if (off + 4 > ETH_ALEN * 2) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
return -EOPNOTSUPP;
+ }
mask = ~act->mangle.mask;
exact = act->mangle.val;
- if (exact & ~mask)
+ if (exact & ~mask) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
return -EOPNOTSUPP;
+ }
nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
&set_eth->eth_addr_mask[off]);
@@ -377,7 +432,8 @@ struct ipv4_ttl_word {
static int
nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
struct nfp_fl_set_ip4_addrs *set_ip_addr,
- struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos)
+ struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos,
+ struct netlink_ext_ack *extack)
{
struct ipv4_ttl_word *ttl_word_mask;
struct ipv4_ttl_word *ttl_word;
@@ -389,8 +445,10 @@ nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
mask = (__force __be32)~act->mangle.mask;
exact = (__force __be32)act->mangle.val;
- if (exact & ~mask)
+ if (exact & ~mask) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 action");
return -EOPNOTSUPP;
+ }
switch (off) {
case offsetof(struct iphdr, daddr):
@@ -413,8 +471,10 @@ nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
ttl_word_mask = (struct ipv4_ttl_word *)&mask;
ttl_word = (struct ipv4_ttl_word *)&exact;
- if (ttl_word_mask->protocol || ttl_word_mask->check)
+ if (ttl_word_mask->protocol || ttl_word_mask->check) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 ttl action");
return -EOPNOTSUPP;
+ }
set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
@@ -429,8 +489,10 @@ nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
tos_word = (struct iphdr *)&exact;
if (tos_word_mask->version || tos_word_mask->ihl ||
- tos_word_mask->tot_len)
+ tos_word_mask->tot_len) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 tos action");
return -EOPNOTSUPP;
+ }
set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
@@ -441,6 +503,7 @@ nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
NFP_FL_LW_SIZ;
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv4 header");
return -EOPNOTSUPP;
}
@@ -468,7 +531,8 @@ struct ipv6_hop_limit_word {
static int
nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
- struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
+ struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
+ struct netlink_ext_ack *extack)
{
struct ipv6_hop_limit_word *fl_hl_mask;
struct ipv6_hop_limit_word *fl_hl;
@@ -478,8 +542,10 @@ nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
fl_hl = (struct ipv6_hop_limit_word *)&exact;
- if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len)
+ if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 hop limit action");
return -EOPNOTSUPP;
+ }
ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
@@ -488,8 +554,10 @@ nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
break;
case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
if (mask & ~IPV6_FLOW_LABEL_MASK ||
- exact & ~IPV6_FLOW_LABEL_MASK)
+ exact & ~IPV6_FLOW_LABEL_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow label action");
return -EOPNOTSUPP;
+ }
ip_hl_fl->ipv6_label_mask |= mask;
ip_hl_fl->ipv6_label &= ~mask;
@@ -507,7 +575,8 @@ static int
nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
struct nfp_fl_set_ipv6_addr *ip_dst,
struct nfp_fl_set_ipv6_addr *ip_src,
- struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
+ struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
+ struct netlink_ext_ack *extack)
{
__be32 exact, mask;
int err = 0;
@@ -517,12 +586,14 @@ nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
mask = (__force __be32)~act->mangle.mask;
exact = (__force __be32)act->mangle.val;
- if (exact & ~mask)
+ if (exact & ~mask) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 action");
return -EOPNOTSUPP;
+ }
if (off < offsetof(struct ipv6hdr, saddr)) {
err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
- ip_hl_fl);
+ ip_hl_fl, extack);
} else if (off < offsetof(struct ipv6hdr, daddr)) {
word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
@@ -533,6 +604,7 @@ nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
exact, mask, ip_dst);
} else {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv6 header");
return -EOPNOTSUPP;
}
@@ -541,18 +613,23 @@ nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
static int
nfp_fl_set_tport(const struct flow_action_entry *act, u32 off,
- struct nfp_fl_set_tport *set_tport, int opcode)
+ struct nfp_fl_set_tport *set_tport, int opcode,
+ struct netlink_ext_ack *extack)
{
u32 exact, mask;
- if (off)
+ if (off) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of L4 header");
return -EOPNOTSUPP;
+ }
mask = ~act->mangle.mask;
exact = act->mangle.val;
- if (exact & ~mask)
+ if (exact & ~mask) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit L4 action");
return -EOPNOTSUPP;
+ }
nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
set_tport->tp_port_mask);
@@ -592,11 +669,11 @@ struct nfp_flower_pedit_acts {
};
static int
-nfp_fl_commit_mangle(struct tc_cls_flower_offload *flow, char *nfp_action,
+nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action,
int *a_len, struct nfp_flower_pedit_acts *set_act,
u32 *csum_updated)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
size_t act_size = 0;
u8 ip_proto = 0;
@@ -694,8 +771,9 @@ nfp_fl_commit_mangle(struct tc_cls_flower_offload *flow, char *nfp_action,
static int
nfp_fl_pedit(const struct flow_action_entry *act,
- struct tc_cls_flower_offload *flow, char *nfp_action, int *a_len,
- u32 *csum_updated, struct nfp_flower_pedit_acts *set_act)
+ struct flow_cls_offload *flow, char *nfp_action, int *a_len,
+ u32 *csum_updated, struct nfp_flower_pedit_acts *set_act,
+ struct netlink_ext_ack *extack)
{
enum flow_action_mangle_base htype;
u32 offset;
@@ -705,21 +783,22 @@ nfp_fl_pedit(const struct flow_action_entry *act,
switch (htype) {
case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
- return nfp_fl_set_eth(act, offset, &set_act->set_eth);
+ return nfp_fl_set_eth(act, offset, &set_act->set_eth, extack);
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
- &set_act->set_ip_ttl_tos);
+ &set_act->set_ip_ttl_tos, extack);
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
&set_act->set_ip6_src,
- &set_act->set_ip6_tc_hl_fl);
+ &set_act->set_ip6_tc_hl_fl, extack);
case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
return nfp_fl_set_tport(act, offset, &set_act->set_tport,
- NFP_FL_ACTION_OPCODE_SET_TCP);
+ NFP_FL_ACTION_OPCODE_SET_TCP, extack);
case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
return nfp_fl_set_tport(act, offset, &set_act->set_tport,
- NFP_FL_ACTION_OPCODE_SET_UDP);
+ NFP_FL_ACTION_OPCODE_SET_UDP, extack);
default:
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported header");
return -EOPNOTSUPP;
}
}
@@ -730,7 +809,8 @@ nfp_flower_output_action(struct nfp_app *app,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev, bool last,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt, u32 *csum_updated)
+ int *out_cnt, u32 *csum_updated,
+ struct netlink_ext_ack *extack)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_output *output;
@@ -739,15 +819,19 @@ nfp_flower_output_action(struct nfp_app *app,
/* If csum_updated has not been reset by now, it means HW will
* incorrectly update csums when they are not requested.
*/
- if (*csum_updated)
+ if (*csum_updated) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: set actions without updating checksums are not supported");
return -EOPNOTSUPP;
+ }
- if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
+ if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: mirred output increases action list size beyond the allowed maximum");
return -EOPNOTSUPP;
+ }
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
- tun_out_cnt);
+ tun_out_cnt, extack);
if (err)
return err;
@@ -757,11 +841,13 @@ nfp_flower_output_action(struct nfp_app *app,
/* nfp_fl_pre_lag returns -err or size of prelag action added.
* This will be 0 if it is not egressing to a lag dev.
*/
- prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len);
- if (prelag_size < 0)
+ prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len, extack);
+ if (prelag_size < 0) {
return prelag_size;
- else if (prelag_size > 0 && (!last || *out_cnt))
+ } else if (prelag_size > 0 && (!last || *out_cnt)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: LAG action has to be last action in action list");
return -EOPNOTSUPP;
+ }
*a_len += prelag_size;
}
@@ -772,14 +858,15 @@ nfp_flower_output_action(struct nfp_app *app,
static int
nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
int *out_cnt, u32 *csum_updated,
- struct nfp_flower_pedit_acts *set_act)
+ struct nfp_flower_pedit_acts *set_act,
+ struct netlink_ext_ack *extack, int act_idx)
{
- struct nfp_fl_set_ipv4_udp_tun *set_tun;
+ struct nfp_fl_set_ipv4_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_pop_vlan *pop_v;
@@ -792,20 +879,23 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
case FLOW_ACTION_REDIRECT:
err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
true, tun_type, tun_out_cnt,
- out_cnt, csum_updated);
+ out_cnt, csum_updated, extack);
if (err)
return err;
break;
case FLOW_ACTION_MIRRED:
err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
false, tun_type, tun_out_cnt,
- out_cnt, csum_updated);
+ out_cnt, csum_updated, extack);
if (err)
return err;
break;
case FLOW_ACTION_VLAN_POP:
- if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
+ if (*a_len +
+ sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop vlan");
return -EOPNOTSUPP;
+ }
pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);
@@ -814,8 +904,11 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
*a_len += sizeof(struct nfp_fl_pop_vlan);
break;
case FLOW_ACTION_VLAN_PUSH:
- if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
+ if (*a_len +
+ sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push vlan");
return -EOPNOTSUPP;
+ }
psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
@@ -826,35 +919,41 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
case FLOW_ACTION_TUNNEL_ENCAP: {
const struct ip_tunnel_info *ip_tun = act->tunnel;
- *tun_type = nfp_fl_get_tun_from_act_l4_port(app, act);
- if (*tun_type == NFP_FL_TUNNEL_NONE)
+ *tun_type = nfp_fl_get_tun_from_act(app, flow, act, act_idx);
+ if (*tun_type == NFP_FL_TUNNEL_NONE) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list");
return -EOPNOTSUPP;
+ }
- if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
+ if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel flags in action list");
return -EOPNOTSUPP;
+ }
/* Pre-tunnel action is required for tunnel encap.
* This checks for next hop entries on NFP.
* If none, the packet falls back before applying other actions.
*/
if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
- sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
+ sizeof(struct nfp_fl_set_ipv4_tun) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
return -EOPNOTSUPP;
+ }
pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
*a_len += sizeof(struct nfp_fl_pre_tunnel);
- err = nfp_fl_push_geneve_options(nfp_fl, a_len, act);
+ err = nfp_fl_push_geneve_options(nfp_fl, a_len, act, extack);
if (err)
return err;
set_tun = (void *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_set_ipv4_udp_tun(app, set_tun, act, pre_tun,
- *tun_type, netdev);
+ err = nfp_fl_set_ipv4_tun(app, set_tun, act, pre_tun,
+ *tun_type, netdev, extack);
if (err)
return err;
- *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
+ *a_len += sizeof(struct nfp_fl_set_ipv4_tun);
}
break;
case FLOW_ACTION_TUNNEL_DECAP:
@@ -862,13 +961,15 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
return 0;
case FLOW_ACTION_MANGLE:
if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
- a_len, csum_updated, set_act))
+ a_len, csum_updated, set_act, extack))
return -EOPNOTSUPP;
break;
case FLOW_ACTION_CSUM:
/* csum action requests recalc of something we have not fixed */
- if (act->csum_flags & ~*csum_updated)
+ if (act->csum_flags & ~*csum_updated) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported csum update action in action list");
return -EOPNOTSUPP;
+ }
/* If we will correctly fix the csum we can remove it from the
* csum update list. Which will later be used to check support.
*/
@@ -876,6 +977,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
break;
default:
/* Currently we do not handle any other actions. */
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
return -EOPNOTSUPP;
}
@@ -919,9 +1021,10 @@ static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
}
int nfp_flower_compile_action(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow)
+ struct nfp_fl_payload *nfp_flow,
+ struct netlink_ext_ack *extack)
{
int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
struct nfp_flower_pedit_acts set_act;
@@ -942,7 +1045,8 @@ int nfp_flower_compile_action(struct nfp_app *app,
memset(&set_act, 0, sizeof(set_act));
err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
- &out_cnt, &csum_updated, &set_act);
+ &out_cnt, &csum_updated,
+ &set_act, extack, i);
if (err)
return err;
act_cnt++;
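
[Editor's note] nfp_flower_tun_is_gre() above decides the tunnel type for a FLOW_ACTION_TUNNEL_ENCAP entry by looking ahead in the action list for the next redirect/mirred action and checking its egress device, since GRE carries no L4 port to key off. A self-contained sketch of that look-ahead follows; the types and names are purely illustrative, not the driver's structures.

/* Stand-alone sketch of the look-ahead used by nfp_flower_tun_is_gre():
 * for a tunnel-encap action, peek at the following redirect/mirred entry
 * to learn the egress device type.
 */
#include <stdbool.h>
#include <stdio.h>

enum act_id { ACT_TUNNEL_ENCAP, ACT_MIRRED, ACT_REDIRECT, ACT_OTHER };
enum dev_type { DEV_GRETAP, DEV_VXLAN, DEV_OTHER };

struct act_entry {
	enum act_id id;
	enum dev_type dev;	/* only meaningful for mirred/redirect */
};

static bool encap_egresses_to_gre(const struct act_entry *acts, int num,
				  int encap_idx)
{
	for (int i = encap_idx + 1; i < num; i++)
		if (acts[i].id == ACT_REDIRECT || acts[i].id == ACT_MIRRED)
			return acts[i].dev == DEV_GRETAP;
	return false;	/* no output action follows the encap */
}

int main(void)
{
	struct act_entry acts[] = {
		{ ACT_TUNNEL_ENCAP, DEV_OTHER },
		{ ACT_REDIRECT, DEV_GRETAP },
	};

	printf("GRE encap: %d\n", encap_egresses_to_gre(acts, 2, 0));
	return 0;
}
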
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 537f7fc19584..0f1706ae5bfc 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -8,6 +8,7 @@
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/geneve.h>
+#include <net/gre.h>
#include <net/vxlan.h>
#include "../nfp_app.h"
@@ -22,6 +23,7 @@
#define NFP_FLOWER_LAYER_CT BIT(6)
#define NFP_FLOWER_LAYER_VXLAN BIT(7)
+#define NFP_FLOWER_LAYER2_GRE BIT(0)
#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
@@ -37,6 +39,9 @@
#define NFP_FL_IP_FRAG_FIRST BIT(7)
#define NFP_FL_IP_FRAGMENTED BIT(6)
+/* GRE Tunnel flags */
+#define NFP_FL_GRE_FLAG_KEY BIT(2)
+
/* Compressed HW representation of TCP Flags */
#define NFP_FL_TCP_FLAG_URG BIT(4)
#define NFP_FL_TCP_FLAG_PSH BIT(3)
@@ -107,6 +112,7 @@
enum nfp_flower_tun_type {
NFP_FL_TUNNEL_NONE = 0,
+ NFP_FL_TUNNEL_GRE = 1,
NFP_FL_TUNNEL_VXLAN = 2,
NFP_FL_TUNNEL_GENEVE = 4,
};
@@ -203,7 +209,7 @@ struct nfp_fl_pre_tunnel {
__be32 extra[3];
};
-struct nfp_fl_set_ipv4_udp_tun {
+struct nfp_fl_set_ipv4_tun {
struct nfp_fl_act_head head;
__be16 reserved;
__be64 tun_id __packed;
@@ -354,6 +360,16 @@ struct nfp_flower_ipv6 {
struct in6_addr ipv6_dst;
};
+struct nfp_flower_tun_ipv4 {
+ __be32 src;
+ __be32 dst;
+};
+
+struct nfp_flower_tun_ip_ext {
+ u8 tos;
+ u8 ttl;
+};
+
/* Flow Frame IPv4 UDP TUNNEL --> Tunnel details (4W/16B)
* -----------------------------------------------------------------
* 3 2 1
@@ -371,15 +387,42 @@ struct nfp_flower_ipv6 {
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
struct nfp_flower_ipv4_udp_tun {
- __be32 ip_src;
- __be32 ip_dst;
+ struct nfp_flower_tun_ipv4 ipv4;
__be16 reserved1;
- u8 tos;
- u8 ttl;
+ struct nfp_flower_tun_ip_ext ip_ext;
__be32 reserved2;
__be32 tun_id;
};
+/* Flow Frame GRE TUNNEL --> Tunnel details (6W/24B)
+ * -----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_src |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_dst |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | tun_flags | tos | ttl |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | Ethertype |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Key |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+struct nfp_flower_ipv4_gre_tun {
+ struct nfp_flower_tun_ipv4 ipv4;
+ __be16 tun_flags;
+ struct nfp_flower_tun_ip_ext ip_ext;
+ __be16 reserved1;
+ __be16 ethertype;
+ __be32 tun_key;
+ __be32 reserved2;
+};
+
struct nfp_flower_geneve_options {
u8 data[NFP_FL_MAX_GENEVE_OPT_KEY];
};
@@ -530,6 +573,8 @@ nfp_fl_netdev_is_tunnel_type(struct net_device *netdev,
{
if (netif_is_vxlan(netdev))
return tun_type == NFP_FL_TUNNEL_VXLAN;
+ if (netif_is_gretap(netdev))
+ return tun_type == NFP_FL_TUNNEL_GRE;
if (netif_is_geneve(netdev))
return tun_type == NFP_FL_TUNNEL_GENEVE;
@@ -546,6 +591,8 @@ static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
return true;
if (netif_is_geneve(netdev))
return true;
+ if (netif_is_gretap(netdev))
+ return true;
return false;
}
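
[Editor's note] The ASCII layout above documents the GRE tunnel match key as 6 words (24 bytes). The following compile-time check walks the same size arithmetic, using plain fixed-width integers in place of the kernel's __be16/__be32 and sketch struct names rather than the driver's:

/* Size check of the GRE tunnel match layout documented above (6W/24B).
 * Fields follow the diagram: addresses, flags + tos/ttl, reserved +
 * ethertype, key, reserved.
 */
#include <stdint.h>

struct tun_ipv4   { uint32_t src, dst; };	/* words 0-1 */
struct tun_ip_ext { uint8_t tos, ttl; };

struct ipv4_gre_tun {
	struct tun_ipv4 ipv4;		/* 8 bytes */
	uint16_t tun_flags;		/* word 2: flags + tos/ttl */
	struct tun_ip_ext ip_ext;
	uint16_t reserved1;		/* word 3: reserved + ethertype */
	uint16_t ethertype;
	uint32_t tun_key;		/* word 4: GRE key */
	uint32_t reserved2;		/* word 5: reserved */
};

_Static_assert(sizeof(struct ipv4_gre_tun) == 24,
	       "GRE tunnel match key is 6 words");

int main(void) { return 0; }
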
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 5db838f45694..63907aeb3884 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -156,7 +156,8 @@ nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct net_device *master,
- struct nfp_fl_pre_lag *pre_act)
+ struct nfp_fl_pre_lag *pre_act,
+ struct netlink_ext_ack *extack)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_lag_group *group = NULL;
@@ -167,6 +168,7 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
master);
if (!group) {
mutex_unlock(&priv->nfp_lag.lock);
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: group does not exist for LAG action");
return -ENOENT;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 40957a8dbfe6..af9441d5787f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -343,19 +343,22 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2);
int nfp_flower_compile_flow_match(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
- enum nfp_flower_tun_type tun_type);
+ enum nfp_flower_tun_type tun_type,
+ struct netlink_ext_ack *extack);
int nfp_flower_compile_action(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow);
+ struct nfp_fl_payload *nfp_flow,
+ struct netlink_ext_ack *extack);
int nfp_compile_flow_metadata(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct nfp_fl_payload *nfp_flow,
- struct net_device *netdev);
+ struct net_device *netdev,
+ struct netlink_ext_ack *extack);
void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
struct nfp_fl_payload *nfp_flow);
int nfp_modify_flow_metadata(struct nfp_app *app,
@@ -389,7 +392,8 @@ int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct net_device *master,
- struct nfp_fl_pre_lag *pre_act);
+ struct nfp_fl_pre_lag *pre_act,
+ struct netlink_ext_ack *extack);
int nfp_flower_lag_get_output_id(struct nfp_app *app,
struct net_device *master);
void nfp_flower_qos_init(struct nfp_app *app);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index bfa4bf34911d..9cc3ba17ff69 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -10,9 +10,9 @@
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
struct nfp_flower_meta_tci *msk,
- struct tc_cls_flower_offload *flow, u8 key_type)
+ struct flow_cls_offload *flow, u8 key_type)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
u16 tmp_tci;
memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
@@ -54,7 +54,8 @@ nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
- bool mask_version, enum nfp_flower_tun_type tun_type)
+ bool mask_version, enum nfp_flower_tun_type tun_type,
+ struct netlink_ext_ack *extack)
{
if (mask_version) {
frame->in_port = cpu_to_be32(~0);
@@ -64,8 +65,10 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
if (tun_type) {
frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
} else {
- if (!cmsg_port)
+ if (!cmsg_port) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid ingress interface for match offload");
return -EOPNOTSUPP;
+ }
frame->in_port = cpu_to_be32(cmsg_port);
}
@@ -75,9 +78,9 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
struct nfp_flower_mac_mpls *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
@@ -127,9 +130,9 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
struct nfp_flower_tp_ports *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
@@ -148,9 +151,9 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
struct nfp_flower_ip_ext *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -222,9 +225,9 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
struct nfp_flower_ipv4 *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_match_ipv4_addrs match;
memset(ext, 0, sizeof(struct nfp_flower_ipv4));
@@ -244,9 +247,9 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
struct nfp_flower_ipv6 *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
memset(ext, 0, sizeof(struct nfp_flower_ipv6));
memset(msk, 0, sizeof(struct nfp_flower_ipv6));
@@ -266,7 +269,7 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
static int
nfp_flower_compile_geneve_opt(void *ext, void *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
struct flow_match_enc_opts match;
@@ -278,11 +281,76 @@ nfp_flower_compile_geneve_opt(void *ext, void *msk,
}
static void
+nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
+ struct nfp_flower_tun_ipv4 *msk,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_enc_ipv4_addrs(rule, &match);
+ ext->src = match.key->src;
+ ext->dst = match.key->dst;
+ msk->src = match.mask->src;
+ msk->dst = match.mask->dst;
+ }
+}
+
+static void
+nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
+ struct nfp_flower_tun_ip_ext *msk,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_enc_ip(rule, &match);
+ ext->tos = match.key->tos;
+ ext->ttl = match.key->ttl;
+ msk->tos = match.mask->tos;
+ msk->ttl = match.mask->ttl;
+ }
+}
+
+static void
+nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
+ struct nfp_flower_ipv4_gre_tun *msk,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+
+ memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+
+ /* NVGRE is the only supported GRE tunnel type */
+ ext->ethertype = cpu_to_be16(ETH_P_TEB);
+ msk->ethertype = cpu_to_be16(~0);
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
+
+ flow_rule_match_enc_keyid(rule, &match);
+ ext->tun_key = match.key->keyid;
+ msk->tun_key = match.mask->keyid;
+
+ ext->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+ msk->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+ }
+
+ nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+}
+
+static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
struct nfp_flower_ipv4_udp_tun *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
@@ -298,33 +366,17 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
msk->tun_id = cpu_to_be32(temp_vni);
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
- struct flow_match_ipv4_addrs match;
-
- flow_rule_match_enc_ipv4_addrs(rule, &match);
- ext->ip_src = match.key->src;
- ext->ip_dst = match.key->dst;
- msk->ip_src = match.mask->src;
- msk->ip_dst = match.mask->dst;
- }
-
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
- struct flow_match_ip match;
-
- flow_rule_match_enc_ip(rule, &match);
- ext->tos = match.key->tos;
- ext->ttl = match.key->ttl;
- msk->tos = match.mask->tos;
- msk->ttl = match.mask->ttl;
- }
+ nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}
int nfp_flower_compile_flow_match(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
- enum nfp_flower_tun_type tun_type)
+ enum nfp_flower_tun_type tun_type,
+ struct netlink_ext_ack *extack)
{
u32 port_id;
int err;
@@ -357,13 +409,13 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
/* Populate Exact Port data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
- port_id, false, tun_type);
+ port_id, false, tun_type, extack);
if (err)
return err;
/* Populate Mask Port Data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
- port_id, true, tun_type);
+ port_id, true, tun_type, extack);
if (err)
return err;
@@ -402,12 +454,27 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
msk += sizeof(struct nfp_flower_ipv6);
}
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
+ __be32 tun_dst;
+
+ nfp_flower_compile_ipv4_gre_tun((void *)ext, (void *)msk, flow);
+ tun_dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
+ ext += sizeof(struct nfp_flower_ipv4_gre_tun);
+ msk += sizeof(struct nfp_flower_ipv4_gre_tun);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = tun_dst;
+ nfp_tunnel_add_ipv4_off(app, tun_dst);
+ }
+
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
__be32 tun_dst;
nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow);
- tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
+ tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
ext += sizeof(struct nfp_flower_ipv4_udp_tun);
msk += sizeof(struct nfp_flower_ipv4_udp_tun);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 3d326efdc814..7c4a15e967df 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -290,9 +290,10 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
}
int nfp_compile_flow_metadata(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct nfp_fl_payload *nfp_flow,
- struct net_device *netdev)
+ struct net_device *netdev,
+ struct netlink_ext_ack *extack)
{
struct nfp_fl_stats_ctx_to_flow *ctx_entry;
struct nfp_flower_priv *priv = app->priv;
@@ -302,8 +303,10 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
int err;
err = nfp_get_stats_entry(app, &stats_cxt);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate new stats context");
return err;
+ }
nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
@@ -328,6 +331,12 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
if (!nfp_check_mask_add(app, nfp_flow->mask_data,
nfp_flow->meta.mask_len,
&nfp_flow->meta.flags, &new_mask_id)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
+ if (nfp_release_stats_entry(app, stats_cxt)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
+ err = -EINVAL;
+ goto err_remove_rhash;
+ }
err = -ENOENT;
goto err_remove_rhash;
}
@@ -343,6 +352,21 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (check_entry) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
+ if (nfp_release_stats_entry(app, stats_cxt)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
+ err = -EINVAL;
+ goto err_remove_mask;
+ }
+
+ if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
+ nfp_flow->meta.mask_len,
+ NULL, &new_mask_id)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
+ err = -EINVAL;
+ goto err_remove_mask;
+ }
+
err = -EEXIST;
goto err_remove_mask;
}
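
[Editor's note] The metadata.c changes above extend the error paths so that resources taken early on (the stats context, the mask id) are handed back when a later step such as the duplicate-flow lookup fails. A minimal sketch of that acquire-then-unwind pattern with goto labels; every helper name here is made up for illustration.

/* Resources are released in reverse acquisition order when a later
 * step fails, mirroring the goto-based unwind in the hunk above.
 */
#include <stdio.h>

static int acquire_ctx(void)   { return 0; }
static void release_ctx(void)  { puts("ctx released"); }
static int acquire_mask(void)  { return 0; }
static void release_mask(void) { puts("mask released"); }
static int lookup_duplicate(void) { return -17; /* pretend -EEXIST */ }

static int add_flow(void)
{
	int err;

	err = acquire_ctx();
	if (err)
		return err;
	err = acquire_mask();
	if (err)
		goto err_release_ctx;
	err = lookup_duplicate();
	if (err)
		goto err_release_mask;	/* unwind in reverse order */
	return 0;

err_release_mask:
	release_mask();
err_release_ctx:
	release_ctx();
	return err;
}

int main(void)
{
	printf("add_flow() = %d\n", add_flow());
	return 0;
}
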
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 1fbfeb43c538..e209f150c5f2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -52,8 +52,7 @@
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))
#define NFP_FLOWER_MERGE_FIELDS \
(NFP_FLOWER_LAYER_PORT | \
@@ -122,9 +121,9 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
return 0;
}
-static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
+static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
@@ -132,14 +131,25 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}
+static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+
+ return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
+}
+
static int
-nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
- u32 *key_layer_two, int *key_size)
+nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
+ u32 *key_layer_two, int *key_size,
+ struct netlink_ext_ack *extack)
{
- if (enc_opts->key->len > NFP_FL_MAX_GENEVE_OPT_KEY)
+ if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
return -EOPNOTSUPP;
+ }
- if (enc_opts->key->len > 0) {
+ if (enc_opts->len > 0) {
*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
*key_size += sizeof(struct nfp_flower_geneve_options);
}
@@ -148,13 +158,65 @@ nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
}
static int
+nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
+ struct flow_dissector_key_enc_opts *enc_op,
+ u32 *key_layer_two, u8 *key_layer, int *key_size,
+ struct nfp_flower_priv *priv,
+ enum nfp_flower_tun_type *tun_type,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ switch (enc_ports->dst) {
+ case htons(IANA_VXLAN_UDP_PORT):
+ *tun_type = NFP_FL_TUNNEL_VXLAN;
+ *key_layer |= NFP_FLOWER_LAYER_VXLAN;
+ *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (enc_op) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case htons(GENEVE_UDP_PORT):
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
+ return -EOPNOTSUPP;
+ }
+ *tun_type = NFP_FL_TUNNEL_GENEVE;
+ *key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ *key_size += sizeof(struct nfp_flower_ext_meta);
+ *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
+ *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (!enc_op)
+ break;
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
+ return -EOPNOTSUPP;
+ }
+ err = nfp_flower_calc_opt_layer(enc_op, key_layer_two,
+ key_size, extack);
+ if (err)
+ return err;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
struct net_device *netdev,
struct nfp_fl_key_ls *ret_key_ls,
- struct tc_cls_flower_offload *flow,
- enum nfp_flower_tun_type *tun_type)
+ struct flow_cls_offload *flow,
+ enum nfp_flower_tun_type *tun_type,
+ struct netlink_ext_ack *extack)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_dissector *dissector = rule->match.dissector;
struct flow_match_basic basic = { NULL, NULL};
struct nfp_flower_priv *priv = app->priv;
@@ -163,14 +225,18 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
int key_size;
int err;
- if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
+ if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
return -EOPNOTSUPP;
+ }
/* If any tun dissector is used then the required set must be used. */
if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
(dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
- != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
+ != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
return -EOPNOTSUPP;
+ }
key_layer_two = 0;
key_layer = NFP_FLOWER_LAYER_PORT;
@@ -188,8 +254,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_vlan(rule, &vlan);
if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
- vlan.key->vlan_priority)
+ vlan.key->vlan_priority) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
return -EOPNOTSUPP;
+ }
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
@@ -200,56 +268,68 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_enc_control(rule, &enc_ctl);
- if (enc_ctl.mask->addr_type != 0xffff ||
- enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
+ if (enc_ctl.mask->addr_type != 0xffff) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
+ return -EOPNOTSUPP;
+ }
+ if (enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only IPv4 tunnels are supported");
return -EOPNOTSUPP;
+ }
/* These fields are already verified as used. */
flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
- if (ipv4_addrs.mask->dst != cpu_to_be32(~0))
- return -EOPNOTSUPP;
-
- flow_rule_match_enc_ports(rule, &enc_ports);
- if (enc_ports.mask->dst != cpu_to_be16(~0))
+ if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
return -EOPNOTSUPP;
+ }
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
flow_rule_match_enc_opts(rule, &enc_op);
- switch (enc_ports.key->dst) {
- case htons(IANA_VXLAN_UDP_PORT):
- *tun_type = NFP_FL_TUNNEL_VXLAN;
- key_layer |= NFP_FLOWER_LAYER_VXLAN;
- key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
- if (enc_op.key)
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+ /* check if GRE, which has no enc_ports */
+ if (netif_is_gretap(netdev)) {
+ *tun_type = NFP_FL_TUNNEL_GRE;
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_layer_two |= NFP_FLOWER_LAYER2_GRE;
+ key_size +=
+ sizeof(struct nfp_flower_ipv4_gre_tun);
+
+ if (enc_op.key) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
return -EOPNOTSUPP;
- break;
- case htons(GENEVE_UDP_PORT):
- if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
+ }
+ } else {
+ flow_rule_match_enc_ports(rule, &enc_ports);
+ if (enc_ports.mask->dst != cpu_to_be16(~0)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
return -EOPNOTSUPP;
- *tun_type = NFP_FL_TUNNEL_GENEVE;
- key_layer |= NFP_FLOWER_LAYER_EXT_META;
- key_size += sizeof(struct nfp_flower_ext_meta);
- key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
- key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ }
- if (!enc_op.key)
- break;
- if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
- return -EOPNOTSUPP;
- err = nfp_flower_calc_opt_layer(&enc_op, &key_layer_two,
- &key_size);
+ err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
+ enc_op.key,
+ &key_layer_two,
+ &key_layer,
+ &key_size, priv,
+ tun_type, extack);
if (err)
return err;
- break;
- default:
- return -EOPNOTSUPP;
- }
- /* Ensure the ingress netdev matches the expected tun type. */
- if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type))
- return -EOPNOTSUPP;
+ /* Ensure the ingress netdev matches the expected
+ * tun type.
+ */
+ if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
+ return -EOPNOTSUPP;
+ }
+ }
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
@@ -272,6 +352,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
* because we rely on it to get to the host.
*/
case cpu_to_be16(ETH_P_ARP):
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
return -EOPNOTSUPP;
case cpu_to_be16(ETH_P_MPLS_UC):
@@ -287,17 +368,15 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
break;
default:
- /* Other ethtype - we need check the masks for the
- * remainder of the key to ensure we can offload.
- */
- if (nfp_flower_check_higher_than_mac(flow))
- return -EOPNOTSUPP;
- break;
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
+ return -EOPNOTSUPP;
}
+ } else if (nfp_flower_check_higher_than_mac(flow)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
+ return -EOPNOTSUPP;
}
if (basic.mask && basic.mask->ip_proto) {
- /* Ethernet type is present in the key. */
switch (basic.key->ip_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
@@ -307,14 +386,15 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
key_layer |= NFP_FLOWER_LAYER_TP;
key_size += sizeof(struct nfp_flower_tp_ports);
break;
- default:
- /* Other ip proto - we need check the masks for the
- * remainder of the key to ensure we can offload.
- */
- return -EOPNOTSUPP;
}
}
+ if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
+ nfp_flower_check_higher_than_l3(flow)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
+ return -EOPNOTSUPP;
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
struct flow_match_tcp tcp;
u32 tcp_flags;
@@ -322,22 +402,28 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_tcp(rule, &tcp);
tcp_flags = be16_to_cpu(tcp.key->flags);
- if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
+ if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
return -EOPNOTSUPP;
+ }
/* We only support PSH and URG flags when either
* FIN, SYN or RST is present as well.
*/
if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
- !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
+ !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
return -EOPNOTSUPP;
+ }
		/* We need to store TCP flags in either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include an IPv4/IPv6 key
		 * layer if we have not done so already.
*/
- if (!basic.key)
+ if (!basic.key) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
return -EOPNOTSUPP;
+ }
if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
!(key_layer & NFP_FLOWER_LAYER_IPV6)) {
@@ -353,6 +439,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
return -EOPNOTSUPP;
}
}
@@ -362,8 +449,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
struct flow_match_control ctl;
flow_rule_match_control(rule, &ctl);
- if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
+ if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
return -EOPNOTSUPP;
+ }
}
ret_key_ls->key_layer = key_layer;
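
[Editor's note] The TCP-flags and control-flags checks in this hunk follow the usual "reject unknown bits" pattern: any bit outside the supported mask, or an unsupported combination (PSH/URG without FIN, SYN or RST), refuses the offload. A small standalone sketch of that validation; the flag values mirror the kernel's TCPHDR_* bit positions, but the names and the supported mask are defined locally for illustration.

/* Reject flags outside the supported set, then reject unsupported
 * combinations, as in the checks above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TCP_FIN 0x01
#define TCP_SYN 0x02
#define TCP_RST 0x04
#define TCP_PSH 0x08
#define TCP_URG 0x20

#define SUPPORTED_TCPFLAGS (TCP_FIN | TCP_SYN | TCP_RST | TCP_PSH | TCP_URG)

static bool tcp_flags_offloadable(uint16_t flags)
{
	if (flags & ~SUPPORTED_TCPFLAGS)	/* unknown bit requested */
		return false;
	/* PSH/URG only supported alongside FIN, SYN or RST */
	if ((flags & (TCP_PSH | TCP_URG)) &&
	    !(flags & (TCP_FIN | TCP_SYN | TCP_RST)))
		return false;
	return true;
}

int main(void)
{
	printf("%d %d\n", tcp_flags_offloadable(TCP_SYN | TCP_PSH),
	       tcp_flags_offloadable(TCP_PSH));
	return 0;
}
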
@@ -771,14 +860,16 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2)
{
- struct tc_cls_flower_offload merge_tc_off;
+ struct flow_cls_offload merge_tc_off;
struct nfp_flower_priv *priv = app->priv;
+ struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *merge_flow;
struct nfp_fl_key_ls merge_key_ls;
int err;
ASSERT_RTNL();
+ extack = merge_tc_off.common.extack;
if (sub_flow1 == sub_flow2 ||
nfp_flower_is_merge_flow(sub_flow1) ||
nfp_flower_is_merge_flow(sub_flow2))
@@ -816,7 +907,7 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
merge_tc_off.cookie = merge_flow->tc_flower_cookie;
err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
- merge_flow->ingress_dev);
+ merge_flow->ingress_dev, extack);
if (err)
goto err_unlink_sub_flow2;
@@ -865,15 +956,17 @@ err_destroy_merge_flow:
*/
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
struct nfp_flower_priv *priv = app->priv;
+ struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *flow_pay;
struct nfp_fl_key_ls *key_layer;
struct nfp_port *port = NULL;
int err;
+ extack = flow->common.extack;
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_port_from_netdev(netdev);
@@ -882,7 +975,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
return -ENOMEM;
err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
- &tun_type);
+ &tun_type, extack);
if (err)
goto err_free_key_ls;
@@ -893,23 +986,25 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
}
err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
- flow_pay, tun_type);
+ flow_pay, tun_type, extack);
if (err)
goto err_destroy_flow;
- err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
+ err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
if (err)
goto err_destroy_flow;
- err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev);
+ err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
if (err)
goto err_destroy_flow;
flow_pay->tc_flower_cookie = flow->cookie;
err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
nfp_flower_table_params);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
goto err_release_metadata;
+ }
err = nfp_flower_xmit_flow(app, flow_pay,
NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
@@ -1024,19 +1119,23 @@ nfp_flower_del_linked_merge_flows(struct nfp_app *app,
*/
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
struct nfp_flower_priv *priv = app->priv;
+ struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *nfp_flow;
struct nfp_port *port = NULL;
int err;
+ extack = flow->common.extack;
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_port_from_netdev(netdev);
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
- if (!nfp_flow)
+ if (!nfp_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
return -ENOENT;
+ }
err = nfp_modify_flow_metadata(app, nfp_flow);
if (err)
@@ -1127,15 +1226,19 @@ nfp_flower_update_merge_stats(struct nfp_app *app,
*/
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
struct nfp_flower_priv *priv = app->priv;
+ struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *nfp_flow;
u32 ctx_id;
+ extack = flow->common.extack;
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
- if (!nfp_flow)
+ if (!nfp_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
return -EINVAL;
+ }
ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
@@ -1156,17 +1259,17 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flower)
+ struct flow_cls_offload *flower)
{
if (!eth_proto_is_802_3(flower->common.protocol))
return -EOPNOTSUPP;
switch (flower->command) {
- case TC_CLSFLOWER_REPLACE:
+ case FLOW_CLS_REPLACE:
return nfp_flower_add_offload(app, netdev, flower);
- case TC_CLSFLOWER_DESTROY:
+ case FLOW_CLS_DESTROY:
return nfp_flower_del_offload(app, netdev, flower);
- case TC_CLSFLOWER_STATS:
+ case FLOW_CLS_STATS:
return nfp_flower_get_stats(app, netdev, flower);
default:
return -EOPNOTSUPP;
@@ -1193,27 +1296,45 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
}
}
+static LIST_HEAD(nfp_block_cb_list);
+
static int nfp_flower_setup_tc_block(struct net_device *netdev,
- struct tc_block_offload *f)
+ struct flow_block_offload *f)
{
struct nfp_repr *repr = netdev_priv(netdev);
struct nfp_flower_repr_priv *repr_priv;
+ struct flow_block_cb *block_cb;
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
repr_priv = repr->app_priv;
- repr_priv->block_shared = tcf_block_shared(f->block);
+ repr_priv->block_shared = f->block_shared;
+ f->driver_block_list = &nfp_block_cb_list;
switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block,
- nfp_flower_setup_tc_block_cb,
- repr, repr, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block,
- nfp_flower_setup_tc_block_cb,
- repr);
+ case FLOW_BLOCK_BIND:
+ if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
+ &nfp_block_cb_list))
+ return -EBUSY;
+
+ block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
+ repr, repr, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f->block,
+ nfp_flower_setup_tc_block_cb,
+ repr);
+ if (!block_cb)
+ return -ENOENT;
+
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
return 0;
default:
return -EOPNOTSUPP;
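For context, the BIND/UNBIND handling above is the generic flow_block_cb pattern from <net/flow_offload.h> that replaces tcf_block_cb_register()/unregister(). Below is a minimal, driver-agnostic sketch of the same sequence; my_setup_cb, my_ident and my_driver_list are placeholder names, not part of this patch.

#include <net/flow_offload.h>
#include <net/pkt_cls.h>

static LIST_HEAD(my_driver_list);

static int my_setup_cb(enum tc_setup_type type, void *type_data, void *cb_priv);

static int my_block_bind(struct flow_block_offload *f, void *my_ident)
{
	struct flow_block_cb *block_cb;

	/* refuse double registration of the same cb/ident pair */
	if (flow_block_cb_is_busy(my_setup_cb, my_ident, &my_driver_list))
		return -EBUSY;

	block_cb = flow_block_cb_alloc(my_setup_cb, my_ident, my_ident, NULL);
	if (IS_ERR(block_cb))
		return PTR_ERR(block_cb);

	flow_block_cb_add(block_cb, f);		/* core tracks it on the block */
	list_add_tail(&block_cb->driver_list, &my_driver_list);
	return 0;
}

static int my_block_unbind(struct flow_block_offload *f, void *my_ident)
{
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_lookup(f->block, my_setup_cb, my_ident);
	if (!block_cb)
		return -ENOENT;

	flow_block_cb_remove(block_cb, f);
	list_del(&block_cb->driver_list);
	return 0;
}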
@@ -1258,7 +1379,7 @@ static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
- struct tc_cls_flower_offload *flower = type_data;
+ struct flow_cls_offload *flower = type_data;
if (flower->common.chain_index)
return -EOPNOTSUPP;
@@ -1272,21 +1393,29 @@ static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
}
}
+static void nfp_flower_setup_indr_tc_release(void *cb_priv)
+{
+ struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
+
+ list_del(&priv->list);
+ kfree(priv);
+}
+
static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
- struct tc_block_offload *f)
+ struct flow_block_offload *f)
{
struct nfp_flower_indr_block_cb_priv *cb_priv;
struct nfp_flower_priv *priv = app->priv;
- int err;
+ struct flow_block_cb *block_cb;
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
- !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+ !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
nfp_flower_internal_port_can_offload(app, netdev)))
return -EOPNOTSUPP;
switch (f->command) {
- case TC_BLOCK_BIND:
+ case FLOW_BLOCK_BIND:
cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
if (!cb_priv)
return -ENOMEM;
@@ -1295,26 +1424,31 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
cb_priv->app = app;
list_add(&cb_priv->list, &priv->indr_block_cb_priv);
- err = tcf_block_cb_register(f->block,
- nfp_flower_setup_indr_block_cb,
- cb_priv, cb_priv, f->extack);
- if (err) {
+ block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb,
+ cb_priv, cb_priv,
+ nfp_flower_setup_indr_tc_release);
+ if (IS_ERR(block_cb)) {
list_del(&cb_priv->list);
kfree(cb_priv);
+ return PTR_ERR(block_cb);
}
- return err;
- case TC_BLOCK_UNBIND:
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
if (!cb_priv)
return -ENOENT;
- tcf_block_cb_unregister(f->block,
- nfp_flower_setup_indr_block_cb,
- cb_priv);
- list_del(&cb_priv->list);
- kfree(cb_priv);
+ block_cb = flow_block_cb_lookup(f->block,
+ nfp_flower_setup_indr_block_cb,
+ cb_priv);
+ if (!block_cb)
+ return -ENOENT;
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
return 0;
default:
return -EOPNOTSUPP;
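The indirect-block variant above passes nfp_flower_setup_indr_tc_release() to flow_block_cb_alloc() instead of NULL, so the flow offload core frees the per-block private data when the block_cb is released rather than the UNBIND path open-coding it. A stand-in sketch of that shape, with hypothetical names:

#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical per-block state; mirrors the shape of
 * nfp_flower_indr_block_cb_priv without copying its definition.
 */
struct demo_indr_priv {
	struct list_head list;
};

static void demo_indr_release(void *cb_priv)
{
	struct demo_indr_priv *priv = cb_priv;

	list_del(&priv->list);	/* the cleanup UNBIND used to open-code */
	kfree(priv);
}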
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 8c67505865a4..a7a80f4b722a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -162,8 +162,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
}
pay_len = nfp_flower_cmsg_get_data_len(skb);
- if (pay_len != sizeof(struct nfp_tun_active_tuns) +
- sizeof(struct route_ip_info) * count) {
+ if (pay_len != struct_size(payload, tun_info, count)) {
nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
return;
}
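The hunk above replaces open-coded payload-length arithmetic with struct_size(). A stand-in sketch of what the macro computes; the field layout below is illustrative, not the exact nfp_tun_active_tuns definition.

#include <linux/types.h>
#include <linux/overflow.h>

/* Stand-in types; only the flexible tun_info[] array matters for the
 * size computation.
 */
struct demo_route_ip_info {
	__be32 ipv4;
	__be32 route_tbl_index;
};

struct demo_active_tuns {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct demo_route_ip_info tun_info[];
};

/* struct_size(payload, tun_info, count) is an overflow-checked form of
 *	sizeof(*payload) + count * sizeof(payload->tun_info[0])
 * i.e. exactly the open-coded expression removed in the hunk above.
 */
static size_t demo_keepalive_len(const struct demo_active_tuns *payload,
				 unsigned int count)
{
	return struct_size(payload, tun_info, count);
}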
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 948d1a4b4643..60e57f08de80 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -596,6 +596,10 @@ static int nfp_pci_probe(struct pci_dev *pdev,
struct nfp_pf *pf;
int err;
+ if (pdev->vendor == PCI_VENDOR_ID_NETRONOME &&
+ pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000_VF)
+ dev_warn(&pdev->dev, "Binding NFP VF device to the NFP PF driver, the VF driver is called 'nfp_netvf'\n");
+
err = pci_enable_device(pdev);
if (err < 0)
return err;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index df9aff2684ed..5d6c3738b494 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -12,11 +12,14 @@
#ifndef _NFP_NET_H_
#define _NFP_NET_H_
+#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
#include <net/xdp.h>
#include "nfp_net_ctrl.h"
@@ -238,7 +241,7 @@ struct nfp_net_tx_ring {
#define PCIE_DESC_RX_I_TCP_CSUM_OK cpu_to_le16(BIT(11))
#define PCIE_DESC_RX_I_UDP_CSUM cpu_to_le16(BIT(10))
#define PCIE_DESC_RX_I_UDP_CSUM_OK cpu_to_le16(BIT(9))
-#define PCIE_DESC_RX_BPF cpu_to_le16(BIT(8))
+#define PCIE_DESC_RX_DECRYPTED cpu_to_le16(BIT(8))
#define PCIE_DESC_RX_EOP cpu_to_le16(BIT(7))
#define PCIE_DESC_RX_IP4_CSUM cpu_to_le16(BIT(6))
#define PCIE_DESC_RX_IP4_CSUM_OK cpu_to_le16(BIT(5))
@@ -365,6 +368,7 @@ struct nfp_net_rx_ring {
* @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK
* @hw_csum_rx_complete: Counter of packets with CHECKSUM_COMPLETE reported
* @hw_csum_rx_error: Counter of packets with bad checksums
+ * @hw_tls_rx: Number of packets with TLS decrypted by hardware
* @tx_sync: Seqlock for atomic updates of TX stats
* @tx_pkts: Number of Transmitted packets
* @tx_bytes: Number of Transmitted bytes
@@ -372,6 +376,11 @@ struct nfp_net_rx_ring {
* @hw_csum_tx_inner: Counter of inner TX checksum offload requests
* @tx_gather: Counter of packets with Gather DMA
* @tx_lso: Counter of LSO packets sent
+ * @hw_tls_tx: Counter of TLS packets sent with crypto offloaded to HW
+ * @tls_tx_fallback: Counter of TLS packets sent which had to be encrypted
+ * by the fallback path because packets came out of order
+ * @tls_tx_no_fallback: Counter of TLS packets not sent because the fallback
+ * path could not encrypt them
* @tx_errors: How many TX errors were encountered
* @tx_busy: How often was TX busy (no space)?
* @rx_replace_buf_alloc_fail: Counter of RX buffer allocation failures
@@ -392,7 +401,7 @@ struct nfp_net_r_vector {
struct {
struct tasklet_struct tasklet;
struct sk_buff_head queue;
- struct spinlock lock;
+ spinlock_t lock;
};
};
@@ -408,22 +417,30 @@ struct nfp_net_r_vector {
u64 hw_csum_rx_ok;
u64 hw_csum_rx_inner_ok;
u64 hw_csum_rx_complete;
+ u64 hw_tls_rx;
+
+ u64 hw_csum_rx_error;
+ u64 rx_replace_buf_alloc_fail;
struct nfp_net_tx_ring *xdp_ring;
struct u64_stats_sync tx_sync;
u64 tx_pkts;
u64 tx_bytes;
- u64 hw_csum_tx;
+
+ u64 ____cacheline_aligned_in_smp hw_csum_tx;
u64 hw_csum_tx_inner;
u64 tx_gather;
u64 tx_lso;
+ u64 hw_tls_tx;
- u64 hw_csum_rx_error;
- u64 rx_replace_buf_alloc_fail;
+ u64 tls_tx_fallback;
+ u64 tls_tx_no_fallback;
u64 tx_errors;
u64 tx_busy;
+ /* Cold data follows */
+
u32 irq_vector;
irq_handler_t handler;
char name[IFNAMSIZ + 8];
@@ -458,6 +475,7 @@ struct nfp_stat_pair {
* @netdev: Backpointer to net_device structure
* @is_vf: Is the driver attached to a VF?
* @chained_metadata_format: Firmware will use new metadata format
+ * @ktls_tx: Is kTLS TX enabled?
* @rx_dma_dir: Mapping direction for RX buffers
* @rx_dma_off: Offset at which DMA packets (for XDP headroom)
* @rx_offset: Offset in the RX buffers where packet data starts
@@ -482,6 +500,7 @@ struct nfp_net_dp {
u8 is_vf:1;
u8 chained_metadata_format:1;
+ u8 ktls_tx:1;
u8 rx_dma_dir;
u8 rx_offset;
@@ -549,7 +568,7 @@ struct nfp_net_dp {
* @reconfig_timer: Timer for async reading of reconfig results
* @reconfig_in_progress_update: Update FW is processing now (debug only)
* @bar_lock: vNIC config BAR access lock, protects: update,
- * mailbox area
+ * mailbox area, crypto TLV
* @link_up: Is the link up?
* @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
@@ -562,6 +581,18 @@ struct nfp_net_dp {
* @tx_bar: Pointer to mapped TX queues
* @rx_bar: Pointer to mapped FL/RX queues
* @tlv_caps: Parsed TLV capabilities
+ * @ktls_tx_conn_cnt: Number of offloaded kTLS TX connections
+ * @ktls_rx_conn_cnt: Number of offloaded kTLS RX connections
+ * @ktls_conn_id_gen: Trivial generator for kTLS connection ids (for TX)
+ * @ktls_no_space: Counter of firmware rejecting kTLS connection due to
+ * lack of space
+ * @mbox_cmsg: Common Control Message via vNIC mailbox state
+ * @mbox_cmsg.queue: CCM mbox queue of pending messages
+ * @mbox_cmsg.wq: CCM mbox wait queue of waiting processes
+ * @mbox_cmsg.workq: CCM mbox work queue for @wait_work and @runq_work
+ * @mbox_cmsg.wait_work: CCM mbox posted msg reconfig wait work
+ * @mbox_cmsg.runq_work: CCM mbox posted msg queue runner work
+ * @mbox_cmsg.tag: CCM mbox message tag allocator
* @debugfs_dir: Device directory in debugfs
* @vnic_list: Entry on device vNIC list
* @pdev: Backpointer to PCI device
@@ -620,7 +651,7 @@ struct nfp_net {
struct timer_list reconfig_timer;
u32 reconfig_in_progress_update;
- struct mutex bar_lock;
+ struct semaphore bar_lock;
u32 rx_coalesce_usecs;
u32 rx_coalesce_max_frames;
@@ -637,6 +668,22 @@ struct nfp_net {
struct nfp_net_tlv_caps tlv_caps;
+ unsigned int ktls_tx_conn_cnt;
+ unsigned int ktls_rx_conn_cnt;
+
+ atomic64_t ktls_conn_id_gen;
+
+ atomic_t ktls_no_space;
+
+ struct {
+ struct sk_buff_head queue;
+ wait_queue_head_t wq;
+ struct workqueue_struct *workq;
+ struct work_struct wait_work;
+ struct work_struct runq_work;
+ u16 tag;
+ } mbox_cmsg;
+
struct dentry *debugfs_dir;
struct list_head vnic_list;
@@ -848,12 +895,17 @@ static inline void nfp_ctrl_unlock(struct nfp_net *nn)
static inline void nn_ctrl_bar_lock(struct nfp_net *nn)
{
- mutex_lock(&nn->bar_lock);
+ down(&nn->bar_lock);
+}
+
+static inline bool nn_ctrl_bar_trylock(struct nfp_net *nn)
+{
+ return !down_trylock(&nn->bar_lock);
}
static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
{
- mutex_unlock(&nn->bar_lock);
+ up(&nn->bar_lock);
}
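The config BAR lock switches from a mutex to a semaphore here, presumably because the posted mailbox reconfig work added elsewhere in this series may release the lock from a different context than the one that acquired it, which a mutex does not allow. A minimal usage sketch of the accessors added above:

#include <linux/semaphore.h>

static struct semaphore demo_bar_lock;

static void demo_bar_lock_init(void)
{
	sema_init(&demo_bar_lock, 1);		/* binary semaphore, initially free */
}

static bool demo_bar_trylock(void)
{
	return !down_trylock(&demo_bar_lock);	/* true when the lock was taken */
}

static void demo_bar_unlock(void)
{
	up(&demo_bar_lock);	/* legal even from a different task/context */
}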
/* Globals */
@@ -883,6 +935,7 @@ void nfp_ctrl_close(struct nfp_net *nn);
void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
+int __nfp_net_reconfig(struct nfp_net *nn, u32 update);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
@@ -891,6 +944,8 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size);
int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd);
int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd);
+void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 update);
+int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn);
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index b82b684f52ce..9903805717da 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -23,7 +23,6 @@
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
-#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/page_ref.h>
@@ -37,14 +36,17 @@
#include <linux/vmalloc.h>
#include <linux/ktime.h>
+#include <net/tls.h>
#include <net/vxlan.h>
#include "nfpcore/nfp_nsp.h"
+#include "ccm.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_net_sriov.h"
#include "nfp_port.h"
+#include "crypto/crypto.h"
/**
* nfp_net_get_fw_version() - Read and parse the FW version
@@ -228,6 +230,7 @@ static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
spin_lock_bh(&nn->reconfig_lock);
+ WARN_ON(nn->reconfig_sync_present);
nn->reconfig_sync_present = true;
if (nn->reconfig_timer_active) {
@@ -271,12 +274,10 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
*
* Return: Negative errno on error, 0 on success
*/
-static int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
+int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
int ret;
- lockdep_assert_held(&nn->bar_lock);
-
nfp_net_reconfig_sync_enter(nn);
nfp_net_reconfig_start(nn, update);
@@ -331,7 +332,6 @@ int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
u32 mbox = nn->tlv_caps.mbox_off;
int ret;
- lockdep_assert_held(&nn->bar_lock);
nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
@@ -343,6 +343,24 @@ int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}
+void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 mbox_cmd)
+{
+ u32 mbox = nn->tlv_caps.mbox_off;
+
+ nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
+
+ nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_MBOX);
+}
+
+int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn)
+{
+ u32 mbox = nn->tlv_caps.mbox_off;
+
+ nfp_net_reconfig_wait_posted(nn);
+
+ return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
+}
+
int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
{
int ret;
@@ -804,6 +822,99 @@ static void nfp_net_tx_csum(struct nfp_net_dp *dp,
u64_stats_update_end(&r_vec->tx_sync);
}
+static struct sk_buff *
+nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
+{
+#ifdef CONFIG_TLS_DEVICE
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct sk_buff *nskb;
+ bool resync_pending;
+ u32 datalen, seq;
+
+ if (likely(!dp->ktls_tx))
+ return skb;
+ if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
+ return skb;
+
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ seq = ntohl(tcp_hdr(skb)->seq);
+ ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+ resync_pending = tls_offload_tx_resync_pending(skb->sk);
+ if (unlikely(resync_pending || ntls->next_seq != seq)) {
+ /* Pure ACK out of order already */
+ if (!datalen)
+ return skb;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tls_tx_fallback++;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ nskb = tls_encrypt_skb(skb);
+ if (!nskb) {
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tls_tx_no_fallback++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ return NULL;
+ }
+ /* encryption wasn't necessary */
+ if (nskb == skb)
+ return skb;
+ /* we don't re-check ring space */
+ if (unlikely(skb_is_nonlinear(nskb))) {
+ nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n");
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ dev_kfree_skb_any(nskb);
+ return NULL;
+ }
+
+ /* jump forward, a TX may have gotten lost, need to sync TX */
+ if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
+ tls_offload_tx_resync_request(nskb->sk);
+
+ *nr_frags = 0;
+ return nskb;
+ }
+
+ if (datalen) {
+ u64_stats_update_begin(&r_vec->tx_sync);
+ if (!skb_is_gso(skb))
+ r_vec->hw_tls_tx++;
+ else
+ r_vec->hw_tls_tx += skb_shinfo(skb)->gso_segs;
+ u64_stats_update_end(&r_vec->tx_sync);
+ }
+
+ memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle));
+ ntls->next_seq += datalen;
+#endif
+ return skb;
+}
+
+static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
+{
+#ifdef CONFIG_TLS_DEVICE
+ struct nfp_net_tls_offload_ctx *ntls;
+ u32 datalen, seq;
+
+ if (!tls_handle)
+ return;
+ if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
+ return;
+
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ seq = ntohl(tcp_hdr(skb)->seq);
+
+ ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+ if (ntls->next_seq == seq + datalen)
+ ntls->next_seq = seq;
+ else
+ WARN_ON_ONCE(1);
+#endif
+}
+
static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
wmb();
@@ -811,24 +922,47 @@ static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
tx_ring->wr_ptr_add = 0;
}
-static int nfp_net_prep_port_id(struct sk_buff *skb)
+static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
unsigned char *data;
+ u32 meta_id = 0;
+ int md_bytes;
- if (likely(!md_dst))
- return 0;
- if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
+ if (likely(!md_dst && !tls_handle))
return 0;
+ if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
+ if (!tls_handle)
+ return 0;
+ md_dst = NULL;
+ }
+
+ md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
- if (unlikely(skb_cow_head(skb, 8)))
+ if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
- data = skb_push(skb, 8);
- put_unaligned_be32(NFP_NET_META_PORTID, data);
- put_unaligned_be32(md_dst->u.port_info.port_id, data + 4);
+ meta_id = 0;
+ data = skb_push(skb, md_bytes) + md_bytes;
+ if (md_dst) {
+ data -= 4;
+ put_unaligned_be32(md_dst->u.port_info.port_id, data);
+ meta_id = NFP_NET_META_PORTID;
+ }
+ if (tls_handle) {
+ /* conn handle is opaque, we just use u64 to be able to quickly
+ * compare it to zero
+ */
+ data -= 8;
+ memcpy(data, &tls_handle, sizeof(tls_handle));
+ meta_id <<= NFP_NET_META_FIELD_SIZE;
+ meta_id |= NFP_NET_META_CONN_HANDLE;
+ }
+
+ data -= 4;
+ put_unaligned_be32(meta_id, data);
- return 8;
+ return md_bytes;
}
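Worked example of the TX metadata prepend that nfp_net_prep_tx_meta() now builds. Assuming NFP_NET_META_FIELD_SIZE is 4 bits (one nibble of the meta word per prepended field), a frame carrying both a port id and a TLS connection handle gets md_bytes = 4 + 4 + 8 = 16 and the in-memory layout [meta word][TLS handle, 8B][port id, 4B][frame], with the low nibble of the meta word describing the field written closest to it:

/* Sketch of the meta-word packing only; values follow the defines in
 * nfp_net_ctrl.h (NFP_NET_META_PORTID == 5, NFP_NET_META_CONN_HANDLE == 7).
 */
static u32 demo_tx_meta_word(bool has_port, bool has_tls)
{
	u32 meta_id = 0;

	if (has_port)
		meta_id = NFP_NET_META_PORTID;		/* 5 */
	if (has_tls) {
		meta_id <<= NFP_NET_META_FIELD_SIZE;	/* 4-bit type fields */
		meta_id |= NFP_NET_META_CONN_HANDLE;	/* 7 */
	}

	return meta_id;		/* 0x57 when both fields are present */
}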
/**
@@ -851,6 +985,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
struct nfp_net_dp *dp;
dma_addr_t dma_addr;
unsigned int fsize;
+ u64 tls_handle = 0;
u16 qidx;
dp = &nn->dp;
@@ -872,18 +1007,21 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- md_bytes = nfp_net_prep_port_id(skb);
- if (unlikely(md_bytes < 0)) {
+ skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
+ if (unlikely(!skb)) {
nfp_net_tx_xmit_more_flush(tx_ring);
- dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
+ md_bytes = nfp_net_prep_tx_meta(skb, tls_handle);
+ if (unlikely(md_bytes < 0))
+ goto err_flush;
+
/* Start with the head skbuf */
dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
if (dma_mapping_error(dp->dev, dma_addr))
- goto err_free;
+ goto err_dma_err;
wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
@@ -979,12 +1117,14 @@ err_unmap:
tx_ring->txbufs[wr_idx].skb = NULL;
tx_ring->txbufs[wr_idx].dma_addr = 0;
tx_ring->txbufs[wr_idx].fidx = -2;
-err_free:
+err_dma_err:
nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+err_flush:
nfp_net_tx_xmit_more_flush(tx_ring);
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_errors++;
u64_stats_update_end(&r_vec->tx_sync);
+ nfp_net_tls_tx_undo(skb, tls_handle);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1857,6 +1997,15 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
+#ifdef CONFIG_TLS_DEVICE
+ if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
+ skb->decrypted = true;
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_tls_rx++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+#endif
+
if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(rxd->rxd.vlan));
@@ -1867,6 +2016,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
napi_gro_receive(&rx_ring->r_vec->napi, skb);
} else {
skb->dev = netdev;
+ skb_reset_network_header(skb);
__skb_push(skb, ETH_HLEN);
dev_queue_xmit(skb);
}
@@ -3704,7 +3854,7 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
- mutex_init(&nn->bar_lock);
+ sema_init(&nn->bar_lock, 1);
spin_lock_init(&nn->reconfig_lock);
spin_lock_init(&nn->link_status_lock);
@@ -3716,6 +3866,10 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
if (err)
goto err_free_nn;
+ err = nfp_ccm_mbox_alloc(nn);
+ if (err)
+ goto err_free_nn;
+
return nn;
err_free_nn:
@@ -3733,8 +3887,7 @@ err_free_nn:
void nfp_net_free(struct nfp_net *nn)
{
WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
-
- mutex_destroy(&nn->bar_lock);
+ nfp_ccm_mbox_free(nn);
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
@@ -4009,14 +4162,27 @@ int nfp_net_init(struct nfp_net *nn)
if (err)
return err;
- if (nn->dp.netdev)
+ if (nn->dp.netdev) {
nfp_net_netdev_init(nn);
+ err = nfp_ccm_mbox_init(nn);
+ if (err)
+ return err;
+
+ err = nfp_net_tls_init(nn);
+ if (err)
+ goto err_clean_mbox;
+ }
+
nfp_net_vecs_init(nn);
if (!nn->dp.netdev)
return 0;
return register_netdev(nn->dp.netdev);
+
+err_clean_mbox:
+ nfp_ccm_mbox_clean(nn);
+ return err;
}
/**
@@ -4029,5 +4195,6 @@ void nfp_net_clean(struct nfp_net *nn)
return;
unregister_netdev(nn->dp.netdev);
+ nfp_ccm_mbox_clean(nn);
nfp_net_reconfig_wait_posted(nn);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
index 6d5213b5bcb0..d835c14b7257 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -99,6 +99,21 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
caps->repr_cap = readl(data);
break;
+ case NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES:
+ if (length >= 4)
+ caps->mbox_cmsg_types = readl(data);
+ break;
+ case NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS:
+ if (length < 32) {
+ dev_err(dev,
+ "CRYPTO OPS TLV should be at least 32B, is %dB offset:%u\n",
+ length, offset);
+ return -EINVAL;
+ }
+
+ caps->crypto_ops = readl(data);
+ caps->crypto_enable_off = data - ctrl_mem + 16;
+ break;
default:
if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
break;
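For illustration only: a hypothetical helper showing how the parsed crypto_enable_off might be used to flip the enabled-ops bitmap and apply it with the new UPDATE_CRYPTO bit. The real consumer lives in crypto/tls.c, which is not part of this hunk; BAR locking (nn_ctrl_bar_lock()) and error handling are omitted.

/* Hypothetical, not the driver's actual code: write the first
 * enabled-ops word at the recorded offset and ask the FW to apply it.
 */
static int demo_set_crypto_enable(struct nfp_net *nn, u32 enable_word0)
{
	nn_writel(nn, nn->tlv_caps.crypto_enable_off, enable_word0);

	return __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
}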
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 25919e338071..ee6b24e4eacd 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -44,6 +44,7 @@
#define NFP_NET_META_MARK 2
#define NFP_NET_META_PORTID 5
#define NFP_NET_META_CSUM 6 /* checksum complete type */
+#define NFP_NET_META_CONN_HANDLE 7
#define NFP_META_PORT_ID_CTRL ~0U
@@ -135,6 +136,7 @@
#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */
#define NFP_NET_CFG_UPDATE_MBOX (0x1 << 12) /* Mailbox update */
#define NFP_NET_CFG_UPDATE_VF (0x1 << 13) /* VF settings change */
+#define NFP_NET_CFG_UPDATE_CRYPTO (0x1 << 14) /* Crypto on/off */
#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* A error occurred */
#define NFP_NET_CFG_TXRS_ENABLE 0x0008
#define NFP_NET_CFG_RXRS_ENABLE 0x0010
@@ -394,6 +396,7 @@
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5
+#define NFP_NET_CFG_MBOX_CMD_TLV_CMSG 6
/**
* VLAN filtering using general use mailbox
@@ -466,6 +469,16 @@
* %NFP_NET_CFG_TLV_TYPE_REPR_CAP:
* Single word, equivalent of %NFP_NET_CFG_CAP for representors, features which
* can be used on representors.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES:
+ * Variable, bitmap of control message types supported by the mailbox handler.
+ * Bit 0 corresponds to message type 0, bit 1 to 1, etc. Control messages are
+ * encapsulated into simple TLVs, terminated by an end TLV, and written to the Mailbox.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS:
+ * 8 words, bitmaps of supported and enabled crypto operations.
+ * The first 16B (4 words) contain a bitmap of supported crypto operations,
+ * and the next 16B contain the enabled operations.
*/
#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0
#define NFP_NET_CFG_TLV_TYPE_RESERVED 1
@@ -475,6 +488,8 @@
#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0 5
#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1 6
#define NFP_NET_CFG_TLV_TYPE_REPR_CAP 7
+#define NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES 10
+#define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS 11 /* see crypto/fw.h */
struct device;
@@ -484,12 +499,18 @@ struct device;
* @mbox_off: vNIC mailbox area offset
* @mbox_len: vNIC mailbox area length
* @repr_cap: capabilities for representors
+ * @mbox_cmsg_types: cmsgs which can be passed through the mailbox
+ * @crypto_ops: supported crypto operations
+ * @crypto_enable_off: offset of crypto ops enable region
*/
struct nfp_net_tlv_caps {
u32 me_freq_mhz;
unsigned int mbox_off;
unsigned int mbox_len;
u32 repr_cap;
+ u32 mbox_cmsg_types;
+ u32 crypto_ops;
+ unsigned int crypto_enable_off;
};
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 851e31e0ba8e..d9cbe84ac6ad 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -150,8 +150,9 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
-#define NN_RVEC_GATHER_STATS 9
+#define NN_RVEC_GATHER_STATS 13
#define NN_RVEC_PER_Q_STATS 3
+#define NN_CTRL_PATH_STATS 1
#define SFP_SFF_REV_COMPLIANCE 1
@@ -423,7 +424,8 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS;
+ return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS +
+ NN_CTRL_PATH_STATS;
}
static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
@@ -442,10 +444,16 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
data = nfp_pr_et(data, "hw_rx_csum_complete");
data = nfp_pr_et(data, "hw_rx_csum_err");
data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
+ data = nfp_pr_et(data, "rx_tls_decrypted");
data = nfp_pr_et(data, "hw_tx_csum");
data = nfp_pr_et(data, "hw_tx_inner_csum");
data = nfp_pr_et(data, "tx_gather");
data = nfp_pr_et(data, "tx_lso");
+ data = nfp_pr_et(data, "tx_tls_encrypted");
+ data = nfp_pr_et(data, "tx_tls_ooo");
+ data = nfp_pr_et(data, "tx_tls_drop_no_sync_data");
+
+ data = nfp_pr_et(data, "hw_tls_no_space");
return data;
}
@@ -468,16 +476,20 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[2] = nn->r_vecs[i].hw_csum_rx_complete;
tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
+ tmp[5] = nn->r_vecs[i].hw_tls_rx;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
do {
start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
data[1] = nn->r_vecs[i].tx_pkts;
data[2] = nn->r_vecs[i].tx_busy;
- tmp[5] = nn->r_vecs[i].hw_csum_tx;
- tmp[6] = nn->r_vecs[i].hw_csum_tx_inner;
- tmp[7] = nn->r_vecs[i].tx_gather;
- tmp[8] = nn->r_vecs[i].tx_lso;
+ tmp[6] = nn->r_vecs[i].hw_csum_tx;
+ tmp[7] = nn->r_vecs[i].hw_csum_tx_inner;
+ tmp[8] = nn->r_vecs[i].tx_gather;
+ tmp[9] = nn->r_vecs[i].tx_lso;
+ tmp[10] = nn->r_vecs[i].hw_tls_tx;
+ tmp[11] = nn->r_vecs[i].tls_tx_fallback;
+ tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
data += NN_RVEC_PER_Q_STATS;
@@ -489,6 +501,8 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
*data++ = gathered_stats[j];
+ *data++ = atomic_read(&nn->ktls_no_space);
+
return data;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 42cf4fd875ea..9a08623c325d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -241,11 +241,16 @@ static int nfp_nsp_check(struct nfp_nsp *state)
state->ver.major = FIELD_GET(NSP_STATUS_MAJOR, reg);
state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg);
- if (state->ver.major != NSP_MAJOR || state->ver.minor < NSP_MINOR) {
+ if (state->ver.major != NSP_MAJOR) {
nfp_err(cpp, "Unsupported ABI %hu.%hu\n",
state->ver.major, state->ver.minor);
return -EINVAL;
}
+ if (state->ver.minor < NSP_MINOR) {
+ nfp_err(cpp, "ABI too old to support NIC operation (%u.%hu < %u.%u), please update the management FW on the flash\n",
+ NSP_MAJOR, state->ver.minor, NSP_MAJOR, NSP_MINOR);
+ return -EINVAL;
+ }
if (reg & NSP_STATUS_BUSY) {
nfp_err(cpp, "Service processor busy!\n");