author     Linus Torvalds <torvalds@linux-foundation.org>  2017-11-15 11:56:19 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-11-15 11:56:19 -0800
commit     5bbcc0f595fadb4cac0eddc4401035ec0bd95b09 (patch)
tree       3b65e490cc36a6c6fecac1fa24d9e0ac9ced4455 /drivers/net/ethernet/netronome/nfp
parent     Merge tag 'mips_4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/mips (diff)
parent     tcp: highest_sack fix (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Highlights:

   1) Maintain the TCP retransmit queue using an rbtree, with 1GB windows at 100Gb this really has become necessary. From Eric Dumazet.
   2) Multi-program support for cgroup+bpf, from Alexei Starovoitov.
   3) Perform broadcast flooding in hardware in mv88e6xxx, from Andrew Lunn.
   4) Add meter action support to openvswitch, from Andy Zhou.
   5) Add a data meta pointer for BPF accessible packets, from Daniel Borkmann.
   6) Namespace-ify almost all TCP sysctl knobs, from Eric Dumazet.
   7) Turn on Broadcom Tags in b53 driver, from Florian Fainelli.
   8) More work to move the RTNL mutex down, from Florian Westphal.
   9) Add 'bpftool' utility, to help with bpf program introspection. From Jakub Kicinski.
  10) Add new 'cpumap' type for XDP_REDIRECT action, from Jesper Dangaard Brouer.
  11) Support 'blocks' of transformations in the packet scheduler which can span multiple network devices, from Jiri Pirko.
  12) TC flower offload support in cxgb4, from Kumar Sanghvi.
  13) Priority based stream scheduler for SCTP, from Marcelo Ricardo Leitner.
  14) Thunderbolt networking driver, from Amir Levy and Mika Westerberg.
  15) Add RED qdisc offloadability, and use it in mlxsw driver. From Nogah Frankel.
  16) eBPF based device controller for cgroup v2, from Roman Gushchin.
  17) Add some fundamental tracepoints for TCP, from Song Liu.
  18) Remove garbage collection from ipv6 route layer, this is a significant accomplishment. From Wei Wang.
  19) Add multicast route offload support to mlxsw, from Yotam Gigi"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (2177 commits)
  tcp: highest_sack fix
  geneve: fix fill_info when link down
  bpf: fix lockdep splat
  net: cdc_ncm: GetNtbFormat endian fix
  openvswitch: meter: fix NULL pointer dereference in ovs_meter_cmd_reply_start
  netem: remove unnecessary 64 bit modulus
  netem: use 64 bit divide by rate
  tcp: Namespace-ify sysctl_tcp_default_congestion_control
  net: Protect iterations over net::fib_notifier_ops in fib_seq_sum()
  ipv6: set all.accept_dad to 0 by default
  uapi: fix linux/tls.h userspace compilation error
  usbnet: ipheth: prevent TX queue timeouts when device not ready
  vhost_net: conditionally enable tx polling
  uapi: fix linux/rxrpc.h userspace compilation errors
  net: stmmac: fix LPI transitioning for dwmac4
  atm: horizon: Fix irq release error
  net-sysfs: trigger netlink notification on ifalias change via sysfs
  openvswitch: Using kfree_rcu() to simplify the code
  openvswitch: Make local function ovs_nsh_key_attr_size() static
  openvswitch: Fix return value check in ovs_meter_cmd_features()
  ...
Diffstat (limited to 'drivers/net/ethernet/netronome/nfp')
-rw-r--r--  drivers/net/ethernet/netronome/nfp/Makefile | 8
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c | 1299
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.c | 128
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.h | 114
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/offload.c | 278
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 123
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 420
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 25
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 146
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.c | 29
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h | 35
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 114
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/metadata.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 153
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 804
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.c | 27
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.h | 45
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_asm.c | 257
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_asm.h | 307
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c | 28
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 5
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 75
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 146
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 8
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 47
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.h | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c | 8
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 5
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 36
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c | 87
31 files changed, 3658 insertions(+), 1104 deletions(-)
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 3cafa3d15082..24c4408b5734 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -15,6 +15,7 @@ nfp-objs := \
nfpcore/nfp_resource.o \
nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \
+ nfp_asm.o \
nfp_app.o \
nfp_app_nic.o \
nfp_devlink.o \
@@ -27,8 +28,6 @@ nfp-objs := \
nfp_net_sriov.o \
nfp_netvf_main.o \
nfp_port.o \
- bpf/main.o \
- bpf/offload.o \
nic/main.o
ifeq ($(CONFIG_NFP_APP_FLOWER),y)
@@ -38,11 +37,14 @@ nfp-objs += \
flower/main.o \
flower/match.o \
flower/metadata.o \
- flower/offload.o
+ flower/offload.o \
+ flower/tunnel_conf.o
endif
ifeq ($(CONFIG_BPF_SYSCALL),y)
nfp-objs += \
+ bpf/main.o \
+ bpf/offload.o \
bpf/verifier.o \
bpf/jit.o
endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 239dfbe8a0a1..995e95410b11 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -77,17 +77,6 @@ nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return meta->l.prev != &nfp_prog->insns;
}
-static void nfp_prog_free(struct nfp_prog *nfp_prog)
-{
- struct nfp_insn_meta *meta, *tmp;
-
- list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
- list_del(&meta->l);
- kfree(meta);
- }
- kfree(nfp_prog);
-}
-
static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
@@ -110,150 +99,7 @@ nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
return offset - nfp_prog->start_off;
}
-/* --- SW reg --- */
-struct nfp_insn_ur_regs {
- enum alu_dst_ab dst_ab;
- u16 dst;
- u16 areg, breg;
- bool swap;
- bool wr_both;
-};
-
-struct nfp_insn_re_regs {
- enum alu_dst_ab dst_ab;
- u8 dst;
- u8 areg, breg;
- bool swap;
- bool wr_both;
- bool i8;
-};
-
-static u16 nfp_swreg_to_unreg(u32 swreg, bool is_dst)
-{
- u16 val = FIELD_GET(NN_REG_VAL, swreg);
-
- switch (FIELD_GET(NN_REG_TYPE, swreg)) {
- case NN_REG_GPR_A:
- case NN_REG_GPR_B:
- case NN_REG_GPR_BOTH:
- return val;
- case NN_REG_NNR:
- return UR_REG_NN | val;
- case NN_REG_XFER:
- return UR_REG_XFR | val;
- case NN_REG_IMM:
- if (val & ~0xff) {
- pr_err("immediate too large\n");
- return 0;
- }
- return UR_REG_IMM_encode(val);
- case NN_REG_NONE:
- return is_dst ? UR_REG_NO_DST : REG_NONE;
- default:
- pr_err("unrecognized reg encoding %08x\n", swreg);
- return 0;
- }
-}
-
-static int
-swreg_to_unrestricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_ur_regs *reg)
-{
- memset(reg, 0, sizeof(*reg));
-
- /* Decode destination */
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
- return -EFAULT;
-
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
- reg->dst_ab = ALU_DST_B;
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
- reg->wr_both = true;
- reg->dst = nfp_swreg_to_unreg(dst, true);
-
- /* Decode source operands */
- if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
- return -EFAULT;
-
- if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
- FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
- reg->areg = nfp_swreg_to_unreg(rreg, false);
- reg->breg = nfp_swreg_to_unreg(lreg, false);
- reg->swap = true;
- } else {
- reg->areg = nfp_swreg_to_unreg(lreg, false);
- reg->breg = nfp_swreg_to_unreg(rreg, false);
- }
-
- return 0;
-}
-
-static u16 nfp_swreg_to_rereg(u32 swreg, bool is_dst, bool has_imm8, bool *i8)
-{
- u16 val = FIELD_GET(NN_REG_VAL, swreg);
-
- switch (FIELD_GET(NN_REG_TYPE, swreg)) {
- case NN_REG_GPR_A:
- case NN_REG_GPR_B:
- case NN_REG_GPR_BOTH:
- return val;
- case NN_REG_XFER:
- return RE_REG_XFR | val;
- case NN_REG_IMM:
- if (val & ~(0x7f | has_imm8 << 7)) {
- pr_err("immediate too large\n");
- return 0;
- }
- *i8 = val & 0x80;
- return RE_REG_IMM_encode(val & 0x7f);
- case NN_REG_NONE:
- return is_dst ? RE_REG_NO_DST : REG_NONE;
- default:
- pr_err("unrecognized reg encoding\n");
- return 0;
- }
-}
-
-static int
-swreg_to_restricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_re_regs *reg,
- bool has_imm8)
-{
- memset(reg, 0, sizeof(*reg));
-
- /* Decode destination */
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
- return -EFAULT;
-
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
- reg->dst_ab = ALU_DST_B;
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
- reg->wr_both = true;
- reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);
-
- /* Decode source operands */
- if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
- return -EFAULT;
-
- if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
- FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
- reg->areg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
- reg->breg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
- reg->swap = true;
- } else {
- reg->areg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
- reg->breg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
- }
-
- return 0;
-}
-
/* --- Emitters --- */
-static const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
- [CMD_TGT_WRITE8] = { 0x00, 0x42 },
- [CMD_TGT_READ8] = { 0x01, 0x43 },
- [CMD_TGT_READ_LE] = { 0x01, 0x40 },
- [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
-};
-
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
@@ -281,7 +127,7 @@ __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
- u8 mode, u8 xfer, u32 lreg, u32 rreg, u8 size, bool sync)
+ u8 mode, u8 xfer, swreg lreg, swreg rreg, u8 size, bool sync)
{
struct nfp_insn_re_regs reg;
int err;
@@ -296,6 +142,11 @@ emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
nfp_prog->error = -EFAULT;
return;
}
+ if (reg.dst_lmextn || reg.src_lmextn) {
+ pr_err("cmd can't use LMextn\n");
+ nfp_prog->error = -EFAULT;
+ return;
+ }
__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
}
@@ -340,49 +191,10 @@ emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
}
static void
-__emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8,
- u8 byte, bool equal, u16 addr, u8 defer)
-{
- u16 addr_lo, addr_hi;
- u64 insn;
-
- addr_lo = addr & (OP_BB_ADDR_LO >> __bf_shf(OP_BB_ADDR_LO));
- addr_hi = addr != addr_lo;
-
- insn = OP_BBYTE_BASE |
- FIELD_PREP(OP_BB_A_SRC, areg) |
- FIELD_PREP(OP_BB_BYTE, byte) |
- FIELD_PREP(OP_BB_B_SRC, breg) |
- FIELD_PREP(OP_BB_I8, imm8) |
- FIELD_PREP(OP_BB_EQ, equal) |
- FIELD_PREP(OP_BB_DEFBR, defer) |
- FIELD_PREP(OP_BB_ADDR_LO, addr_lo) |
- FIELD_PREP(OP_BB_ADDR_HI, addr_hi);
-
- nfp_prog_push(nfp_prog, insn);
-}
-
-static void
-emit_br_byte_neq(struct nfp_prog *nfp_prog,
- u32 dst, u8 imm, u8 byte, u16 addr, u8 defer)
-{
- struct nfp_insn_re_regs reg;
- int err;
-
- err = swreg_to_restricted(reg_none(), dst, reg_imm(imm), &reg, true);
- if (err) {
- nfp_prog->error = err;
- return;
- }
-
- __emit_br_byte(nfp_prog, reg.areg, reg.breg, reg.i8, byte, false, addr,
- defer);
-}
-
-static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
enum immed_width width, bool invert,
- enum immed_shift shift, bool wr_both)
+ enum immed_shift shift, bool wr_both,
+ bool dst_lmextn, bool src_lmextn)
{
u64 insn;
@@ -393,19 +205,21 @@ __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
FIELD_PREP(OP_IMMED_WIDTH, width) |
FIELD_PREP(OP_IMMED_INV, invert) |
FIELD_PREP(OP_IMMED_SHIFT, shift) |
- FIELD_PREP(OP_IMMED_WR_AB, wr_both);
+ FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
+ FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_immed(struct nfp_prog *nfp_prog, u32 dst, u16 imm,
+emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
enum immed_width width, bool invert, enum immed_shift shift)
{
struct nfp_insn_ur_regs reg;
int err;
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM) {
+ if (swreg_type(dst) == NN_REG_IMM) {
nfp_prog->error = -EFAULT;
return;
}
@@ -417,13 +231,15 @@ emit_immed(struct nfp_prog *nfp_prog, u32 dst, u16 imm,
}
__emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
- invert, shift, reg.wr_both);
+ invert, shift, reg.wr_both,
+ reg.dst_lmextn, reg.src_lmextn);
}
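The patch threads two new single-bit LMEXTN flags through every emitter. A minimal standalone sketch of the packing, with invented bit positions (the real OP_*_LMEXTN masks live in nfp_asm.h):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_SRC_LMEXTN (1ULL << 40)	/* invented position */
#define DEMO_DST_LMEXTN (1ULL << 41)	/* invented position */

static uint64_t demo_add_lmextn(uint64_t insn, bool dst_lmextn, bool src_lmextn)
{
	if (src_lmextn)
		insn |= DEMO_SRC_LMEXTN;
	if (dst_lmextn)
		insn |= DEMO_DST_LMEXTN;
	return insn;
}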
static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
enum shf_sc sc, u8 shift,
- u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both)
+ u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
+ bool dst_lmextn, bool src_lmextn)
{
u64 insn;
@@ -445,14 +261,16 @@ __emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
FIELD_PREP(OP_SHF_SHIFT, shift) |
FIELD_PREP(OP_SHF_OP, op) |
FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
- FIELD_PREP(OP_SHF_WR_AB, wr_both);
+ FIELD_PREP(OP_SHF_WR_AB, wr_both) |
+ FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_shf(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum shf_op op, u32 rreg,
- enum shf_sc sc, u8 shift)
+emit_shf(struct nfp_prog *nfp_prog, swreg dst,
+ swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
struct nfp_insn_re_regs reg;
int err;
@@ -464,12 +282,14 @@ emit_shf(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum shf_op op, u32 rreg,
}
__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
- reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both);
+ reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
+ reg.dst_lmextn, reg.src_lmextn);
}
static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
- u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both)
+ u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
+ bool dst_lmextn, bool src_lmextn)
{
u64 insn;
@@ -480,13 +300,16 @@ __emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
FIELD_PREP(OP_ALU_SW, swap) |
FIELD_PREP(OP_ALU_OP, op) |
FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
- FIELD_PREP(OP_ALU_WR_AB, wr_both);
+ FIELD_PREP(OP_ALU_WR_AB, wr_both) |
+ FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_alu(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum alu_op op, u32 rreg)
+emit_alu(struct nfp_prog *nfp_prog, swreg dst,
+ swreg lreg, enum alu_op op, swreg rreg)
{
struct nfp_insn_ur_regs reg;
int err;
@@ -498,13 +321,15 @@ emit_alu(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum alu_op op, u32 rreg)
}
__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
- reg.areg, op, reg.breg, reg.swap, reg.wr_both);
+ reg.areg, op, reg.breg, reg.swap, reg.wr_both,
+ reg.dst_lmextn, reg.src_lmextn);
}
static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
- bool zero, bool swap, bool wr_both)
+ bool zero, bool swap, bool wr_both,
+ bool dst_lmextn, bool src_lmextn)
{
u64 insn;
@@ -517,33 +342,84 @@ __emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
FIELD_PREP(OP_LDF_ZF, zero) |
FIELD_PREP(OP_LDF_BMASK, bmask) |
FIELD_PREP(OP_LDF_SHF, shift) |
- FIELD_PREP(OP_LDF_WR_AB, wr_both);
+ FIELD_PREP(OP_LDF_WR_AB, wr_both) |
+ FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_ld_field_any(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 shift,
- u32 dst, u8 bmask, u32 src, bool zero)
+emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
+ enum shf_sc sc, u8 shift, bool zero)
{
struct nfp_insn_re_regs reg;
int err;
- err = swreg_to_restricted(reg_none(), dst, src, &reg, true);
+ /* Note: ld_field is special as it uses one of the src regs as dst */
+ err = swreg_to_restricted(dst, dst, src, &reg, true);
if (err) {
nfp_prog->error = err;
return;
}
__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
- reg.i8, zero, reg.swap, reg.wr_both);
+ reg.i8, zero, reg.swap, reg.wr_both,
+ reg.dst_lmextn, reg.src_lmextn);
}
static void
-emit_ld_field(struct nfp_prog *nfp_prog, u32 dst, u8 bmask, u32 src,
+emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
enum shf_sc sc, u8 shift)
{
- emit_ld_field_any(nfp_prog, sc, shift, dst, bmask, src, false);
+ emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
+}
+
+static void
+__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
+ bool dst_lmextn, bool src_lmextn)
+{
+ u64 insn;
+
+ insn = OP_LCSR_BASE |
+ FIELD_PREP(OP_LCSR_A_SRC, areg) |
+ FIELD_PREP(OP_LCSR_B_SRC, breg) |
+ FIELD_PREP(OP_LCSR_WRITE, wr) |
+ FIELD_PREP(OP_LCSR_ADDR, addr) |
+ FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
+{
+ struct nfp_insn_ur_regs reg;
+ int err;
+
+ /* This instruction takes immeds instead of reg_none() for the ignored
+ * operand, but we can't encode 2 immeds in one instr with our normal
+ * swreg infra so if param is an immed, we encode as reg_none() and
+ * copy the immed to both operands.
+ */
+ if (swreg_type(src) == NN_REG_IMM) {
+ err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
+ reg.breg = reg.areg;
+ } else {
+ err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
+ }
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr / 4,
+ false, reg.src_lmextn);
+}
+
+static void emit_nop(struct nfp_prog *nfp_prog)
+{
+ __emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}
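A hedged usage sketch of the new emit_csr_wr() (assumed call sites, not taken from the patch): an immediate source is mirrored into both operands, while a register source pairs with reg_imm(0) as described in the comment above.

emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3); /* register source */
emit_csr_wr(nfp_prog, reg_imm(16), NFP_CSR_ACT_LM_ADDR3);     /* immediate source */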
/* --- Wrappers --- */
@@ -565,7 +441,7 @@ static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
return true;
}
-static void wrp_immed(struct nfp_prog *nfp_prog, u32 dst, u32 imm)
+static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
enum immed_shift shift;
u16 val;
@@ -586,7 +462,7 @@ static void wrp_immed(struct nfp_prog *nfp_prog, u32 dst, u32 imm)
* If the @imm is small enough, encode it directly in the operand and return;
* otherwise load @imm into a spare register and return its encoding.
*/
-static u32 ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
+static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
if (FIELD_FIT(UR_REG_IMM_MAX, imm))
return reg_imm(imm);
@@ -599,7 +475,7 @@ static u32 ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
* If the @imm is small enough, encode it directly in the operand and return;
* otherwise load @imm into a spare register and return its encoding.
*/
-static u32 re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
+static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
if (FIELD_FIT(RE_REG_IMM_MAX, imm))
return reg_imm(imm);
@@ -608,6 +484,12 @@ static u32 re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
return tmp_reg;
}
+static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
+{
+ while (count--)
+ emit_nop(nfp_prog);
+}
+
static void
wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
enum br_special special)
@@ -618,78 +500,374 @@ wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
FIELD_PREP(OP_BR_SPECIAL, special);
}
+static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
+{
+ emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
+}
+
static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
- emit_alu(nfp_prog, reg_both(dst), reg_none(), ALU_OP_NONE, reg_b(src));
+ wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}
static int
-construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset,
- u16 src, bool src_valid, u8 size)
+data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
unsigned int i;
u16 shift, sz;
- u32 tmp_reg;
/* We load the value from the address indicated in @offset and then
* shift out the data we don't need. Note: this is big endian!
*/
- sz = size < 4 ? 4 : size;
+ sz = max(size, 4);
shift = size < 4 ? 4 - size : 0;
- if (src_valid) {
- /* Calculate the true offset (src_reg + imm) */
- tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
- emit_alu(nfp_prog, imm_both(nfp_prog),
- reg_a(src), ALU_OP_ADD, tmp_reg);
- /* Check packet length (size guaranteed to fit b/c it's u8) */
- emit_alu(nfp_prog, imm_a(nfp_prog),
- imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
- emit_alu(nfp_prog, reg_none(),
- NFP_BPF_ABI_LEN, ALU_OP_SUB, imm_a(nfp_prog));
- wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
- /* Load data */
- emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
- pkt_reg(nfp_prog), imm_b(nfp_prog), sz - 1, true);
- } else {
- /* Check packet length */
- tmp_reg = ur_load_imm_any(nfp_prog, offset + size,
- imm_a(nfp_prog));
- emit_alu(nfp_prog, reg_none(),
- NFP_BPF_ABI_LEN, ALU_OP_SUB, tmp_reg);
- wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
- /* Load data */
- tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
- emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
- pkt_reg(nfp_prog), tmp_reg, sz - 1, true);
- }
+ emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
+ pptr_reg(nfp_prog), offset, sz - 1, true);
i = 0;
if (shift)
- emit_shf(nfp_prog, reg_both(0), reg_none(), SHF_OP_NONE,
+ emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
reg_xfer(0), SHF_SC_R_SHF, shift * 8);
else
for (; i * 4 < size; i++)
- emit_alu(nfp_prog, reg_both(i),
- reg_none(), ALU_OP_NONE, reg_xfer(i));
+ wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
+
+ if (i < 2)
+ wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
+
+ return 0;
+}
+
+static int
+data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+ u8 dst_gpr, int size)
+{
+ unsigned int i;
+ u8 mask, sz;
+
+ /* We load the value from the address indicated in @offset and then
+ * mask out the data we don't need. Note: this is little endian!
+ */
+ sz = max(size, 4);
+ mask = size < 4 ? GENMASK(size - 1, 0) : 0;
+
+ emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0,
+ reg_a(src_gpr), offset, sz / 4 - 1, true);
+
+ i = 0;
+ if (mask)
+ emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
+ reg_xfer(0), SHF_SC_NONE, 0, true);
+ else
+ for (; i * 4 < size; i++)
+ wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
if (i < 2)
- wrp_immed(nfp_prog, reg_both(1), 0);
+ wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
return 0;
}
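A worked model of the size < 4 case in data_ld_host_order(): CMD_TGT_READ32_SWAP always pulls a full word, and the ld_field byte-enable mask keeps only the low bytes. A standalone sketch (illustrative names):

#include <stdint.h>

static uint32_t demo_ld_host_order(uint32_t word, unsigned int size)
{
	/* byte-lane equivalent of mask = GENMASK(size - 1, 0) */
	uint32_t keep = size >= 4 ? 0xffffffffu : (1u << (size * 8)) - 1;

	return word & keep;	/* size == 2: 0xAABBCCDD -> 0x0000CCDD */
}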
+static int
+construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
+{
+ swreg tmp_reg;
+
+ /* Calculate the true offset (src_reg + imm) */
+ tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
+ emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);
+
+ /* Check packet length (size guaranteed to fit b/c it's u8) */
+ emit_alu(nfp_prog, imm_a(nfp_prog),
+ imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
+ emit_alu(nfp_prog, reg_none(),
+ plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
+ wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+
+ /* Load data */
+ return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
+}
+
static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
- return construct_data_ind_ld(nfp_prog, offset, 0, false, size);
+ swreg tmp_reg;
+
+ /* Check packet length */
+ tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
+ emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
+ wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+
+ /* Load data */
+ tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
+ return data_ld(nfp_prog, tmp_reg, 0, size);
+}
+
+static int
+data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
+ u8 src_gpr, u8 size)
+{
+ unsigned int i;
+
+ for (i = 0; i * 4 < size; i++)
+ wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));
+
+ emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+ reg_a(dst_gpr), offset, size - 1, true);
+
+ return 0;
}
-static int wrp_set_mark(struct nfp_prog *nfp_prog, u8 src)
+static int
+data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
+ u64 imm, u8 size)
{
- emit_alu(nfp_prog, NFP_BPF_ABI_MARK,
- reg_none(), ALU_OP_NONE, reg_b(src));
- emit_alu(nfp_prog, NFP_BPF_ABI_FLAGS,
- NFP_BPF_ABI_FLAGS, ALU_OP_OR, reg_imm(NFP_BPF_ABI_FLAG_MARK));
+ wrp_immed(nfp_prog, reg_xfer(0), imm);
+ if (size == 8)
+ wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);
+
+ emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+ reg_a(dst_gpr), offset, size - 1, true);
+
+ return 0;
+}
+
+typedef int
+(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
+ unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
+ bool needs_inc);
+
+static int
+wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
+ unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
+ bool needs_inc)
+{
+ bool should_inc = needs_inc && new_gpr && !last;
+ u32 idx, src_byte;
+ enum shf_sc sc;
+ swreg reg;
+ int shf;
+ u8 mask;
+
+ if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
+ return -EOPNOTSUPP;
+
+ idx = off / 4;
+
+ /* Move the entire word */
+ if (size == 4) {
+ wrp_mov(nfp_prog, reg_both(dst),
+ should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
+ return 0;
+ }
+
+ if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
+ return -EOPNOTSUPP;
+
+ src_byte = off % 4;
+
+ mask = (1 << size) - 1;
+ mask <<= dst_byte;
+
+ if (WARN_ON_ONCE(mask > 0xf))
+ return -EOPNOTSUPP;
+
+ shf = abs(src_byte - dst_byte) * 8;
+ if (src_byte == dst_byte) {
+ sc = SHF_SC_NONE;
+ } else if (src_byte < dst_byte) {
+ shf = 32 - shf;
+ sc = SHF_SC_L_SHF;
+ } else {
+ sc = SHF_SC_R_SHF;
+ }
+
+	/* ld_field can address fewer indexes; if the offset is too large, do RMW.
+	 * Because we RMW twice we waste 2 cycles on unaligned 8 byte writes.
+ */
+ if (idx <= RE_REG_LM_IDX_MAX) {
+ reg = reg_lm(lm3 ? 3 : 0, idx);
+ } else {
+ reg = imm_a(nfp_prog);
+ /* If it's not the first part of the load and we start a new GPR
+ * that means we are loading a second part of the LMEM word into
+	 * a new GPR. IOW we've already looked at that LMEM word and
+ * therefore it has been loaded into imm_a().
+ */
+ if (first || !new_gpr)
+ wrp_mov(nfp_prog, reg, reg_lm(0, idx));
+ }
+
+ emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);
+
+ if (should_inc)
+ wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
+
+ return 0;
+}
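The sub-word path above boils down to one rotate-and-mask per slice. A standalone model of the lane math (precondition: src_byte + size <= 4 and dst_byte + size <= 4, as the WARN_ON_ONCE checks enforce; names are illustrative):

#include <stdint.h>

static uint32_t demo_lmem_load(uint32_t gpr, uint32_t lmem_word,
			       unsigned int src_byte, unsigned int dst_byte,
			       unsigned int size)	/* size < 4 here */
{
	uint32_t lanes = ((1u << (size * 8)) - 1) << (dst_byte * 8);
	uint32_t val = (lmem_word >> (src_byte * 8)) << (dst_byte * 8);

	return (gpr & ~lanes) | (val & lanes);
}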
+
+static int
+wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
+ unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
+ bool needs_inc)
+{
+ bool should_inc = needs_inc && new_gpr && !last;
+ u32 idx, dst_byte;
+ enum shf_sc sc;
+ swreg reg;
+ int shf;
+ u8 mask;
+
+ if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
+ return -EOPNOTSUPP;
+
+ idx = off / 4;
+
+ /* Move the entire word */
+ if (size == 4) {
+ wrp_mov(nfp_prog,
+ should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
+ reg_b(src));
+ return 0;
+ }
+
+ if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
+ return -EOPNOTSUPP;
+
+ dst_byte = off % 4;
+
+ mask = (1 << size) - 1;
+ mask <<= dst_byte;
+
+ if (WARN_ON_ONCE(mask > 0xf))
+ return -EOPNOTSUPP;
+
+ shf = abs(src_byte - dst_byte) * 8;
+ if (src_byte == dst_byte) {
+ sc = SHF_SC_NONE;
+ } else if (src_byte < dst_byte) {
+ shf = 32 - shf;
+ sc = SHF_SC_L_SHF;
+ } else {
+ sc = SHF_SC_R_SHF;
+ }
+
+	/* ld_field can address fewer indexes; if the offset is too large, do RMW.
+	 * Because we RMW twice we waste 2 cycles on unaligned 8 byte writes.
+ */
+ if (idx <= RE_REG_LM_IDX_MAX) {
+ reg = reg_lm(lm3 ? 3 : 0, idx);
+ } else {
+ reg = imm_a(nfp_prog);
+		/* Only the first and last LMEM locations are going to need RMW;
+		 * the middle locations will be overwritten fully.
+ */
+ if (first || last)
+ wrp_mov(nfp_prog, reg, reg_lm(0, idx));
+ }
+
+ emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);
+
+ if (new_gpr || last) {
+ if (idx > RE_REG_LM_IDX_MAX)
+ wrp_mov(nfp_prog, reg_lm(0, idx), reg);
+ if (should_inc)
+ wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
+ }
+
+ return 0;
+}
+
+static int
+mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
+ bool clr_gpr, lmem_step step)
+{
+ s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off;
+ bool first = true, last;
+ bool needs_inc = false;
+ swreg stack_off_reg;
+ u8 prev_gpr = 255;
+ u32 gpr_byte = 0;
+ bool lm3 = true;
+ int ret;
+
+ if (meta->ptr_not_const) {
+		/* Use of the last encountered ptr_off is OK; they all have
+ * the same alignment. Depend on low bits of value being
+ * discarded when written to LMaddr register.
+ */
+ stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
+ stack_imm(nfp_prog));
+
+ emit_alu(nfp_prog, imm_b(nfp_prog),
+ reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);
+
+ needs_inc = true;
+ } else if (off + size <= 64) {
+ /* We can reach bottom 64B with LMaddr0 */
+ lm3 = false;
+ } else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
+ /* We have to set up a new pointer. If we know the offset
+ * and the entire access falls into a single 32 byte aligned
+ * window we won't have to increment the LM pointer.
+		 * The 32 byte alignment is important because the offset is ORed
+		 * in, not added, when doing *l$indexN[off].
+ */
+ stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
+ stack_imm(nfp_prog));
+ emit_alu(nfp_prog, imm_b(nfp_prog),
+ stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);
+
+ off %= 32;
+ } else {
+ stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
+ stack_imm(nfp_prog));
+
+ emit_alu(nfp_prog, imm_b(nfp_prog),
+ stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);
+
+ needs_inc = true;
+ }
+ if (lm3) {
+ emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
+		/* For size < 4 one slot will be filled by zeroing of the upper half. */
+ wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
+ }
+
+ if (clr_gpr && size < 8)
+ wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
+
+ while (size) {
+ u32 slice_end;
+ u8 slice_size;
+
+ slice_size = min(size, 4 - gpr_byte);
+ slice_end = min(off + slice_size, round_up(off + 1, 4));
+ slice_size = slice_end - off;
+
+ last = slice_size == size;
+
+ if (needs_inc)
+ off %= 4;
+
+ ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
+ first, gpr != prev_gpr, last, lm3, needs_inc);
+ if (ret)
+ return ret;
+
+ prev_gpr = gpr;
+ first = false;
+
+ gpr_byte += slice_size;
+ if (gpr_byte >= 4) {
+ gpr_byte -= 4;
+ gpr++;
+ }
+
+ size -= slice_size;
+ off += slice_size;
+ }
return 0;
}
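A standalone model of the slicing loop in mem_op_stack(): no slice may cross a 4-byte boundary on either the LMEM side (off) or the GPR side (gpr_byte), so e.g. an 8-byte access at off = 2 is emitted as four 2-byte steps. Illustrative only:

#include <stdio.h>

static void demo_slices(unsigned int off, unsigned int size)
{
	unsigned int gpr_byte = 0;

	while (size) {
		unsigned int slice = size < 4 - gpr_byte ? size : 4 - gpr_byte;
		unsigned int lim = (off / 4 + 1) * 4; /* round_up(off + 1, 4) */

		if (off + slice > lim)
			slice = lim - off;

		printf("off=%u gpr_byte=%u slice=%u\n", off, gpr_byte, slice);

		gpr_byte = (gpr_byte + slice) % 4;
		off += slice;
		size -= slice;
	}
}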
@@ -697,7 +875,7 @@ static int wrp_set_mark(struct nfp_prog *nfp_prog, u8 src)
static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
- u32 tmp_reg;
+ swreg tmp_reg;
if (alu_op == ALU_OP_AND) {
if (!imm)
@@ -714,7 +892,7 @@ wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
if (alu_op == ALU_OP_XOR) {
if (!~imm)
emit_alu(nfp_prog, reg_both(dst), reg_none(),
- ALU_OP_NEG, reg_b(dst));
+ ALU_OP_NOT, reg_b(dst));
if (!imm || !~imm)
return;
}
@@ -815,7 +993,7 @@ wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
u8 reg = insn->dst_reg * 2;
- u32 tmp_reg;
+ swreg tmp_reg;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -844,7 +1022,10 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
enum br_mask br_mask, bool swap)
{
const struct bpf_insn *insn = &meta->insn;
- u8 areg = insn->src_reg * 2, breg = insn->dst_reg * 2;
+ u8 areg, breg;
+
+ areg = insn->dst_reg * 2;
+ breg = insn->src_reg * 2;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -863,13 +1044,34 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return 0;
}
+static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
+{
+ emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
+ SHF_SC_R_ROT, 8);
+ emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
+ SHF_SC_R_ROT, 16);
+}
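wrp_end32() builds a 32-bit byte swap from two ld_field right-rotates. A standalone model of the two passes (e.g. 0xAABBCCDD -> 0xDDCCBBAA):

#include <stdint.h>

static uint32_t ror32(uint32_t v, unsigned int n)
{
	return v >> n | v << (32 - n);
}

static uint32_t demo_end32(uint32_t v)
{
	uint32_t t = ror32(v, 8);	/* pass 1: bmask 0xf writes all bytes */
	uint32_t m = 0x00ff00ffu;	/* pass 2: bmask 0x5 = bytes 0 and 2 */

	return (t & ~m) | (ror32(t, 16) & m);
}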
+
/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
-
- wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
- wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->src_reg * 2 + 1);
+ u8 dst = insn->dst_reg * 2;
+ u8 src = insn->src_reg * 2;
+
+ if (insn->src_reg == BPF_REG_10) {
+ swreg stack_depth_reg;
+
+ stack_depth_reg = ur_load_imm_any(nfp_prog,
+ nfp_prog->stack_depth,
+ stack_imm(nfp_prog));
+ emit_alu(nfp_prog, reg_both(dst),
+ stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ } else {
+ wrp_reg_mov(nfp_prog, dst, src);
+ wrp_reg_mov(nfp_prog, dst + 1, src + 1);
+ }
return 0;
}
@@ -964,28 +1166,64 @@ static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
-static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
- if (insn->imm != 32)
- return 1; /* TODO */
-
- wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->dst_reg * 2);
- wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), 0);
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0),
+ ALU_OP_SUB, reg_b(insn->dst_reg * 2));
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0),
+ ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1));
return 0;
}
-static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
+ u8 dst = insn->dst_reg * 2;
+
+ if (insn->imm < 32) {
+ emit_shf(nfp_prog, reg_both(dst + 1),
+ reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
+ SHF_SC_R_DSHF, 32 - insn->imm);
+ emit_shf(nfp_prog, reg_both(dst),
+ reg_none(), SHF_OP_NONE, reg_b(dst),
+ SHF_SC_L_SHF, insn->imm);
+ } else if (insn->imm == 32) {
+ wrp_reg_mov(nfp_prog, dst + 1, dst);
+ wrp_immed(nfp_prog, reg_both(dst), 0);
+ } else if (insn->imm > 32) {
+ emit_shf(nfp_prog, reg_both(dst + 1),
+ reg_none(), SHF_OP_NONE, reg_b(dst),
+ SHF_SC_L_SHF, insn->imm - 32);
+ wrp_immed(nfp_prog, reg_both(dst), 0);
+ }
- if (insn->imm != 32)
- return 1; /* TODO */
+ return 0;
+}
- wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->dst_reg * 2 + 1);
- wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u8 dst = insn->dst_reg * 2;
+
+ if (insn->imm < 32) {
+ emit_shf(nfp_prog, reg_both(dst),
+ reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
+ SHF_SC_R_DSHF, insn->imm);
+ emit_shf(nfp_prog, reg_both(dst + 1),
+ reg_none(), SHF_OP_NONE, reg_b(dst + 1),
+ SHF_SC_R_SHF, insn->imm);
+ } else if (insn->imm == 32) {
+ wrp_reg_mov(nfp_prog, dst, dst + 1);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ } else if (insn->imm > 32) {
+ emit_shf(nfp_prog, reg_both(dst),
+ reg_none(), SHF_OP_NONE, reg_b(dst + 1),
+ SHF_SC_R_SHF, insn->imm - 32);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ }
return 0;
}
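The full-range shifts above lean on the double (funnel) shifter for imm < 32. A standalone model of the left-shift case, mirroring the three branches of shl_imm64() (illustrative; imm = 0 is handled explicitly to avoid an undefined C shift):

#include <stdint.h>

static uint64_t demo_shl64(uint32_t lo, uint32_t hi, unsigned int imm)
{
	uint32_t new_hi, new_lo;

	if (imm < 32) {		/* funnel hi:lo, like SHF_SC_R_DSHF */
		new_hi = imm ? (hi << imm) | (lo >> (32 - imm)) : hi;
		new_lo = lo << imm;
	} else if (imm == 32) {	/* plain register move */
		new_hi = lo;
		new_lo = 0;
	} else {		/* imm > 32: shift the low half up */
		new_hi = lo << (imm - 32);
		new_lo = 0;
	}
	return (uint64_t)new_hi << 32 | new_lo;
}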
@@ -1060,6 +1298,16 @@ static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
}
+static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ u8 dst = meta->insn.dst_reg * 2;
+
+ emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst));
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
@@ -1075,21 +1323,59 @@ static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u8 gpr = insn->dst_reg * 2;
+
+ switch (insn->imm) {
+ case 16:
+ emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr),
+ SHF_SC_R_ROT, 8);
+ emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr),
+ SHF_SC_R_SHF, 16);
+
+ wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
+ break;
+ case 32:
+ wrp_end32(nfp_prog, reg_a(gpr), gpr);
+ wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
+ break;
+ case 64:
+ wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1));
+
+ wrp_end32(nfp_prog, reg_a(gpr), gpr + 1);
+ wrp_end32(nfp_prog, imm_a(nfp_prog), gpr);
+ break;
+ }
+
+ return 0;
+}
+
static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- wrp_immed(nfp_prog, reg_both(nfp_meta_prev(meta)->insn.dst_reg * 2 + 1),
- meta->insn.imm);
+ struct nfp_insn_meta *prev = nfp_meta_prev(meta);
+ u32 imm_lo, imm_hi;
+ u8 dst;
+
+ dst = prev->insn.dst_reg * 2;
+ imm_lo = prev->insn.imm;
+ imm_hi = meta->insn.imm;
+
+ wrp_immed(nfp_prog, reg_both(dst), imm_lo);
+
+ /* mov is always 1 insn, load imm may be two, so try to use mov */
+ if (imm_hi == imm_lo)
+ wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst));
+ else
+ wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi);
return 0;
}
static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- const struct bpf_insn *insn = &meta->insn;
-
meta->double_cb = imm_ld8_part2;
- wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
-
return 0;
}
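The split 64-bit immediate load gains a small optimization; a one-line model of the condition (illustrative):

#include <stdbool.h>
#include <stdint.h>

/* imm_ld8_part2() emits a mov for the high word when it equals the
 * just-loaded low word, since mov is always one instruction while a
 * 32-bit immed load may take two.
 */
static bool demo_can_mov_high_half(uint64_t imm)
{
	return (uint32_t)imm == (uint32_t)(imm >> 32);
}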
@@ -1111,82 +1397,235 @@ static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return construct_data_ind_ld(nfp_prog, meta->insn.imm,
- meta->insn.src_reg * 2, true, 1);
+ meta->insn.src_reg * 2, 1);
}
static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return construct_data_ind_ld(nfp_prog, meta->insn.imm,
- meta->insn.src_reg * 2, true, 2);
+ meta->insn.src_reg * 2, 2);
}
static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return construct_data_ind_ld(nfp_prog, meta->insn.imm,
- meta->insn.src_reg * 2, true, 4);
+ meta->insn.src_reg * 2, 4);
}
-static int mem_ldx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int
+mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size, unsigned int ptr_off)
{
- if (meta->insn.off == offsetof(struct sk_buff, len))
- emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_LEN);
- else
+ return mem_op_stack(nfp_prog, meta, size, ptr_off,
+ meta->insn.dst_reg * 2, meta->insn.src_reg * 2,
+ true, wrp_lmem_load);
+}
+
+static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 size)
+{
+ swreg dst = reg_both(meta->insn.dst_reg * 2);
+
+ switch (meta->insn.off) {
+ case offsetof(struct __sk_buff, len):
+ if (size != FIELD_SIZEOF(struct __sk_buff, len))
+ return -EOPNOTSUPP;
+ wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
+ break;
+ case offsetof(struct __sk_buff, data):
+ if (size != FIELD_SIZEOF(struct __sk_buff, data))
+ return -EOPNOTSUPP;
+ wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
+ break;
+ case offsetof(struct __sk_buff, data_end):
+ if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
+ return -EOPNOTSUPP;
+ emit_alu(nfp_prog, dst,
+ plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
+ break;
+ default:
return -EOPNOTSUPP;
+ }
+
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
return 0;
}
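On the BPF side, the fields now handled correspond to the usual direct packet access pattern; an illustrative (not from the patch) sk_buff context program, where each field must be loaded at its natural width or the JIT rejects it with -EOPNOTSUPP:

#include <linux/bpf.h>

int demo_ctx_reads(struct __sk_buff *skb)
{
	void *data     = (void *)(long)skb->data;	/* -> packet pointer reg */
	void *data_end = (void *)(long)skb->data_end;	/* -> pointer + length   */

	if (data + 14 > data_end)	/* room for an Ethernet header? */
		return 0;

	return skb->len;		/* -> packet length register */
}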
-static int mem_ldx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 size)
{
- u32 dst = reg_both(meta->insn.dst_reg * 2);
+ swreg dst = reg_both(meta->insn.dst_reg * 2);
- if (meta->insn.off != offsetof(struct xdp_md, data) &&
- meta->insn.off != offsetof(struct xdp_md, data_end))
+ switch (meta->insn.off) {
+ case offsetof(struct xdp_md, data):
+ if (size != FIELD_SIZEOF(struct xdp_md, data))
+ return -EOPNOTSUPP;
+ wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
+ break;
+ case offsetof(struct xdp_md, data_end):
+ if (size != FIELD_SIZEOF(struct xdp_md, data_end))
+ return -EOPNOTSUPP;
+ emit_alu(nfp_prog, dst,
+ plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
+ break;
+ default:
return -EOPNOTSUPP;
+ }
- emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
- if (meta->insn.off == offsetof(struct xdp_md, data))
- return 0;
+ return 0;
+}
- emit_alu(nfp_prog, dst, dst, ALU_OP_ADD, NFP_BPF_ABI_LEN);
+static int
+mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ swreg tmp_reg;
- return 0;
+ tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+
+ return data_ld_host_order(nfp_prog, meta->insn.src_reg * 2, tmp_reg,
+ meta->insn.dst_reg * 2, size);
+}
+
+static int
+mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ if (meta->ptr.type == PTR_TO_CTX) {
+ if (nfp_prog->type == BPF_PROG_TYPE_XDP)
+ return mem_ldx_xdp(nfp_prog, meta, size);
+ else
+ return mem_ldx_skb(nfp_prog, meta, size);
+ }
+
+ if (meta->ptr.type == PTR_TO_PACKET)
+ return mem_ldx_data(nfp_prog, meta, size);
+
+ if (meta->ptr.type == PTR_TO_STACK)
+ return mem_ldx_stack(nfp_prog, meta, size,
+ meta->ptr.off + meta->ptr.var_off.value);
+
+ return -EOPNOTSUPP;
+}
+
+static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_ldx(nfp_prog, meta, 1);
+}
+
+static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_ldx(nfp_prog, meta, 2);
}
static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- int ret;
+ return mem_ldx(nfp_prog, meta, 4);
+}
- if (nfp_prog->act == NN_ACT_XDP)
- ret = mem_ldx4_xdp(nfp_prog, meta);
- else
- ret = mem_ldx4_skb(nfp_prog, meta);
+static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_ldx(nfp_prog, meta, 8);
+}
- wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+static int
+mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ u64 imm = meta->insn.imm; /* sign extend */
+ swreg off_reg;
+
+ off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
- return ret;
+ return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
+ imm, size);
}
-static int mem_stx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
{
- if (meta->insn.off == offsetof(struct sk_buff, mark))
- return wrp_set_mark(nfp_prog, meta->insn.src_reg * 2);
+ if (meta->ptr.type == PTR_TO_PACKET)
+ return mem_st_data(nfp_prog, meta, size);
return -EOPNOTSUPP;
}
-static int mem_stx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_st(nfp_prog, meta, 1);
+}
+
+static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_st(nfp_prog, meta, 2);
+}
+
+static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_st(nfp_prog, meta, 4);
+}
+
+static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
+ return mem_st(nfp_prog, meta, 8);
+}
+
+static int
+mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ swreg off_reg;
+
+ off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+
+ return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
+ meta->insn.src_reg * 2, size);
+}
+
+static int
+mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size, unsigned int ptr_off)
+{
+ return mem_op_stack(nfp_prog, meta, size, ptr_off,
+ meta->insn.src_reg * 2, meta->insn.dst_reg * 2,
+ false, wrp_lmem_store);
+}
+
+static int
+mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ if (meta->ptr.type == PTR_TO_PACKET)
+ return mem_stx_data(nfp_prog, meta, size);
+
+ if (meta->ptr.type == PTR_TO_STACK)
+ return mem_stx_stack(nfp_prog, meta, size,
+ meta->ptr.off + meta->ptr.var_off.value);
+
return -EOPNOTSUPP;
}
+static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_stx(nfp_prog, meta, 1);
+}
+
+static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_stx(nfp_prog, meta, 2);
+}
+
static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- if (nfp_prog->act == NN_ACT_XDP)
- return mem_stx4_xdp(nfp_prog, meta);
- return mem_stx4_skb(nfp_prog, meta);
+ return mem_stx(nfp_prog, meta, 4);
+}
+
+static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_stx(nfp_prog, meta, 8);
}
static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -1202,8 +1641,10 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
- u32 or1 = reg_a(insn->dst_reg * 2), or2 = reg_b(insn->dst_reg * 2 + 1);
- u32 tmp_reg;
+ swreg or1, or2, tmp_reg;
+
+ or1 = reg_a(insn->dst_reg * 2);
+ or2 = reg_b(insn->dst_reg * 2 + 1);
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -1230,29 +1671,29 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
}
static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
+ return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
}
static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
}
static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
+ return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
- u32 tmp_reg;
+ swreg tmp_reg;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -1283,7 +1724,7 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
- u32 tmp_reg;
+ swreg tmp_reg;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -1292,6 +1733,7 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
emit_br(nfp_prog, BR_BNE, insn->off, 0);
+ return 0;
}
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
@@ -1327,22 +1769,22 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
}
static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
+ return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
}
static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
}
static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
+ return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}
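The four compare swaps above fix the mapping of BPF's unsigned jumps onto the machine's borrow/no-borrow branches. A standalone model of the corrected table (BR_BLO fires on borrow, BR_BHS on no borrow; "swap" means the subtract runs src - dst):

#include <stdbool.h>
#include <stdint.h>

static bool borrows(uint64_t a, uint64_t b)	/* does a - b borrow? */
{
	return a < b;
}

static bool demo_jgt(uint64_t dst, uint64_t src) { return borrows(src, dst); }  /* swap, BR_BLO */
static bool demo_jge(uint64_t dst, uint64_t src) { return !borrows(dst, src); } /* BR_BHS */
static bool demo_jlt(uint64_t dst, uint64_t src) { return borrows(dst, src); }  /* BR_BLO */
static bool demo_jle(uint64_t dst, uint64_t src) { return !borrows(src, dst); } /* swap, BR_BHS */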
static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -1375,6 +1817,7 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64,
[BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
[BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
+ [BPF_ALU64 | BPF_NEG] = neg_reg64,
[BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
[BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64,
[BPF_ALU | BPF_MOV | BPF_X] = mov_reg,
@@ -1389,7 +1832,9 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU | BPF_ADD | BPF_K] = add_imm,
[BPF_ALU | BPF_SUB | BPF_X] = sub_reg,
[BPF_ALU | BPF_SUB | BPF_K] = sub_imm,
+ [BPF_ALU | BPF_NEG] = neg_reg,
[BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
+ [BPF_ALU | BPF_END | BPF_X] = end_reg32,
[BPF_LD | BPF_IMM | BPF_DW] = imm_ld8,
[BPF_LD | BPF_ABS | BPF_B] = data_ld1,
[BPF_LD | BPF_ABS | BPF_H] = data_ld2,
@@ -1397,8 +1842,18 @@ static const instr_cb_t instr_cb[256] = {
[BPF_LD | BPF_IND | BPF_B] = data_ind_ld1,
[BPF_LD | BPF_IND | BPF_H] = data_ind_ld2,
[BPF_LD | BPF_IND | BPF_W] = data_ind_ld4,
+ [BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1,
+ [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2,
[BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4,
+ [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8,
+ [BPF_STX | BPF_MEM | BPF_B] = mem_stx1,
+ [BPF_STX | BPF_MEM | BPF_H] = mem_stx2,
[BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
+ [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8,
+ [BPF_ST | BPF_MEM | BPF_B] = mem_st1,
+ [BPF_ST | BPF_MEM | BPF_H] = mem_st2,
+ [BPF_ST | BPF_MEM | BPF_W] = mem_st4,
+ [BPF_ST | BPF_MEM | BPF_DW] = mem_st8,
[BPF_JMP | BPF_JA | BPF_K] = jump,
[BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
[BPF_JMP | BPF_JGT | BPF_K] = jgt_imm,
@@ -1510,37 +1965,9 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
static void nfp_intro(struct nfp_prog *nfp_prog)
{
- emit_alu(nfp_prog, pkt_reg(nfp_prog),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
-}
-
-static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog)
-{
- const u8 act2code[] = {
- [NN_ACT_TC_DROP] = 0x22,
- [NN_ACT_TC_REDIR] = 0x24
- };
- /* Target for aborts */
- nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
- wrp_immed(nfp_prog, reg_both(0), 0);
-
- /* Target for normal exits */
- nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
- /* Legacy TC mode:
- * 0 0x11 -> pass, count as stat0
- * -1 drop 0x22 -> drop, count as stat1
- * redir 0x24 -> redir, count as stat1
- * ife mark 0x21 -> pass, count as stat1
- * ife + tx 0x24 -> redir, count as stat1
- */
- emit_br_byte_neq(nfp_prog, reg_b(0), 0xff, 0, nfp_prog->tgt_done, 2);
- emit_alu(nfp_prog, reg_a(0),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
- emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
-
- emit_br(nfp_prog, BR_UNC, nfp_prog->tgt_done, 1);
- emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(act2code[nfp_prog->act]),
- SHF_SC_L_SHF, 16);
+ wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0));
+ emit_alu(nfp_prog, plen_reg(nfp_prog),
+ plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
}
static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
@@ -1562,8 +1989,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
- emit_alu(nfp_prog, reg_a(0),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
/* Target for normal exits */
@@ -1572,8 +1998,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
/* if R0 > 7 jump to abort */
emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
- emit_alu(nfp_prog, reg_a(0),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
wrp_immed(nfp_prog, reg_b(2), 0x41221211);
wrp_immed(nfp_prog, reg_b(3), 0x41001211);
@@ -1610,8 +2035,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
- emit_alu(nfp_prog, reg_a(0),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);
/* Target for normal exits */
@@ -1632,24 +2056,21 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
- emit_alu(nfp_prog, reg_a(0),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}
static void nfp_outro(struct nfp_prog *nfp_prog)
{
- switch (nfp_prog->act) {
- case NN_ACT_DIRECT:
+ switch (nfp_prog->type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
nfp_outro_tc_da(nfp_prog);
break;
- case NN_ACT_TC_DROP:
- case NN_ACT_TC_REDIR:
- nfp_outro_tc_legacy(nfp_prog);
- break;
- case NN_ACT_XDP:
+ case BPF_PROG_TYPE_XDP:
nfp_outro_xdp(nfp_prog);
break;
+ default:
+ WARN_ON(1);
}
}
@@ -1688,29 +2109,11 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
if (nfp_prog->error)
return nfp_prog->error;
- return nfp_fixup_branches(nfp_prog);
-}
-
-static int
-nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
- unsigned int cnt)
-{
- unsigned int i;
-
- for (i = 0; i < cnt; i++) {
- struct nfp_insn_meta *meta;
-
- meta = kzalloc(sizeof(*meta), GFP_KERNEL);
- if (!meta)
- return -ENOMEM;
-
- meta->insn = prog[i];
- meta->n = i;
-
- list_add_tail(&meta->l, &nfp_prog->insns);
- }
+ wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW);
+ if (nfp_prog->error)
+ return nfp_prog->error;
- return 0;
+ return nfp_fixup_branches(nfp_prog);
}
/* --- Optimizations --- */
@@ -1737,38 +2140,6 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
}
}
-/* Try to rename registers so that program uses only low ones */
-static int nfp_bpf_opt_reg_rename(struct nfp_prog *nfp_prog)
-{
- bool reg_used[MAX_BPF_REG] = {};
- u8 tgt_reg[MAX_BPF_REG] = {};
- struct nfp_insn_meta *meta;
- unsigned int i, j;
-
- list_for_each_entry(meta, &nfp_prog->insns, l) {
- if (meta->skip)
- continue;
-
- reg_used[meta->insn.src_reg] = true;
- reg_used[meta->insn.dst_reg] = true;
- }
-
- for (i = 0, j = 0; i < ARRAY_SIZE(tgt_reg); i++) {
- if (!reg_used[i])
- continue;
-
- tgt_reg[i] = j++;
- }
- nfp_prog->num_regs = j;
-
- list_for_each_entry(meta, &nfp_prog->insns, l) {
- meta->insn.src_reg = tgt_reg[meta->insn.src_reg];
- meta->insn.dst_reg = tgt_reg[meta->insn.dst_reg];
- }
-
- return 0;
-}
-
/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
@@ -1845,79 +2216,47 @@ static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
- int ret;
-
nfp_bpf_opt_reg_init(nfp_prog);
- ret = nfp_bpf_opt_reg_rename(nfp_prog);
- if (ret)
- return ret;
-
nfp_bpf_opt_ld_mask(nfp_prog);
nfp_bpf_opt_ld_shift(nfp_prog);
return 0;
}
-/**
- * nfp_bpf_jit() - translate BPF code into NFP assembly
- * @filter: kernel BPF filter struct
- * @prog_mem: memory to store assembler instructions
- * @act: action attached to this eBPF program
- * @prog_start: offset of the first instruction when loaded
- * @prog_done: where to jump on exit
- * @prog_sz: size of @prog_mem in instructions
- * @res: achieved parameters of translation results
- */
-int
-nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
- enum nfp_bpf_action_type act,
- unsigned int prog_start, unsigned int prog_done,
- unsigned int prog_sz, struct nfp_bpf_result *res)
+static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
{
- struct nfp_prog *nfp_prog;
- int ret;
+ int i;
- nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
- if (!nfp_prog)
- return -ENOMEM;
+ for (i = 0; i < nfp_prog->prog_len; i++) {
+ int err;
- INIT_LIST_HEAD(&nfp_prog->insns);
- nfp_prog->act = act;
- nfp_prog->start_off = prog_start;
- nfp_prog->tgt_done = prog_done;
+ err = nfp_ustore_check_valid_no_ecc(nfp_prog->prog[i]);
+ if (err)
+ return err;
- ret = nfp_prog_prepare(nfp_prog, filter->insnsi, filter->len);
- if (ret)
- goto out;
+ nfp_prog->prog[i] = nfp_ustore_calc_ecc_insn(nfp_prog->prog[i]);
- ret = nfp_prog_verify(nfp_prog, filter);
- if (ret)
- goto out;
+ ustore[i] = cpu_to_le64(nfp_prog->prog[i]);
+ }
- ret = nfp_bpf_optimize(nfp_prog);
- if (ret)
- goto out;
+ return 0;
+}
- if (nfp_prog->num_regs <= 7)
- nfp_prog->regs_per_thread = 16;
- else
- nfp_prog->regs_per_thread = 32;
+int nfp_bpf_jit(struct nfp_prog *nfp_prog)
+{
+ int ret;
- nfp_prog->prog = prog_mem;
- nfp_prog->__prog_alloc_len = prog_sz;
+ ret = nfp_bpf_optimize(nfp_prog);
+ if (ret)
+ return ret;
ret = nfp_translate(nfp_prog);
if (ret) {
pr_err("Translation failed with error %d (translated: %u)\n",
ret, nfp_prog->n_translated);
- ret = -EINVAL;
+ return -EINVAL;
}
- res->n_instr = nfp_prog->prog_len;
- res->dense_mode = nfp_prog->num_regs <= 7;
-out:
- nfp_prog_free(nfp_prog);
-
- return ret;
+ return nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)nfp_prog->prog);
}
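
The new single-argument nfp_bpf_jit() runs optimize, translate and a final ustore pass that validates each 64-bit instruction, inserts ECC bits and byte-swaps for the device. A standalone sketch of that last pass, with hypothetical stand-ins for the real nfp_ustore_check_valid_no_ecc()/nfp_ustore_calc_ecc_insn() helpers (the actual NFP ECC scheme is not reproduced here), could look like:

#include <endian.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-ins for the nfp_asm.c helpers; the real NFP
 * microstore ECC is more involved than a single parity bit.
 */
static int check_valid_no_ecc(uint64_t insn)
{
	return (insn >> 57) ? -EINVAL : 0; /* ECC bits must start out clear */
}

static uint64_t calc_ecc_insn(uint64_t insn)
{
	return insn | ((uint64_t)__builtin_parityll(insn) << 63);
}

/* Same shape as nfp_bpf_ustore_calc(): validate, add ECC, then store
 * little-endian for the device, failing on the first bad instruction.
 */
static int ustore_calc(uint64_t *prog, uint64_t *ustore, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		int err = check_valid_no_ecc(prog[i]);

		if (err)
			return err;
		prog[i] = calc_ecc_insn(prog[i]);
		ustore[i] = htole64(prog[i]);
	}
	return 0;
}

int main(void)
{
	uint64_t prog[2] = { 0x2a, 0x1f00 }, ustore[2];

	return ustore_calc(prog, ustore, 2) ? 1 : 0;
}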
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index be2cf10a2cd7..e379b78e86ef 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -42,9 +42,11 @@
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{
+#ifdef __LITTLE_ENDIAN
if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
return true;
+#endif
return false;
}
@@ -52,28 +54,25 @@ static int
nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog)
{
- struct tc_cls_bpf_offload cmd = {
- .prog = prog,
- };
+ bool running, xdp_running;
int ret;
if (!nfp_net_ebpf_capable(nn))
return -EINVAL;
- if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
- if (!nn->dp.bpf_offload_xdp)
- return prog ? -EBUSY : 0;
- cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
- } else {
- if (!prog)
- return 0;
- cmd.command = TC_CLSBPF_ADD;
- }
+ running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
+ xdp_running = running && nn->dp.bpf_offload_xdp;
+
+ if (!prog && !xdp_running)
+ return 0;
+ if (prog && running && !xdp_running)
+ return -EBUSY;
- ret = nfp_net_bpf_offload(nn, &cmd);
+ ret = nfp_net_bpf_offload(nn, prog, running);
/* Stop offload if replace not possible */
- if (ret && cmd.command == TC_CLSBPF_REPLACE)
+ if (ret && prog)
nfp_bpf_xdp_offload(app, nn, NULL);
+
nn->dp.bpf_offload_xdp = prog && !ret;
return ret;
}
@@ -83,59 +82,78 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
return nfp_net_ebpf_capable(nn) ? "BPF" : "";
}
-static int
-nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
-{
- struct nfp_net_bpf_priv *priv;
- int ret;
-
- /* Limit to single port, otherwise it's just a NIC */
- if (id > 0) {
- nfp_warn(app->cpp,
- "BPF NIC doesn't support more than one port right now\n");
- nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
- return PTR_ERR_OR_ZERO(nn->port);
- }
-
- priv = kmalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- nn->app_priv = priv;
- spin_lock_init(&priv->rx_filter_lock);
- setup_timer(&priv->rx_filter_stats_timer,
- nfp_net_filter_stats_timer, (unsigned long)nn);
-
- ret = nfp_app_nic_vnic_alloc(app, nn, id);
- if (ret)
- kfree(priv);
-
- return ret;
-}
-
static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
{
if (nn->dp.bpf_offload_xdp)
nfp_bpf_xdp_offload(app, nn, NULL);
- kfree(nn->app_priv);
}
-static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
- enum tc_setup_type type, void *type_data)
+static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
{
struct tc_cls_bpf_offload *cls_bpf = type_data;
- struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net *nn = cb_priv;
- if (type != TC_SETUP_CLSBPF || !nfp_net_ebpf_capable(nn) ||
- !is_classid_clsact_ingress(cls_bpf->common.classid) ||
+ if (type != TC_SETUP_CLSBPF ||
+ !tc_can_offload(nn->dp.netdev) ||
+ !nfp_net_ebpf_capable(nn) ||
cls_bpf->common.protocol != htons(ETH_P_ALL) ||
cls_bpf->common.chain_index)
return -EOPNOTSUPP;
-
if (nn->dp.bpf_offload_xdp)
return -EBUSY;
- return nfp_net_bpf_offload(nn, cls_bpf);
+ /* Only support TC direct action */
+ if (!cls_bpf->exts_integrated ||
+ tcf_exts_has_actions(cls_bpf->exts)) {
+ nn_err(nn, "only direct action with no legacy actions supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (cls_bpf->command) {
+ case TC_CLSBPF_REPLACE:
+ return nfp_net_bpf_offload(nn, cls_bpf->prog, true);
+ case TC_CLSBPF_ADD:
+ return nfp_net_bpf_offload(nn, cls_bpf->prog, false);
+ case TC_CLSBPF_DESTROY:
+ return nfp_net_bpf_offload(nn, NULL, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nfp_bpf_setup_tc_block(struct net_device *netdev,
+ struct tc_block_offload *f)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block,
+ nfp_bpf_setup_tc_block_cb,
+ nn, nn);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block,
+ nfp_bpf_setup_tc_block_cb,
+ nn);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return nfp_bpf_setup_tc_block(netdev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn)
@@ -149,10 +167,14 @@ const struct nfp_app_type app_bpf = {
.extra_cap = nfp_bpf_extra_cap,
- .vnic_alloc = nfp_bpf_vnic_alloc,
+ .vnic_alloc = nfp_app_nic_vnic_alloc,
.vnic_free = nfp_bpf_vnic_free,
.setup_tc = nfp_bpf_setup_tc,
.tc_busy = nfp_bpf_tc_busy,
.xdp_offload = nfp_bpf_xdp_offload,
+
+ .bpf_verifier_prep = nfp_bpf_verifier_prep,
+ .bpf_translate = nfp_bpf_translate,
+ .bpf_destroy = nfp_bpf_destroy,
};
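
The reworked nfp_bpf_xdp_offload() above replaces the tc_cls_bpf_offload command enum with two booleans, running and xdp_running. A minimal userspace model of the resulting decision table, using only the states visible in the hunk, is:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Decision table from the reworked nfp_bpf_xdp_offload():
 * - nothing installed and nothing requested: nothing to do;
 * - a non-XDP (TC) program owns the offload: refuse with -EBUSY;
 * - otherwise proceed to install/replace/remove via
 *   nfp_net_bpf_offload() (modelled here as returning 1).
 */
static int xdp_offload_decision(bool prog, bool running, bool xdp_running)
{
	if (!prog && !xdp_running)
		return 0;		/* no-op */
	if (prog && running && !xdp_running)
		return -EBUSY;		/* TC offload owns the device */
	return 1;			/* hand off to nfp_net_bpf_offload() */
}

int main(void)
{
	printf("%d\n", xdp_offload_decision(true, true, false));	/* -16 */
	printf("%d\n", xdp_offload_decision(false, false, false));	/* 0 */
	printf("%d\n", xdp_offload_decision(true, true, true));	/* 1 */
	return 0;
}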
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 4051e943f363..082a15f6dfb5 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -36,10 +36,11 @@
#include <linux/bitfield.h>
#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
#include <linux/list.h>
#include <linux/types.h>
-#include "../nfp_net.h"
+#include "../nfp_asm.h"
/* For branch fixup logic use up-most byte of branch instruction as scratch
* area. Remember to clear this before sending instructions to HW!
@@ -53,51 +54,29 @@ enum br_special {
};
enum static_regs {
- STATIC_REG_PKT = 1,
-#define REG_PKT_BANK ALU_DST_A
- STATIC_REG_IMM = 2, /* Bank AB */
+ STATIC_REG_IMM = 21, /* Bank AB */
+ STATIC_REG_STACK = 22, /* Bank A */
+ STATIC_REG_PKT_LEN = 22, /* Bank B */
};
-enum nfp_bpf_action_type {
- NN_ACT_TC_DROP,
- NN_ACT_TC_REDIR,
- NN_ACT_DIRECT,
- NN_ACT_XDP,
+enum pkt_vec {
+ PKT_VEC_PKT_LEN = 0,
+ PKT_VEC_PKT_PTR = 2,
};
-/* Software register representation, hardware encoding in asm.h */
-#define NN_REG_TYPE GENMASK(31, 24)
-#define NN_REG_VAL GENMASK(7, 0)
-
-enum nfp_bpf_reg_type {
- NN_REG_GPR_A = BIT(0),
- NN_REG_GPR_B = BIT(1),
- NN_REG_NNR = BIT(2),
- NN_REG_XFER = BIT(3),
- NN_REG_IMM = BIT(4),
- NN_REG_NONE = BIT(5),
-};
-
-#define NN_REG_GPR_BOTH (NN_REG_GPR_A | NN_REG_GPR_B)
-
-#define reg_both(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_BOTH))
-#define reg_a(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_A))
-#define reg_b(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_B))
-#define reg_nnr(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_NNR))
-#define reg_xfer(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_XFER))
-#define reg_imm(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_IMM))
-#define reg_none() (FIELD_PREP(NN_REG_TYPE, NN_REG_NONE))
+#define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN)
+#define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR)
-#define pkt_reg(np) reg_a((np)->regs_per_thread - STATIC_REG_PKT)
-#define imm_a(np) reg_a((np)->regs_per_thread - STATIC_REG_IMM)
-#define imm_b(np) reg_b((np)->regs_per_thread - STATIC_REG_IMM)
-#define imm_both(np) reg_both((np)->regs_per_thread - STATIC_REG_IMM)
+#define stack_reg(np) reg_a(STATIC_REG_STACK)
+#define stack_imm(np) imm_b(np)
+#define plen_reg(np) reg_b(STATIC_REG_PKT_LEN)
+#define pptr_reg(np) pv_ctm_ptr(np)
+#define imm_a(np) reg_a(STATIC_REG_IMM)
+#define imm_b(np) reg_b(STATIC_REG_IMM)
+#define imm_both(np) reg_both(STATIC_REG_IMM)
-#define NFP_BPF_ABI_FLAGS reg_nnr(0)
+#define NFP_BPF_ABI_FLAGS reg_imm(0)
#define NFP_BPF_ABI_FLAG_MARK 1
-#define NFP_BPF_ABI_MARK reg_nnr(1)
-#define NFP_BPF_ABI_PKT reg_nnr(2)
-#define NFP_BPF_ABI_LEN reg_nnr(3)
struct nfp_prog;
struct nfp_insn_meta;
@@ -113,6 +92,8 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
/**
* struct nfp_insn_meta - BPF instruction wrapper
* @insn: BPF instruction
+ * @ptr: pointer type for memory operations
+ * @ptr_not_const: pointer is not always constant
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
* @skip: skip this instruction (optimized out)
@@ -121,6 +102,8 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
*/
struct nfp_insn_meta {
struct bpf_insn insn;
+ struct bpf_reg_state ptr;
+ bool ptr_not_const;
unsigned int off;
unsigned short n;
bool skip;
@@ -156,15 +139,15 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
* @prog: machine code
* @prog_len: number of valid instructions in @prog array
* @__prog_alloc_len: alloc size of @prog array
- * @act: BPF program/action type (TC DA, TC with action, XDP etc.)
- * @num_regs: number of registers used by this program
- * @regs_per_thread: number of basic registers allocated per thread
+ * @verifier_meta: temporary storage for verifier's insn meta
+ * @type: BPF program type
* @start_off: address of the first instruction in the memory
* @tgt_out: jump target for normal exit
* @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
* @tgt_done: jump target to get the next packet
* @n_translated: number of successfully translated instructions (for errors)
* @error: error code if something went wrong
+ * @stack_depth: max stack depth from the verifier
* @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
*/
struct nfp_prog {
@@ -172,10 +155,9 @@ struct nfp_prog {
unsigned int prog_len;
unsigned int __prog_alloc_len;
- enum nfp_bpf_action_type act;
+ struct nfp_insn_meta *verifier_meta;
- unsigned int num_regs;
- unsigned int regs_per_thread;
+ enum bpf_prog_type type;
unsigned int start_off;
unsigned int tgt_out;
@@ -185,40 +167,26 @@ struct nfp_prog {
unsigned int n_translated;
int error;
- struct list_head insns;
-};
+ unsigned int stack_depth;
-struct nfp_bpf_result {
- unsigned int n_instr;
- bool dense_mode;
+ struct list_head insns;
};
-int
-nfp_bpf_jit(struct bpf_prog *filter, void *prog, enum nfp_bpf_action_type act,
- unsigned int prog_start, unsigned int prog_done,
- unsigned int prog_sz, struct nfp_bpf_result *res);
+int nfp_bpf_jit(struct nfp_prog *prog);
-int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog);
+extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
+struct netdev_bpf;
+struct nfp_app;
struct nfp_net;
-struct tc_cls_bpf_offload;
-
-/**
- * struct nfp_net_bpf_priv - per-vNIC BPF private data
- * @rx_filter: Filter offload statistics - dropped packets/bytes
- * @rx_filter_prev: Filter offload statistics - values from previous update
- * @rx_filter_change: Jiffies when statistics last changed
- * @rx_filter_stats_timer: Timer for polling filter offload statistics
- * @rx_filter_lock: Lock protecting timer state changes (teardown)
- */
-struct nfp_net_bpf_priv {
- struct nfp_stat_pair rx_filter, rx_filter_prev;
- unsigned long rx_filter_change;
- struct timer_list rx_filter_stats_timer;
- spinlock_t rx_filter_lock;
-};
-int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf);
-void nfp_net_filter_stats_timer(unsigned long data);
+int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
+ bool old_prog);
+int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf);
+int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog);
+int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog);
#endif
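
Note how the scratch registers are now pinned to fixed GPR numbers rather than derived from regs_per_thread, with the stack base and packet length sharing GPR 22 across banks A and B. A compile-time sketch of that layout, assuming only the bank split shown above:

#include <assert.h>

/* Fixed scratch registers from the new ABI: the immediate scratch is
 * available in both banks, while the stack base (bank A) and packet
 * length (bank B) share GPR 22 and are told apart by the bank alone.
 */
enum static_regs_sketch {
	REG_IMM     = 21,	/* banks A and B */
	REG_STACK   = 22,	/* bank A */
	REG_PKT_LEN = 22,	/* bank B */
};

/* Packet metadata now lives in local memory index 1 ("packet vector"). */
enum pkt_vec_sketch {
	VEC_PKT_LEN = 0,
	VEC_PKT_PTR = 2,
};

static_assert(REG_STACK == REG_PKT_LEN, "one GPR number, two banks");

int main(void) { return 0; }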
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index a88bb5bc0082..b6cee71f49d3 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -51,112 +51,114 @@
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"
-void nfp_net_filter_stats_timer(unsigned long data)
+static int
+nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
+ unsigned int cnt)
{
- struct nfp_net *nn = (void *)data;
- struct nfp_net_bpf_priv *priv;
- struct nfp_stat_pair latest;
-
- priv = nn->app_priv;
-
- spin_lock_bh(&priv->rx_filter_lock);
+ unsigned int i;
- if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
- mod_timer(&priv->rx_filter_stats_timer,
- jiffies + NFP_NET_STAT_POLL_IVL);
+ for (i = 0; i < cnt; i++) {
+ struct nfp_insn_meta *meta;
- spin_unlock_bh(&priv->rx_filter_lock);
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ return -ENOMEM;
- latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
- latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
+ meta->insn = prog[i];
+ meta->n = i;
- if (latest.pkts != priv->rx_filter.pkts)
- priv->rx_filter_change = jiffies;
+ list_add_tail(&meta->l, &nfp_prog->insns);
+ }
- priv->rx_filter = latest;
+ return 0;
}
-static void nfp_net_bpf_stats_reset(struct nfp_net *nn)
+static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
- struct nfp_net_bpf_priv *priv = nn->app_priv;
+ struct nfp_insn_meta *meta, *tmp;
- priv->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
- priv->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
- priv->rx_filter_prev = priv->rx_filter;
- priv->rx_filter_change = jiffies;
+ list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
+ list_del(&meta->l);
+ kfree(meta);
+ }
+ kfree(nfp_prog);
}
-static int
-nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
+int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf)
{
- struct nfp_net_bpf_priv *priv = nn->app_priv;
- u64 bytes, pkts;
+ struct bpf_prog *prog = bpf->verifier.prog;
+ struct nfp_prog *nfp_prog;
+ int ret;
- pkts = priv->rx_filter.pkts - priv->rx_filter_prev.pkts;
- bytes = priv->rx_filter.bytes - priv->rx_filter_prev.bytes;
- bytes -= pkts * ETH_HLEN;
+ nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
+ if (!nfp_prog)
+ return -ENOMEM;
+ prog->aux->offload->dev_priv = nfp_prog;
- priv->rx_filter_prev = priv->rx_filter;
+ INIT_LIST_HEAD(&nfp_prog->insns);
+ nfp_prog->type = prog->type;
- tcf_exts_stats_update(cls_bpf->exts,
- bytes, pkts, priv->rx_filter_change);
+ ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
+ if (ret)
+ goto err_free;
- return 0;
-}
+ nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
+ bpf->verifier.ops = &nfp_bpf_analyzer_ops;
-static int
-nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
-{
- const struct tc_action *a;
- LIST_HEAD(actions);
+ return 0;
- if (!cls_bpf->exts)
- return NN_ACT_XDP;
+err_free:
+ nfp_prog_free(nfp_prog);
- /* TC direct action */
- if (cls_bpf->exts_integrated) {
- if (!tcf_exts_has_actions(cls_bpf->exts))
- return NN_ACT_DIRECT;
+ return ret;
+}
+int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog)
+{
+ struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
+ unsigned int stack_size;
+ unsigned int max_instr;
+
+ stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
+ if (prog->aux->stack_depth > stack_size) {
+ nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
+ prog->aux->stack_depth, stack_size);
return -EOPNOTSUPP;
}
- /* TC legacy mode */
- if (!tcf_exts_has_one_action(cls_bpf->exts))
- return -EOPNOTSUPP;
+ nfp_prog->stack_depth = prog->aux->stack_depth;
+ nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
+ nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
- tcf_exts_to_list(cls_bpf->exts, &actions);
- list_for_each_entry(a, &actions, list) {
- if (is_tcf_gact_shot(a))
- return NN_ACT_TC_DROP;
+ max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
+ nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
- if (is_tcf_mirred_egress_redirect(a) &&
- tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex)
- return NN_ACT_TC_REDIR;
- }
+ nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
+ if (!nfp_prog->prog)
+ return -ENOMEM;
- return -EOPNOTSUPP;
+ return nfp_bpf_jit(nfp_prog);
}
-static int
-nfp_net_bpf_offload_prepare(struct nfp_net *nn,
- struct tc_cls_bpf_offload *cls_bpf,
- struct nfp_bpf_result *res,
- void **code, dma_addr_t *dma_addr, u16 max_instr)
+int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog)
{
- unsigned int code_sz = max_instr * sizeof(u64);
- enum nfp_bpf_action_type act;
- u16 start_off, done_off;
- unsigned int max_mtu;
- int ret;
+ struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
- if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
- return -EOPNOTSUPP;
+ kfree(nfp_prog->prog);
+ nfp_prog_free(nfp_prog);
- ret = nfp_net_bpf_get_act(nn, cls_bpf);
- if (ret < 0)
- return ret;
- act = ret;
+ return 0;
+}
+
+static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
+{
+ struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
+ unsigned int max_mtu;
+ dma_addr_t dma_addr;
+ int err;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
if (max_mtu < nn->dp.netdev->mtu) {
@@ -164,134 +166,80 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
return -EOPNOTSUPP;
}
- start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
- done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
-
- *code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
- if (!*code)
+ dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
+ nfp_prog->prog_len * sizeof(u64),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(nn->dp.dev, dma_addr))
return -ENOMEM;
- ret = nfp_bpf_jit(cls_bpf->prog, *code, act, start_off, done_off,
- max_instr, res);
- if (ret)
- goto out;
+ nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
+ nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
- return 0;
+ /* Load up the JITed code */
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
+ if (err)
+ nn_err(nn, "FW command error while loading BPF: %d\n", err);
-out:
- dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
- return ret;
+ dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
+ DMA_TO_DEVICE);
+
+ return err;
}
-static void
-nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
- void *code, dma_addr_t dma_addr,
- unsigned int code_sz, unsigned int n_instr,
- bool dense_mode)
+static void nfp_net_bpf_start(struct nfp_net *nn)
{
- struct nfp_net_bpf_priv *priv = nn->app_priv;
- u64 bpf_addr = dma_addr;
int err;
- nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
-
- if (dense_mode)
- bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;
-
- nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
- nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, bpf_addr);
-
- /* Load up the JITed code */
- err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
- if (err)
- nn_err(nn, "FW command error while loading BPF: %d\n", err);
-
/* Enable passing packets through BPF function */
nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
if (err)
nn_err(nn, "FW command error while enabling BPF: %d\n", err);
-
- dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);
-
- nfp_net_bpf_stats_reset(nn);
- mod_timer(&priv->rx_filter_stats_timer,
- jiffies + NFP_NET_STAT_POLL_IVL);
}
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
- struct nfp_net_bpf_priv *priv = nn->app_priv;
-
if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
return 0;
- spin_lock_bh(&priv->rx_filter_lock);
nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
- spin_unlock_bh(&priv->rx_filter_lock);
nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
- del_timer_sync(&priv->rx_filter_stats_timer);
- nn->dp.bpf_offload_skip_sw = 0;
-
return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}
-int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
+int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
+ bool old_prog)
{
- struct nfp_bpf_result res;
- dma_addr_t dma_addr;
- u16 max_instr;
- void *code;
int err;
- max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
+ if (prog && !prog->aux->offload)
+ return -EINVAL;
- switch (cls_bpf->command) {
- case TC_CLSBPF_REPLACE:
- /* There is nothing stopping us from implementing seamless
- * replace but the simple method of loading I adopted in
- * the firmware does not handle atomic replace (i.e. we have to
- * stop the BPF offload and re-enable it). Leaking-in a few
- * frames which didn't have BPF applied in the hardware should
- * be fine if software fallback is available, though.
- */
- if (nn->dp.bpf_offload_skip_sw)
- return -EBUSY;
-
- err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
- &dma_addr, max_instr);
- if (err)
- return err;
-
- nfp_net_bpf_stop(nn);
- nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
- dma_addr, max_instr * sizeof(u64),
- res.n_instr, res.dense_mode);
- return 0;
+ if (prog && old_prog) {
+ u8 cap;
- case TC_CLSBPF_ADD:
- if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
+ cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
+ if (!(cap & NFP_NET_BPF_CAP_RELO)) {
+ nn_err(nn, "FW does not support live reload\n");
return -EBUSY;
+ }
+ }
- err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
- &dma_addr, max_instr);
- if (err)
- return err;
-
- nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
- dma_addr, max_instr * sizeof(u64),
- res.n_instr, res.dense_mode);
- return 0;
+ /* Something else is loaded, different program type? */
+ if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
+ return -EBUSY;
- case TC_CLSBPF_DESTROY:
+ if (old_prog && !prog)
return nfp_net_bpf_stop(nn);
- case TC_CLSBPF_STATS:
- return nfp_net_bpf_stats_update(nn, cls_bpf);
+ err = nfp_net_bpf_load(nn, prog);
+ if (err)
+ return err;
+
+ if (!old_prog)
+ nfp_net_bpf_start(nn);
- default:
- return -EOPNOTSUPP;
- }
+ return 0;
}
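
nfp_bpf_translate() sizes the firmware stack from a single byte register counted in 64-byte units, and rejects programs whose verifier-reported stack depth exceeds it. A sketch of that capacity check under the same encoding:

#include <errno.h>
#include <stdio.h>

/* NFP_NET_CFG_BPF_STACK_SZ holds the FW stack size in 64-byte units;
 * a program whose verifier-reported stack_depth exceeds the resulting
 * byte count cannot be offloaded.
 */
static int check_stack_depth(unsigned char stack_sz_reg,
			     unsigned int stack_depth)
{
	unsigned int stack_size = (unsigned int)stack_sz_reg * 64;

	if (stack_depth > stack_size) {
		fprintf(stderr, "stack too large: program %uB > FW stack %uB\n",
			stack_depth, stack_size);
		return -EOPNOTSUPP;
	}
	return 0;
}

int main(void)
{
	return check_stack_depth(4, 512) ? 1 : 0; /* 512B > 256B: rejected */
}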
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 5b783a91b115..8d43491ddd6b 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -40,12 +40,6 @@
#include "main.h"
-/* Analyzer/verifier definitions */
-struct nfp_bpf_analyzer_priv {
- struct nfp_prog *prog;
- struct nfp_insn_meta *meta;
-};
-
static struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int insn_idx, unsigned int n_insns)
@@ -76,12 +70,12 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
- const struct bpf_verifier_env *env)
+ struct bpf_verifier_env *env)
{
- const struct bpf_reg_state *reg0 = &env->cur_state.regs[0];
+ const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
u64 imm;
- if (nfp_prog->act == NN_ACT_XDP)
+ if (nfp_prog->type == BPF_PROG_TYPE_XDP)
return 0;
if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
@@ -94,13 +88,8 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
}
imm = reg0->var_off.value;
- if (nfp_prog->act != NN_ACT_DIRECT && imm != 0 && (imm & ~0U) != ~0U) {
- pr_info("unsupported exit state: %d, imm: %llx\n",
- reg0->type, imm);
- return -EINVAL;
- }
-
- if (nfp_prog->act == NN_ACT_DIRECT && imm <= TC_ACT_REDIRECT &&
+ if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS &&
+ imm <= TC_ACT_REDIRECT &&
imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
imm != TC_ACT_QUEUED) {
pr_info("unsupported exit state: %d, imm: %llx\n",
@@ -112,29 +101,76 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
}
static int
-nfp_bpf_check_ctx_ptr(struct nfp_prog *nfp_prog,
- const struct bpf_verifier_env *env, u8 reg)
+nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
+ struct nfp_insn_meta *meta,
+ const struct bpf_reg_state *reg)
{
- if (env->cur_state.regs[reg].type != PTR_TO_CTX)
+ s32 old_off, new_off;
+
+ if (!tnum_is_const(reg->var_off)) {
+ pr_info("variable ptr stack access\n");
return -EINVAL;
+ }
- return 0;
+ if (meta->ptr.type == NOT_INIT)
+ return 0;
+
+ old_off = meta->ptr.off + meta->ptr.var_off.value;
+ new_off = reg->off + reg->var_off.value;
+
+ meta->ptr_not_const |= old_off != new_off;
+
+ if (!meta->ptr_not_const)
+ return 0;
+
+ if (old_off % 4 == new_off % 4)
+ return 0;
+
+ pr_info("stack access changed location was:%d is:%d\n",
+ old_off, new_off);
+ return -EINVAL;
}
static int
-nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
+nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ struct bpf_verifier_env *env, u8 reg_no)
{
- struct nfp_bpf_analyzer_priv *priv = env->analyzer_priv;
- struct nfp_insn_meta *meta = priv->meta;
+ const struct bpf_reg_state *reg = cur_regs(env) + reg_no;
+ int err;
- meta = nfp_bpf_goto_meta(priv->prog, meta, insn_idx, env->prog->len);
- priv->meta = meta;
+ if (reg->type != PTR_TO_CTX &&
+ reg->type != PTR_TO_STACK &&
+ reg->type != PTR_TO_PACKET) {
+ pr_info("unsupported ptr type: %d\n", reg->type);
+ return -EINVAL;
+ }
- if (meta->insn.src_reg == BPF_REG_10 ||
- meta->insn.dst_reg == BPF_REG_10) {
- pr_err("stack not yet supported\n");
+ if (reg->type == PTR_TO_STACK) {
+ err = nfp_bpf_check_stack_access(nfp_prog, meta, reg);
+ if (err)
+ return err;
+ }
+
+ if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
+ pr_info("ptr type changed for instruction %d -> %d\n",
+ meta->ptr.type, reg->type);
return -EINVAL;
}
+
+ meta->ptr = *reg;
+
+ return 0;
+}
+
+static int
+nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
+{
+ struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
+ struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
+
+ meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
+ nfp_prog->verifier_meta = meta;
+
if (meta->insn.src_reg >= MAX_BPF_REG ||
meta->insn.dst_reg >= MAX_BPF_REG) {
pr_err("program uses extended registers - jit hardening?\n");
@@ -142,37 +178,18 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
}
if (meta->insn.code == (BPF_JMP | BPF_EXIT))
- return nfp_bpf_check_exit(priv->prog, env);
+ return nfp_bpf_check_exit(nfp_prog, env);
if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
- return nfp_bpf_check_ctx_ptr(priv->prog, env,
- meta->insn.src_reg);
+ return nfp_bpf_check_ptr(nfp_prog, meta, env,
+ meta->insn.src_reg);
if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
- return nfp_bpf_check_ctx_ptr(priv->prog, env,
- meta->insn.dst_reg);
+ return nfp_bpf_check_ptr(nfp_prog, meta, env,
+ meta->insn.dst_reg);
return 0;
}
-static const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
+const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
.insn_hook = nfp_verify_insn,
};
-
-int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog)
-{
- struct nfp_bpf_analyzer_priv *priv;
- int ret;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->prog = nfp_prog;
- priv->meta = nfp_prog_first_meta(nfp_prog);
-
- ret = bpf_analyzer(prog, &nfp_bpf_analyzer_ops, priv);
-
- kfree(priv);
-
- return ret;
-}
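
The new nfp_bpf_check_stack_access() accepts a stack pointer whose constant offset differs between verifier paths only when all offsets share the same alignment modulo 4. A standalone model of that rule, with the not-const flag carried across calls as in the meta struct:

#include <stdbool.h>
#include <stdio.h>

/* A stack pointer seen with different constant offsets stays usable
 * as long as every offset lands on the same byte lane within the
 * 32-bit words the JIT emits; otherwise the access is rejected.
 */
static bool stack_access_ok(int old_off, int new_off, bool *not_const)
{
	*not_const |= old_off != new_off;
	if (!*not_const)
		return true;
	return old_off % 4 == new_off % 4;
}

int main(void)
{
	bool nc = false;

	printf("%d\n", stack_access_ok(-8, -8, &nc));	/* 1: constant */
	printf("%d\n", stack_access_ok(-8, -12, &nc));	/* 1: same lane */
	printf("%d\n", stack_access_ok(-8, -10, &nc));	/* 0: lane moved */
	return 0;
}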
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 8ea9320014ee..c1c595f8bb87 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -36,7 +36,9 @@
#include <net/switchdev.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
+#include <net/tc_act/tc_tunnel_key.h>
#include "cmsg.h"
#include "main.h"
@@ -45,13 +47,9 @@
static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
size_t act_size = sizeof(struct nfp_fl_pop_vlan);
- u16 tmp_pop_vlan_op;
- tmp_pop_vlan_op =
- FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
- FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_POP_VLAN);
-
- pop_vlan->a_op = cpu_to_be16(tmp_pop_vlan_op);
+ pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
+ pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
pop_vlan->reserved = 0;
}
@@ -60,64 +58,373 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
const struct tc_action *action)
{
size_t act_size = sizeof(struct nfp_fl_push_vlan);
- struct tcf_vlan *vlan = to_vlan(action);
u16 tmp_push_vlan_tci;
- u16 tmp_push_vlan_op;
-
- tmp_push_vlan_op =
- FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
- FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_PUSH_VLAN);
- push_vlan->a_op = cpu_to_be16(tmp_push_vlan_op);
- /* Set action push vlan parameters. */
+ push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
+ push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
push_vlan->reserved = 0;
push_vlan->vlan_tpid = tcf_vlan_push_proto(action);
tmp_push_vlan_tci =
- FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, vlan->tcfv_push_prio) |
- FIELD_PREP(NFP_FL_PUSH_VLAN_VID, vlan->tcfv_push_vid) |
+ FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
+ FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
NFP_FL_PUSH_VLAN_CFI;
push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
+static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
+ enum nfp_flower_tun_type tun_type)
+{
+ if (!out_dev->rtnl_link_ops)
+ return false;
+
+ if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
+ return tun_type == NFP_FL_TUNNEL_VXLAN;
+
+ return false;
+}
+
static int
nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
struct nfp_fl_payload *nfp_flow, bool last,
- struct net_device *in_dev)
+ struct net_device *in_dev, enum nfp_flower_tun_type tun_type,
+ int *tun_out_cnt)
{
size_t act_size = sizeof(struct nfp_fl_output);
struct net_device *out_dev;
- u16 tmp_output_op;
+ u16 tmp_flags;
int ifindex;
- /* Set action opcode to output action. */
- tmp_output_op =
- FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
- FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_OUTPUT);
-
- output->a_op = cpu_to_be16(tmp_output_op);
-
- /* Set action output parameters. */
- output->flags = cpu_to_be16(last ? NFP_FL_OUT_FLAGS_LAST : 0);
+ output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
+ output->head.len_lw = act_size >> NFP_FL_LW_SIZ;
ifindex = tcf_mirred_ifindex(action);
out_dev = __dev_get_by_index(dev_net(in_dev), ifindex);
if (!out_dev)
return -EOPNOTSUPP;
- /* Only offload egress ports are on the same device as the ingress
- * port.
+ tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;
+
+ if (tun_type) {
+ /* Verify the egress netdev matches the tunnel type. */
+ if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
+ return -EOPNOTSUPP;
+
+ if (*tun_out_cnt)
+ return -EOPNOTSUPP;
+ (*tun_out_cnt)++;
+
+ output->flags = cpu_to_be16(tmp_flags |
+ NFP_FL_OUT_FLAGS_USE_TUN);
+ output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
+ } else {
+ /* Set action output parameters. */
+ output->flags = cpu_to_be16(tmp_flags);
+
+ /* Only offload if egress ports are on the same device as the
+ * ingress port.
+ */
+ if (!switchdev_port_same_parent_id(in_dev, out_dev))
+ return -EOPNOTSUPP;
+ if (!nfp_netdev_is_nfp_repr(out_dev))
+ return -EOPNOTSUPP;
+
+ output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
+ if (!output->port)
+ return -EOPNOTSUPP;
+ }
+ nfp_flow->meta.shortcut = output->port;
+
+ return 0;
+}
+
+static bool nfp_fl_supported_tun_port(const struct tc_action *action)
+{
+ struct ip_tunnel_info *tun = tcf_tunnel_info(action);
+
+ return tun->key.tp_dst == htons(NFP_FL_VXLAN_PORT);
+}
+
+static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
+{
+ size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
+ struct nfp_fl_pre_tunnel *pre_tun_act;
+
+ /* The pre-tunnel action must be first in the action list.
+ * If other actions already exist, they need to be pushed forward.
*/
- if (!switchdev_port_same_parent_id(in_dev, out_dev))
+ if (act_len)
+ memmove(act_data + act_size, act_data, act_len);
+
+ pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;
+
+ memset(pre_tun_act, 0, act_size);
+
+ pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
+ pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ return pre_tun_act;
+}
+
+static int
+nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan,
+ const struct tc_action *action,
+ struct nfp_fl_pre_tunnel *pre_tun)
+{
+ struct ip_tunnel_info *vxlan = tcf_tunnel_info(action);
+ size_t act_size = sizeof(struct nfp_fl_set_vxlan);
+ u32 tmp_set_vxlan_type_index = 0;
+ /* Currently only one pre-tunnel is supported, so the index is always 0. */
+ int pretun_idx = 0;
+
+ if (vxlan->options_len) {
+ /* Options (e.g. VXLAN-GPE) are not supported. */
return -EOPNOTSUPP;
- if (!nfp_netdev_is_nfp_repr(out_dev))
+ }
+
+ set_vxlan->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+ set_vxlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ /* Set tunnel type and pre-tunnel index. */
+ tmp_set_vxlan_type_index |=
+ FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, NFP_FL_TUNNEL_VXLAN) |
+ FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
+
+ set_vxlan->tun_type_index = cpu_to_be32(tmp_set_vxlan_type_index);
+
+ set_vxlan->tun_id = vxlan->key.tun_id;
+ set_vxlan->tun_flags = vxlan->key.tun_flags;
+ set_vxlan->ipv4_ttl = vxlan->key.ttl;
+ set_vxlan->ipv4_tos = vxlan->key.tos;
+
+ /* Complete pre_tunnel action. */
+ pre_tun->ipv4_dst = vxlan->key.u.ipv4.dst;
+
+ return 0;
+}
+
+static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
+{
+ u32 oldvalue = get_unaligned((u32 *)p_exact);
+ u32 oldmask = get_unaligned((u32 *)p_mask);
+
+ value &= mask;
+ value |= oldvalue & ~mask;
+
+ put_unaligned(oldmask | mask, (u32 *)p_mask);
+ put_unaligned(value, (u32 *)p_exact);
+}
+
+static int
+nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_eth *set_eth)
+{
+ u32 exact, mask;
+
+ if (off + 4 > ETH_ALEN * 2)
return -EOPNOTSUPP;
- output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
- if (!output->port)
+ mask = ~tcf_pedit_mask(action, idx);
+ exact = tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
return -EOPNOTSUPP;
- nfp_flow->meta.shortcut = output->port;
+ nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
+ &set_eth->eth_addr_mask[off]);
+
+ set_eth->reserved = cpu_to_be16(0);
+ set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
+ set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;
+
+ return 0;
+}
+
+static int
+nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_ip4_addrs *set_ip_addr)
+{
+ __be32 exact, mask;
+
+ /* We expect tcf_pedit to return a big-endian value */
+ mask = (__force __be32)~tcf_pedit_mask(action, idx);
+ exact = (__force __be32)tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
+ return -EOPNOTSUPP;
+
+ switch (off) {
+ case offsetof(struct iphdr, daddr):
+ set_ip_addr->ipv4_dst_mask = mask;
+ set_ip_addr->ipv4_dst = exact;
+ break;
+ case offsetof(struct iphdr, saddr):
+ set_ip_addr->ipv4_src_mask = mask;
+ set_ip_addr->ipv4_src = exact;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ set_ip_addr->reserved = cpu_to_be16(0);
+ set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
+ set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;
+
+ return 0;
+}
+
+static void
+nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
+ struct nfp_fl_set_ipv6_addr *ip6)
+{
+ ip6->ipv6[idx % 4].mask = mask;
+ ip6->ipv6[idx % 4].exact = exact;
+
+ ip6->reserved = cpu_to_be16(0);
+ ip6->head.jump_id = opcode_tag;
+ ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
+}
+
+static int
+nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_ipv6_addr *ip_dst,
+ struct nfp_fl_set_ipv6_addr *ip_src)
+{
+ __be32 exact, mask;
+
+ /* We expect tcf_pedit to return a big-endian value */
+ mask = (__force __be32)~tcf_pedit_mask(action, idx);
+ exact = (__force __be32)tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
+ return -EOPNOTSUPP;
+
+ if (off < offsetof(struct ipv6hdr, saddr))
+ return -EOPNOTSUPP;
+ else if (off < offsetof(struct ipv6hdr, daddr))
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
+ exact, mask, ip_src);
+ else if (off < offsetof(struct ipv6hdr, daddr) +
+ sizeof(struct in6_addr))
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
+ exact, mask, ip_dst);
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int
+nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_tport *set_tport, int opcode)
+{
+ u32 exact, mask;
+
+ if (off)
+ return -EOPNOTSUPP;
+
+ mask = ~tcf_pedit_mask(action, idx);
+ exact = tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
+ return -EOPNOTSUPP;
+
+ nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
+ set_tport->tp_port_mask);
+
+ set_tport->reserved = cpu_to_be16(0);
+ set_tport->head.jump_id = opcode;
+ set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;
+
+ return 0;
+}
+
+static int
+nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
+{
+ struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
+ struct nfp_fl_set_ip4_addrs set_ip_addr;
+ struct nfp_fl_set_tport set_tport;
+ struct nfp_fl_set_eth set_eth;
+ enum pedit_header_type htype;
+ int idx, nkeys, err;
+ size_t act_size;
+ u32 offset, cmd;
+
+ memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
+ memset(&set_ip6_src, 0, sizeof(set_ip6_src));
+ memset(&set_ip_addr, 0, sizeof(set_ip_addr));
+ memset(&set_tport, 0, sizeof(set_tport));
+ memset(&set_eth, 0, sizeof(set_eth));
+ nkeys = tcf_pedit_nkeys(action);
+
+ for (idx = 0; idx < nkeys; idx++) {
+ cmd = tcf_pedit_cmd(action, idx);
+ htype = tcf_pedit_htype(action, idx);
+ offset = tcf_pedit_offset(action, idx);
+
+ if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
+ return -EOPNOTSUPP;
+
+ switch (htype) {
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ err = nfp_fl_set_eth(action, idx, offset, &set_eth);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
+ &set_ip6_src);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ err = nfp_fl_set_tport(action, idx, offset, &set_tport,
+ NFP_FL_ACTION_OPCODE_SET_TCP);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+ err = nfp_fl_set_tport(action, idx, offset, &set_tport,
+ NFP_FL_ACTION_OPCODE_SET_UDP);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (err)
+ return err;
+ }
+
+ if (set_eth.head.len_lw) {
+ act_size = sizeof(set_eth);
+ memcpy(nfp_action, &set_eth, act_size);
+ *a_len += act_size;
+ } else if (set_ip_addr.head.len_lw) {
+ act_size = sizeof(set_ip_addr);
+ memcpy(nfp_action, &set_ip_addr, act_size);
+ *a_len += act_size;
+ } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
+ /* TC compiles a set of both src and dst IPv6 addresses into one
+ * action, but the hardware requires two separate actions.
+ */
+ act_size = sizeof(set_ip6_src);
+ memcpy(nfp_action, &set_ip6_src, act_size);
+ *a_len += act_size;
+
+ act_size = sizeof(set_ip6_dst);
+ memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
+ act_size);
+ *a_len += act_size;
+ } else if (set_ip6_dst.head.len_lw) {
+ act_size = sizeof(set_ip6_dst);
+ memcpy(nfp_action, &set_ip6_dst, act_size);
+ *a_len += act_size;
+ } else if (set_ip6_src.head.len_lw) {
+ act_size = sizeof(set_ip6_src);
+ memcpy(nfp_action, &set_ip6_src, act_size);
+ *a_len += act_size;
+ } else if (set_tport.head.len_lw) {
+ act_size = sizeof(set_tport);
+ memcpy(nfp_action, &set_tport, act_size);
+ *a_len += act_size;
+ }
return 0;
}
@@ -125,8 +432,11 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
static int
nfp_flower_loop_action(const struct tc_action *a,
struct nfp_fl_payload *nfp_fl, int *a_len,
- struct net_device *netdev)
+ struct net_device *netdev,
+ enum nfp_flower_tun_type *tun_type, int *tun_out_cnt)
{
+ struct nfp_fl_pre_tunnel *pre_tun;
+ struct nfp_fl_set_vxlan *s_vxl;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_pop_vlan *pop_v;
struct nfp_fl_output *output;
@@ -139,7 +449,8 @@ nfp_flower_loop_action(const struct tc_action *a,
return -EOPNOTSUPP;
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_output(output, a, nfp_fl, true, netdev);
+ err = nfp_fl_output(output, a, nfp_fl, true, netdev, *tun_type,
+ tun_out_cnt);
if (err)
return err;
@@ -149,7 +460,8 @@ nfp_flower_loop_action(const struct tc_action *a,
return -EOPNOTSUPP;
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_output(output, a, nfp_fl, false, netdev);
+ err = nfp_fl_output(output, a, nfp_fl, false, netdev, *tun_type,
+ tun_out_cnt);
if (err)
return err;
@@ -172,6 +484,32 @@ nfp_flower_loop_action(const struct tc_action *a,
nfp_fl_push_vlan(psh_v, a);
*a_len += sizeof(struct nfp_fl_push_vlan);
+ } else if (is_tcf_tunnel_set(a) && nfp_fl_supported_tun_port(a)) {
+ /* A pre-tunnel action is required for tunnel encap.
+ * It directs the NFP to check for next-hop entries; if none is
+ * found, the packet falls back to the host before other actions
+ * are applied.
+ */
+ if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
+ sizeof(struct nfp_fl_set_vxlan) > NFP_FL_MAX_A_SIZ)
+ return -EOPNOTSUPP;
+
+ *tun_type = NFP_FL_TUNNEL_VXLAN;
+ pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+ *a_len += sizeof(struct nfp_fl_pre_tunnel);
+
+ s_vxl = (struct nfp_fl_set_vxlan *)&nfp_fl->action_data[*a_len];
+ err = nfp_fl_set_vxlan(s_vxl, a, pre_tun);
+ if (err)
+ return err;
+
+ *a_len += sizeof(struct nfp_fl_set_vxlan);
+ } else if (is_tcf_tunnel_release(a)) {
+ /* Tunnel decap is handled by default, so accept the action. */
+ return 0;
+ } else if (is_tcf_pedit(a)) {
+ if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len))
+ return -EOPNOTSUPP;
} else {
/* Currently we do not handle any other actions. */
return -EOPNOTSUPP;
@@ -184,18 +522,22 @@ int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow)
{
- int act_len, act_cnt, err;
+ int act_len, act_cnt, err, tun_out_cnt;
+ enum nfp_flower_tun_type tun_type;
const struct tc_action *a;
LIST_HEAD(actions);
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
nfp_flow->meta.act_len = 0;
+ tun_type = NFP_FL_TUNNEL_NONE;
act_len = 0;
act_cnt = 0;
+ tun_out_cnt = 0;
tcf_exts_to_list(flow->exts, &actions);
list_for_each_entry(a, &actions, list) {
- err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev);
+ err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev,
+ &tun_type, &tun_out_cnt);
if (err)
return err;
act_cnt++;
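
Several of the pedit handlers above funnel through nfp_fl_set_helper32(), which folds successive 32-bit exact/mask keys into one accumulated pair. A userspace model of that merge:

#include <stdint.h>
#include <stdio.h>

/* Merge one 32-bit pedit key into an accumulated exact/mask pair:
 * bits covered by the new mask take the new value, bits outside it
 * keep whatever an earlier key set, and the mask grows monotonically.
 */
static void set_helper32(uint32_t value, uint32_t mask,
			 uint32_t *exact, uint32_t *accum_mask)
{
	value &= mask;
	value |= *exact & ~mask;

	*accum_mask |= mask;
	*exact = value;
}

int main(void)
{
	uint32_t exact = 0, mask = 0;

	set_helper32(0x0000beef, 0x0000ffff, &exact, &mask);
	set_helper32(0xdead0000, 0xffff0000, &exact, &mask);
	printf("%08x/%08x\n", exact, mask);	/* deadbeef/ffffffff */
	return 0;
}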
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index c3ca05d10fe1..e98bb9cdb6a3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -38,17 +38,10 @@
#include <net/dst_metadata.h>
#include "main.h"
-#include "../nfpcore/nfp_cpp.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "./cmsg.h"
-#define nfp_flower_cmsg_warn(app, fmt, args...) \
- do { \
- if (net_ratelimit()) \
- nfp_warn((app)->cpp, fmt, ## args); \
- } while (0)
-
static struct nfp_flower_cmsg_hdr *
nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
{
@@ -57,14 +50,14 @@ nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
struct sk_buff *
nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
- enum nfp_flower_cmsg_type_port type)
+ enum nfp_flower_cmsg_type_port type, gfp_t flag)
{
struct nfp_flower_cmsg_hdr *ch;
struct sk_buff *skb;
size += NFP_FLOWER_CMSG_HLEN;
- skb = nfp_app_ctrl_msg_alloc(app, size, GFP_KERNEL);
+ skb = nfp_app_ctrl_msg_alloc(app, size, flag);
if (!skb)
return NULL;
@@ -85,7 +78,8 @@ nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports)
unsigned int size;
size = sizeof(*msg) + num_ports * sizeof(msg->ports[0]);
- skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR);
+ skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR,
+ GFP_KERNEL);
if (!skb)
return NULL;
@@ -116,7 +110,7 @@ int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
struct sk_buff *skb;
skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
- NFP_FLOWER_CMSG_TYPE_PORT_MOD);
+ NFP_FLOWER_CMSG_TYPE_PORT_MOD, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -188,6 +182,15 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
case NFP_FLOWER_CMSG_TYPE_FLOW_STATS:
nfp_flower_rx_flow_stats(app, skb);
break;
+ case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
+ nfp_tunnel_request_route(app, skb);
+ break;
+ case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
+ nfp_tunnel_keep_alive(app, skb);
+ break;
+ case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH:
+ /* Acks from the NFP that the route is added - ignore. */
+ break;
default:
nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
type);
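
The rx path now dispatches three additional tunnel-related message types, with TUN_NEIGH acks dropped silently. A reduced model of that dispatch, using the type values from cmsg.h below:

#include <stdio.h>

enum cmsg_type {			/* subset of nfp_flower_cmsg_type_port */
	CMSG_TYPE_NO_NEIGH    = 10,
	CMSG_TYPE_ACTIVE_TUNS = 12,
	CMSG_TYPE_TUN_NEIGH   = 13,
	CMSG_TYPE_FLOW_STATS  = 15,
};

/* Model of nfp_flower_cmsg_process_one_rx(): known types get a
 * handler, TUN_NEIGH acks are ignored, and anything else is
 * rate-limit-warned in the real driver.
 */
static void process_one_rx(int type)
{
	switch (type) {
	case CMSG_TYPE_NO_NEIGH:
		puts("request route from the kernel");
		break;
	case CMSG_TYPE_ACTIVE_TUNS:
		puts("refresh tunnel neighbour entries");
		break;
	case CMSG_TYPE_TUN_NEIGH:
		break;			/* ack from the NFP - ignore */
	case CMSG_TYPE_FLOW_STATS:
		puts("update flow stats");
		break;
	default:
		printf("cannot handle control type %d\n", type);
	}
}

int main(void)
{
	process_one_rx(CMSG_TYPE_NO_NEIGH);
	process_one_rx(99);
	return 0;
}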
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index a2ec60344236..66070741d55f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -39,6 +39,7 @@
#include <linux/types.h>
#include "../nfp_app.h"
+#include "../nfpcore/nfp_cpp.h"
#define NFP_FLOWER_LAYER_META BIT(0)
#define NFP_FLOWER_LAYER_PORT BIT(1)
@@ -56,6 +57,11 @@
#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
+#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
+#define NFP_FLOWER_MASK_MPLS_TC GENMASK(11, 9)
+#define NFP_FLOWER_MASK_MPLS_BOS BIT(8)
+#define NFP_FLOWER_MASK_MPLS_Q BIT(0)
+
#define NFP_FL_SC_ACT_DROP 0x80000000
#define NFP_FL_SC_ACT_USER 0x7D000000
#define NFP_FL_SC_ACT_POPV 0x6A000000
@@ -67,13 +73,18 @@
#define NFP_FL_LW_SIZ 2
/* Action opcodes */
-#define NFP_FL_ACTION_OPCODE_OUTPUT 0
-#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
-#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
-#define NFP_FL_ACTION_OPCODE_NUM 32
-
-#define NFP_FL_ACT_JMP_ID GENMASK(15, 8)
-#define NFP_FL_ACT_LEN_LW GENMASK(7, 0)
+#define NFP_FL_ACTION_OPCODE_OUTPUT 0
+#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
+#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
+#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6
+#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
+#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12
+#define NFP_FL_ACTION_OPCODE_SET_UDP 14
+#define NFP_FL_ACTION_OPCODE_SET_TCP 15
+#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17
+#define NFP_FL_ACTION_OPCODE_NUM 32
#define NFP_FL_OUT_FLAGS_LAST BIT(15)
#define NFP_FL_OUT_FLAGS_USE_TUN BIT(4)
@@ -83,21 +94,74 @@
#define NFP_FL_PUSH_VLAN_CFI BIT(12)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
+/* Tunnel ports */
+#define NFP_FL_PORT_TYPE_TUN 0x50000000
+#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4)
+#define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0)
+
+#define nfp_flower_cmsg_warn(app, fmt, args...) \
+ do { \
+ if (net_ratelimit()) \
+ nfp_warn((app)->cpp, fmt, ## args); \
+ } while (0)
+
+enum nfp_flower_tun_type {
+ NFP_FL_TUNNEL_NONE = 0,
+ NFP_FL_TUNNEL_VXLAN = 2,
+};
+
+struct nfp_fl_act_head {
+ u8 jump_id;
+ u8 len_lw;
+};
+
+struct nfp_fl_set_eth {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ u8 eth_addr_mask[ETH_ALEN * 2];
+ u8 eth_addr_val[ETH_ALEN * 2];
+};
+
+struct nfp_fl_set_ip4_addrs {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be32 ipv4_src_mask;
+ __be32 ipv4_src;
+ __be32 ipv4_dst_mask;
+ __be32 ipv4_dst;
+};
+
+struct nfp_fl_set_ipv6_addr {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ struct {
+ __be32 mask;
+ __be32 exact;
+ } ipv6[4];
+};
+
+struct nfp_fl_set_tport {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ u8 tp_port_mask[4];
+ u8 tp_port_val[4];
+};
+
struct nfp_fl_output {
- __be16 a_op;
+ struct nfp_fl_act_head head;
__be16 flags;
__be32 port;
};
struct nfp_fl_push_vlan {
- __be16 a_op;
+ struct nfp_fl_act_head head;
__be16 reserved;
__be16 vlan_tpid;
__be16 vlan_tci;
};
struct nfp_fl_pop_vlan {
- __be16 a_op;
+ struct nfp_fl_act_head head;
__be16 reserved;
};
@@ -115,6 +179,25 @@ struct nfp_flower_meta_one {
u16 reserved;
};
+struct nfp_fl_pre_tunnel {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be32 ipv4_dst;
+ /* reserved for use with IPv6 addresses */
+ __be32 extra[3];
+};
+
+struct nfp_fl_set_vxlan {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be64 tun_id;
+ __be32 tun_type_index;
+ __be16 tun_flags;
+ u8 ipv4_ttl;
+ u8 ipv4_tos;
+ __be32 extra[2];
+} __packed;
+
/* Metadata with L2 (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
@@ -230,6 +313,36 @@ struct nfp_flower_ipv6 {
struct in6_addr ipv6_dst;
};
+/* Flow Frame VXLAN --> Tunnel details (4W/16B)
+ * -----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_src |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_dst |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | tun_flags | tos | ttl |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | gpe_flags | Reserved | Next Protocol |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VNI | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_vxlan {
+ __be32 ip_src;
+ __be32 ip_dst;
+ __be16 tun_flags;
+ u8 tos;
+ u8 ttl;
+ u8 gpe_flags;
+ u8 reserved[2];
+ u8 nxt_proto;
+ __be32 tun_id;
+};
+
+#define NFP_FL_TUN_VNI_OFFSET 8
+
/* The base header for a control message packet.
* Defines an 8-bit version, and an 8-bit type, padded
* to a 32-bit word. Rest of the packet is type-specific.
@@ -249,6 +362,11 @@ enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7,
NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
+ NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10,
+ NFP_FLOWER_CMSG_TYPE_TUN_MAC = 11,
+ NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS = 12,
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH = 13,
+ NFP_FLOWER_CMSG_TYPE_TUN_IPS = 14,
NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15,
NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16,
NFP_FLOWER_CMSG_TYPE_MAX = 32,
@@ -282,6 +400,7 @@ enum nfp_flower_cmsg_port_type {
NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0,
NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1,
NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT = 0x2,
+ NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT = 0x3,
};
enum nfp_flower_cmsg_port_vnic_type {
@@ -323,6 +442,11 @@ static inline void *nfp_flower_cmsg_get_data(struct sk_buff *skb)
return (unsigned char *)skb->data + NFP_FLOWER_CMSG_HLEN;
}
+static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb)
+{
+ return skb->len - NFP_FLOWER_CMSG_HLEN;
+}
+
struct sk_buff *
nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports);
void
@@ -334,6 +458,6 @@ void nfp_flower_cmsg_process_rx(struct work_struct *work);
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
struct sk_buff *
nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
- enum nfp_flower_cmsg_type_port type);
+ enum nfp_flower_cmsg_type_port type, gfp_t flag);
#endif
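
NFP_FL_TUN_VNI_OFFSET reflects that the 24-bit VXLAN VNI sits in the upper bytes of the 32-bit tun_id word, above an 8-bit reserved field. A small sketch of packing and unpacking under that assumed layout:

#include <stdint.h>
#include <stdio.h>

#define TUN_VNI_OFFSET 8	/* mirrors NFP_FL_TUN_VNI_OFFSET */

/* The 24-bit VNI occupies bits 31:8 of the tun_id word in the match
 * frame; bits 7:0 are reserved.
 */
static uint32_t pack_vni(uint32_t vni)
{
	return (vni & 0xffffff) << TUN_VNI_OFFSET;
}

static uint32_t unpack_vni(uint32_t word)
{
	return word >> TUN_VNI_OFFSET;
}

int main(void)
{
	printf("%u\n", unpack_vni(pack_vni(42)));	/* 42 */
	return 0;
}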
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 91fe03617106..e0283bb24f06 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -142,8 +142,8 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
{
u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
struct nfp_flower_priv *priv = app->priv;
- struct nfp_reprs *reprs, *old_reprs;
enum nfp_port_type port_type;
+ struct nfp_reprs *reprs;
const u8 queue = 0;
int i, err;
@@ -194,11 +194,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
reprs->reprs[i]->name);
}
- old_reprs = nfp_app_reprs_set(app, repr_type, reprs);
- if (IS_ERR(old_reprs)) {
- err = PTR_ERR(old_reprs);
- goto err_reprs_clean;
- }
+ nfp_app_reprs_set(app, repr_type, reprs);
return 0;
err_reprs_clean:
@@ -222,8 +218,8 @@ static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
- struct nfp_reprs *reprs, *old_reprs;
struct sk_buff *ctrl_skb;
+ struct nfp_reprs *reprs;
unsigned int i;
int err;
@@ -280,11 +276,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
phys_port, reprs->reprs[phys_port]->name);
}
- old_reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
- if (IS_ERR(old_reprs)) {
- err = PTR_ERR(old_reprs);
- goto err_reprs_clean;
- }
+ nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
/* The MAC_REPR control message should be sent after the MAC
* representors are registered using nfp_app_reprs_set(). This is
@@ -436,6 +428,16 @@ static void nfp_flower_clean(struct nfp_app *app)
app->priv = NULL;
}
+static int nfp_flower_start(struct nfp_app *app)
+{
+ return nfp_tunnel_config_start(app);
+}
+
+static void nfp_flower_stop(struct nfp_app *app)
+{
+ nfp_tunnel_config_stop(app);
+}
+
const struct nfp_app_type app_flower = {
.id = NFP_APP_FLOWER_NIC,
.name = "flower",
@@ -453,6 +455,9 @@ const struct nfp_app_type app_flower = {
.repr_open = nfp_flower_repr_netdev_open,
.repr_stop = nfp_flower_repr_netdev_stop,
+ .start = nfp_flower_start,
+ .stop = nfp_flower_stop,
+
.ctrl_msg_rx = nfp_flower_cmsg_rx,
.sriov_enable = nfp_flower_sriov_enable,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index c20dd00a1cae..c90e72b7ff5a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -58,6 +58,8 @@ struct nfp_app;
#define NFP_FL_MASK_REUSE_TIME_NS 40000
#define NFP_FL_MASK_ID_LOCATION 1
+#define NFP_FL_VXLAN_PORT 4789
+
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
struct timespec64 *last_used;
@@ -82,6 +84,18 @@ struct nfp_fl_stats_id {
* @flow_table: Hash table used to store flower rules
* @cmsg_work: Workqueue for control messages processing
* @cmsg_skbs: List of skbs for control message processing
+ * @nfp_mac_off_list: List of MAC addresses to offload
+ * @nfp_mac_index_list: List of unique 8-bit indexes for non-NFP netdevs
+ * @nfp_ipv4_off_list: List of IPv4 addresses to offload
+ * @nfp_neigh_off_list: List of neighbour offloads
+ * @nfp_mac_off_lock: Lock for the MAC address list
+ * @nfp_mac_index_lock: Lock for the MAC index list
+ * @nfp_ipv4_off_lock: Lock for the IPv4 address list
+ * @nfp_neigh_off_lock: Lock for the neighbour address list
+ * @nfp_mac_off_ids: IDA to manage ID assignment for offloaded MACs
+ * @nfp_mac_off_count: Number of MACs in address list
+ * @nfp_tun_mac_nb: Notifier to monitor link state
+ * @nfp_tun_neigh_nb: Notifier to monitor neighbour state
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -94,6 +108,18 @@ struct nfp_flower_priv {
DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
struct work_struct cmsg_work;
struct sk_buff_head cmsg_skbs;
+ struct list_head nfp_mac_off_list;
+ struct list_head nfp_mac_index_list;
+ struct list_head nfp_ipv4_off_list;
+ struct list_head nfp_neigh_off_list;
+ struct mutex nfp_mac_off_lock;
+ struct mutex nfp_mac_index_lock;
+ struct mutex nfp_ipv4_off_lock;
+ spinlock_t nfp_neigh_off_lock;
+ struct ida nfp_mac_off_ids;
+ int nfp_mac_off_count;
+ struct notifier_block nfp_tun_mac_nb;
+ struct notifier_block nfp_tun_neigh_nb;
};
struct nfp_fl_key_ls {
@@ -126,6 +152,7 @@ struct nfp_fl_payload {
struct rcu_head rcu;
spinlock_t lock; /* lock stats */
struct nfp_fl_stats stats;
+ __be32 nfp_tun_ipv4_addr;
char *unmasked_data;
char *mask_data;
char *action_data;
@@ -163,4 +190,12 @@ nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
+int nfp_tunnel_config_start(struct nfp_app *app);
+void nfp_tunnel_config_stop(struct nfp_app *app);
+void nfp_tunnel_write_macs(struct nfp_app *app);
+void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
+void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
+void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
+void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
+
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index d25b5038c3a2..60614d4f0e22 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -77,14 +77,17 @@ nfp_flower_compile_meta(struct nfp_flower_meta_one *frame, u8 key_type)
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
- bool mask_version)
+ bool mask_version, enum nfp_flower_tun_type tun_type)
{
if (mask_version) {
frame->in_port = cpu_to_be32(~0);
return 0;
}
- frame->in_port = cpu_to_be32(cmsg_port);
+ if (tun_type)
+ frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
+ else
+ frame->in_port = cpu_to_be32(cmsg_port);
return 0;
}
@@ -108,8 +111,21 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
ether_addr_copy(frame->mac_src, &addr->src[0]);
}
- if (mask_version)
- frame->mpls_lse = cpu_to_be32(~0);
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
+ struct flow_dissector_key_mpls *mpls;
+ u32 t_mpls;
+
+ mpls = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_MPLS,
+ target);
+
+ t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, mpls->mpls_label) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, mpls->mpls_tc) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, mpls->mpls_bos) |
+ NFP_FLOWER_MASK_MPLS_Q;
+
+ frame->mpls_lse = cpu_to_be32(t_mpls);
+ }
}
static void
@@ -140,7 +156,6 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
struct flow_dissector_key_ipv4_addrs *addr;
struct flow_dissector_key_basic *basic;
- /* Wildcard TOS/TTL for now. */
memset(frame, 0, sizeof(struct nfp_flower_ipv4));
if (dissector_uses_key(flow->dissector,
@@ -158,6 +173,16 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
target);
frame->proto = basic->ip_proto;
}
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_dissector_key_ip *flow_ip;
+
+ flow_ip = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ target);
+ frame->tos = flow_ip->tos;
+ frame->ttl = flow_ip->ttl;
+ }
}
static void
@@ -169,7 +194,6 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
struct flow_dissector_key_ipv6_addrs *addr;
struct flow_dissector_key_basic *basic;
- /* Wildcard LABEL/TOS/TTL for now. */
memset(frame, 0, sizeof(struct nfp_flower_ipv6));
if (dissector_uses_key(flow->dissector,
@@ -187,6 +211,51 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
target);
frame->proto = basic->ip_proto;
}
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_dissector_key_ip *flow_ip;
+
+ flow_ip = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ target);
+ frame->tos = flow_ip->tos;
+ frame->ttl = flow_ip->ttl;
+ }
+}
+
+static void
+nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version, __be32 *tun_dst)
+{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_dissector_key_ipv4_addrs *vxlan_ips;
+ struct flow_dissector_key_keyid *vni;
+
+ /* Wildcard TOS/TTL/GPE_FLAGS/NXT_PROTO for now. */
+ memset(frame, 0, sizeof(struct nfp_flower_vxlan));
+
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ u32 temp_vni;
+
+ vni = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ target);
+ temp_vni = be32_to_cpu(vni->keyid) << NFP_FL_TUN_VNI_OFFSET;
+ frame->tun_id = cpu_to_be32(temp_vni);
+ }
+
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ vxlan_ips =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
+ target);
+ frame->ip_src = vxlan_ips->src;
+ frame->ip_dst = vxlan_ips->dst;
+ *tun_dst = vxlan_ips->dst;
+ }
}
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
@@ -194,10 +263,16 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow)
{
+ enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
+ __be32 tun_dst, tun_dst_mask = 0;
+ struct nfp_repr *netdev_repr;
int err;
u8 *ext;
u8 *msk;
+ if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN)
+ tun_type = NFP_FL_TUNNEL_VXLAN;
+
memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
@@ -216,14 +291,14 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
/* Populate Exact Port data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
nfp_repr_get_port_id(netdev),
- false);
+ false, tun_type);
if (err)
return err;
/* Populate Mask Port Data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
nfp_repr_get_port_id(netdev),
- true);
+ true, tun_type);
if (err)
return err;
@@ -291,5 +366,28 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
msk += sizeof(struct nfp_flower_ipv6);
}
+ if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) {
+ /* Populate Exact VXLAN Data. */
+ nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext,
+ flow, false, &tun_dst);
+ /* Populate Mask VXLAN Data. */
+ nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk,
+ flow, true, &tun_dst_mask);
+ ext += sizeof(struct nfp_flower_vxlan);
+ msk += sizeof(struct nfp_flower_vxlan);
+
+ /* Configure tunnel end point MAC. */
+ if (nfp_netdev_is_nfp_repr(netdev)) {
+ netdev_repr = netdev_priv(netdev);
+ nfp_tunnel_write_macs(netdev_repr->app);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = tun_dst;
+ nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
+ }
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 3226ddc55f99..193520ef23f0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -140,7 +140,7 @@ exit_rcu_unlock:
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
- unsigned int msg_len = skb->len - NFP_FLOWER_CMSG_HLEN;
+ unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
struct nfp_fl_stats_frame *stats_frame;
unsigned char *msg;
int i;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index a18b4d2b1d3e..cdbb5464b790 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -52,8 +52,26 @@
BIT(FLOW_DISSECTOR_KEY_PORTS) | \
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_VLAN) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
+ BIT(FLOW_DISSECTOR_KEY_MPLS) | \
BIT(FLOW_DISSECTOR_KEY_IP))
+#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
+ (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+
+#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
+ (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+
static int
nfp_flower_xmit_flow(struct net_device *netdev,
struct nfp_fl_payload *nfp_flow, u8 mtype)
@@ -77,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
- skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype);
+ skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -117,7 +135,6 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
{
struct flow_dissector_key_basic *mask_basic = NULL;
struct flow_dissector_key_basic *key_basic = NULL;
- struct flow_dissector_key_ip *mask_ip = NULL;
u32 key_layer_two;
u8 key_layer;
int key_size;
@@ -125,15 +142,58 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
return -EOPNOTSUPP;
+ /* If any tun dissector is used then the required set must be used. */
+ if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
+ (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
+ != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
+ return -EOPNOTSUPP;
+
+ key_layer_two = 0;
+ key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
+ key_size = sizeof(struct nfp_flower_meta_one) +
+ sizeof(struct nfp_flower_in_port) +
+ sizeof(struct nfp_flower_mac_mpls);
+
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
+ struct flow_dissector_key_ports *mask_enc_ports = NULL;
+ struct flow_dissector_key_ports *enc_ports = NULL;
struct flow_dissector_key_control *mask_enc_ctl =
skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
flow->mask);
- /* We are expecting a tunnel. For now we ignore offloading. */
- if (mask_enc_ctl->addr_type)
+ struct flow_dissector_key_control *enc_ctl =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_CONTROL,
+ flow->key);
+ if (mask_enc_ctl->addr_type != 0xffff ||
+ enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
return -EOPNOTSUPP;
+
+ /* These fields are already verified as used. */
+ mask_ipv4 =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
+ flow->mask);
+ if (mask_ipv4->dst != cpu_to_be32(~0))
+ return -EOPNOTSUPP;
+
+ mask_enc_ports =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_PORTS,
+ flow->mask);
+ enc_ports =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_PORTS,
+ flow->key);
+
+ if (mask_enc_ports->dst != cpu_to_be16(~0) ||
+ enc_ports->dst != htons(NFP_FL_VXLAN_PORT))
+ return -EOPNOTSUPP;
+
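+		/* All tunnel checks passed: this is an exact-match IPv4
+		 * VXLAN tunnel terminating on UDP port 4789, so the VXLAN
+		 * key layer can safely be added.
+		 */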
+ key_layer |= NFP_FLOWER_LAYER_VXLAN;
+ key_size += sizeof(struct nfp_flower_vxlan);
}
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -146,34 +206,15 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
flow->key);
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP))
- mask_ip = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IP,
- flow->mask);
-
- key_layer_two = 0;
- key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
- key_size = sizeof(struct nfp_flower_meta_one) +
- sizeof(struct nfp_flower_in_port) +
- sizeof(struct nfp_flower_mac_mpls);
-
if (mask_basic && mask_basic->n_proto) {
/* Ethernet type is present in the key. */
switch (key_basic->n_proto) {
case cpu_to_be16(ETH_P_IP):
- if (mask_ip && mask_ip->tos)
- return -EOPNOTSUPP;
- if (mask_ip && mask_ip->ttl)
- return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV4;
key_size += sizeof(struct nfp_flower_ipv4);
break;
case cpu_to_be16(ETH_P_IPV6):
- if (mask_ip && mask_ip->tos)
- return -EOPNOTSUPP;
- if (mask_ip && mask_ip->ttl)
- return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV6;
key_size += sizeof(struct nfp_flower_ipv6);
break;
@@ -184,11 +225,6 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
case cpu_to_be16(ETH_P_ARP):
return -EOPNOTSUPP;
- /* Currently we do not offload MPLS. */
- case cpu_to_be16(ETH_P_MPLS_UC):
- case cpu_to_be16(ETH_P_MPLS_MC):
- return -EOPNOTSUPP;
-
/* Will be included in layer 2. */
case cpu_to_be16(ETH_P_8021Q):
break;
@@ -252,6 +288,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
if (!flow_pay->action_data)
goto err_free_mask;
+ flow_pay->nfp_tun_ipv4_addr = 0;
flow_pay->meta.flags = 0;
spin_lock_init(&flow_pay->lock);
@@ -361,6 +398,9 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_free_flow;
+ if (nfp_flow->nfp_tun_ipv4_addr)
+ nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
+
err = nfp_flower_xmit_flow(netdev, nfp_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
if (err)
@@ -409,6 +449,10 @@ static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flower)
{
+ if (!eth_proto_is_802_3(flower->common.protocol) ||
+ flower->common.chain_index)
+ return -EOPNOTSUPP;
+
switch (flower->command) {
case TC_CLSFLOWER_REPLACE:
return nfp_flower_add_offload(app, netdev, flower);
@@ -421,16 +465,53 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
-int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
- enum tc_setup_type type, void *type_data)
+static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
{
- struct tc_cls_flower_offload *cls_flower = type_data;
+ struct nfp_repr *repr = cb_priv;
- if (type != TC_SETUP_CLSFLOWER ||
- !is_classid_clsact_ingress(cls_flower->common.classid) ||
- !eth_proto_is_802_3(cls_flower->common.protocol) ||
- cls_flower->common.chain_index)
+ if (!tc_can_offload(repr->netdev))
return -EOPNOTSUPP;
- return nfp_flower_repr_offload(app, netdev, cls_flower);
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return nfp_flower_repr_offload(repr->app, repr->netdev,
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nfp_flower_setup_tc_block(struct net_device *netdev,
+ struct tc_block_offload *f)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block,
+ nfp_flower_setup_tc_block_cb,
+ repr, repr);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block,
+ nfp_flower_setup_tc_block_cb,
+ repr);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return nfp_flower_setup_tc_block(netdev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
new file mode 100644
index 000000000000..b03f22f29612
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -0,0 +1,804 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <net/netevent.h>
+#include <linux/idr.h>
+#include <net/dst_metadata.h>
+#include <net/arp.h>
+
+#include "cmsg.h"
+#include "main.h"
+#include "../nfp_net_repr.h"
+#include "../nfp_net.h"
+
+#define NFP_FL_MAX_ROUTES 32
+
+/**
+ * struct nfp_tun_active_tuns - periodic message of active tunnels
+ * @seq: sequence number of the message
+ * @count:		number of tunnels reported in the message
+ * @flags: options part of the request
+ * @ipv4: dest IPv4 address of active route
+ * @egress_port: port the encapsulated packet egressed
+ * @extra: reserved for future use
+ * @tun_info: tunnels that have sent traffic in reported period
+ */
+struct nfp_tun_active_tuns {
+ __be32 seq;
+ __be32 count;
+ __be32 flags;
+ struct route_ip_info {
+ __be32 ipv4;
+ __be32 egress_port;
+ __be32 extra[2];
+ } tun_info[];
+};
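+
+/* Size note (illustrative): a keep-alive reporting N tunnels is
+ * 12 + N * 16 bytes - three __be32 header words plus one 16-byte
+ * route_ip_info per tunnel; nfp_tunnel_keep_alive() validates the
+ * payload length against exactly this sum.
+ */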
+
+/**
+ * struct nfp_tun_neigh - neighbour/route entry on the NFP
+ * @dst_ipv4: destination IPv4 address
+ * @src_ipv4: source IPv4 address
+ * @dst_addr: destination MAC address
+ * @src_addr: source MAC address
+ * @port_id: NFP port to output packet on - associated with source IPv4
+ */
+struct nfp_tun_neigh {
+ __be32 dst_ipv4;
+ __be32 src_ipv4;
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+ __be32 port_id;
+};
+
+/**
+ * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
+ * @ingress_port: ingress port of packet that signalled request
+ * @ipv4_addr: destination ipv4 address for route
+ * @reserved: reserved for future use
+ */
+struct nfp_tun_req_route_ipv4 {
+ __be32 ingress_port;
+ __be32 ipv4_addr;
+ __be32 reserved[2];
+};
+
+/**
+ * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
+ * @ipv4_addr: destination of route
+ * @list: list pointer
+ */
+struct nfp_ipv4_route_entry {
+ __be32 ipv4_addr;
+ struct list_head list;
+};
+
+#define NFP_FL_IPV4_ADDRS_MAX 32
+
+/**
+ * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
+ * @count: number of IPs populated in the array
+ * @ipv4_addr:	array of NFP_FL_IPV4_ADDRS_MAX 32-bit IPv4 addresses
+ */
+struct nfp_tun_ipv4_addr {
+ __be32 count;
+ __be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
+};
+
+/**
+ * struct nfp_ipv4_addr_entry - cached IPv4 addresses
+ * @ipv4_addr: IP address
+ * @ref_count: number of rules currently using this IP
+ * @list: list pointer
+ */
+struct nfp_ipv4_addr_entry {
+ __be32 ipv4_addr;
+ int ref_count;
+ struct list_head list;
+};
+
+/**
+ * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP
+ * @reserved: reserved for future use
+ * @count: number of MAC addresses in the message
+ * @index: index of MAC address in the lookup table
+ * @addr: interface MAC address
+ * @addresses: series of MACs to offload
+ */
+struct nfp_tun_mac_addr {
+ __be16 reserved;
+ __be16 count;
+ struct index_mac_addr {
+ __be16 index;
+ u8 addr[ETH_ALEN];
+ } addresses[];
+};
+
+/**
+ * struct nfp_tun_mac_offload_entry - list of MACs to offload
+ * @index: index of MAC address for offloading
+ * @addr: interface MAC address
+ * @list: list pointer
+ */
+struct nfp_tun_mac_offload_entry {
+ __be16 index;
+ u8 addr[ETH_ALEN];
+ struct list_head list;
+};
+
+#define NFP_MAX_MAC_INDEX 0xff
+
+/**
+ * struct nfp_tun_mac_non_nfp_idx - maps a non-NFP netdev ifindex to an 8-bit id
+ * @ifindex:	netdev ifindex of the device
+ * @index:	index of the netdev's MAC on the NFP
+ * @list: list pointer
+ */
+struct nfp_tun_mac_non_nfp_idx {
+ int ifindex;
+ u8 index;
+ struct list_head list;
+};
+
+void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_tun_active_tuns *payload;
+ struct net_device *netdev;
+ int count, i, pay_len;
+ struct neighbour *n;
+ __be32 ipv4_addr;
+ u32 port;
+
+ payload = nfp_flower_cmsg_get_data(skb);
+ count = be32_to_cpu(payload->count);
+ if (count > NFP_FL_MAX_ROUTES) {
+ nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
+ return;
+ }
+
+ pay_len = nfp_flower_cmsg_get_data_len(skb);
+ if (pay_len != sizeof(struct nfp_tun_active_tuns) +
+ sizeof(struct route_ip_info) * count) {
+ nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+ ipv4_addr = payload->tun_info[i].ipv4;
+ port = be32_to_cpu(payload->tun_info[i].egress_port);
+ netdev = nfp_app_repr_get(app, port);
+ if (!netdev)
+ continue;
+
+ n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
+ if (!n)
+ continue;
+
+ /* Update the used timestamp of neighbour */
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ }
+}
+
+static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
+{
+ if (!netdev->rtnl_link_ops)
+ return false;
+ if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
+ return true;
+ if (!strcmp(netdev->rtnl_link_ops->kind, "vxlan"))
+ return true;
+
+ return false;
+}
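+
+/* Example: a netdev created with
+ *   ip link add vxlan0 type vxlan id 42 dstport 4789 dev eth0
+ * has rtnl_link_ops->kind == "vxlan" and so qualifies for MAC offload,
+ * as do Open vSwitch internal ports ("openvswitch").
+ */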
+
+static int
+nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
+ gfp_t flag)
+{
+ struct sk_buff *skb;
+ unsigned char *msg;
+
+ skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
+ if (!skb)
+ return -ENOMEM;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));
+
+ nfp_ctrl_tx(app->ctrl, skb);
+ return 0;
+}
+
+static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_route_entry *entry;
+ struct list_head *ptr, *storage;
+
+ spin_lock_bh(&priv->nfp_neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
+ if (entry->ipv4_addr == ipv4_addr) {
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ return true;
+ }
+ }
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ return false;
+}
+
+static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_route_entry *entry;
+ struct list_head *ptr, *storage;
+
+ spin_lock_bh(&priv->nfp_neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
+ if (entry->ipv4_addr == ipv4_addr) {
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ return;
+ }
+ }
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry) {
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
+ return;
+ }
+
+ entry->ipv4_addr = ipv4_addr;
+ list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+}
+
+static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_route_entry *entry;
+ struct list_head *ptr, *storage;
+
+ spin_lock_bh(&priv->nfp_neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
+ if (entry->ipv4_addr == ipv4_addr) {
+ list_del(&entry->list);
+ kfree(entry);
+ break;
+ }
+ }
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+}
+
+static void
+nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
+ struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
+{
+ struct nfp_tun_neigh payload;
+
+ /* Only offload representor IPv4s for now. */
+ if (!nfp_netdev_is_nfp_repr(netdev))
+ return;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_neigh));
+ payload.dst_ipv4 = flow->daddr;
+
+	/* If the entry has expired, send the dst IP with all other fields 0. */
+ if (!(neigh->nud_state & NUD_VALID)) {
+ nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
+ /* Trigger ARP to verify invalid neighbour state. */
+ neigh_event_send(neigh, NULL);
+ goto send_msg;
+ }
+
+ /* Have a valid neighbour so populate rest of entry. */
+ payload.src_ipv4 = flow->saddr;
+ ether_addr_copy(payload.src_addr, netdev->dev_addr);
+ neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
+ payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
+ /* Add destination of new route to NFP cache. */
+ nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
+
+send_msg:
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
+ sizeof(struct nfp_tun_neigh),
+ (unsigned char *)&payload, flag);
+}
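+
+/* Two message shapes result: a valid neighbour sends the fully populated
+ * entry, while an expired one sends only dst_ipv4 with all other fields
+ * zeroed (see the NUD_VALID check above).
+ */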
+
+static int
+nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct nfp_flower_priv *app_priv;
+ struct netevent_redirect *redir;
+ struct flowi4 flow = {};
+ struct neighbour *n;
+ struct nfp_app *app;
+ struct rtable *rt;
+ int err;
+
+ switch (event) {
+ case NETEVENT_REDIRECT:
+ redir = (struct netevent_redirect *)ptr;
+ n = redir->neigh;
+ break;
+ case NETEVENT_NEIGH_UPDATE:
+ n = (struct neighbour *)ptr;
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ flow.daddr = *(__be32 *)n->primary_key;
+
+ /* Only concerned with route changes for representors. */
+ if (!nfp_netdev_is_nfp_repr(n->dev))
+ return NOTIFY_DONE;
+
+ app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_neigh_nb);
+ app = app_priv->app;
+
+ /* Only concerned with changes to routes already added to NFP. */
+ if (!nfp_tun_has_route(app, flow.daddr))
+ return NOTIFY_DONE;
+
+#if IS_ENABLED(CONFIG_INET)
+ /* Do a route lookup to populate flow data. */
+ rt = ip_route_output_key(dev_net(n->dev), &flow);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (err)
+ return NOTIFY_DONE;
+#else
+ return NOTIFY_DONE;
+#endif
+
+ flow.flowi4_proto = IPPROTO_UDP;
+ nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
+
+ return NOTIFY_OK;
+}
+
+void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_tun_req_route_ipv4 *payload;
+ struct net_device *netdev;
+ struct flowi4 flow = {};
+ struct neighbour *n;
+ struct rtable *rt;
+ int err;
+
+ payload = nfp_flower_cmsg_get_data(skb);
+
+ netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
+ if (!netdev)
+ goto route_fail_warning;
+
+ flow.daddr = payload->ipv4_addr;
+ flow.flowi4_proto = IPPROTO_UDP;
+
+#if IS_ENABLED(CONFIG_INET)
+ /* Do a route lookup on same namespace as ingress port. */
+ rt = ip_route_output_key(dev_net(netdev), &flow);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (err)
+ goto route_fail_warning;
+#else
+ goto route_fail_warning;
+#endif
+
+ /* Get the neighbour entry for the lookup */
+ n = dst_neigh_lookup(&rt->dst, &flow.daddr);
+ ip_rt_put(rt);
+ if (!n)
+ goto route_fail_warning;
+ nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
+ neigh_release(n);
+ return;
+
+route_fail_warning:
+ nfp_flower_cmsg_warn(app, "Requested route not found.\n");
+}
+
+static void nfp_tun_write_ipv4_list(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_addr_entry *entry;
+ struct nfp_tun_ipv4_addr payload;
+ struct list_head *ptr, *storage;
+ int count;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
+ mutex_lock(&priv->nfp_ipv4_off_lock);
+ count = 0;
+ list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ if (count >= NFP_FL_IPV4_ADDRS_MAX) {
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+ nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
+ return;
+ }
+ entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+ payload.ipv4_addr[count++] = entry->ipv4_addr;
+ }
+ payload.count = cpu_to_be32(count);
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
+ sizeof(struct nfp_tun_ipv4_addr),
+ &payload, GFP_KERNEL);
+}
+
+void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_addr_entry *entry;
+ struct list_head *ptr, *storage;
+
+ mutex_lock(&priv->nfp_ipv4_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+ if (entry->ipv4_addr == ipv4) {
+ entry->ref_count++;
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+ return;
+ }
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+ nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
+ return;
+ }
+ entry->ipv4_addr = ipv4;
+ entry->ref_count = 1;
+ list_add_tail(&entry->list, &priv->nfp_ipv4_off_list);
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+ nfp_tun_write_ipv4_list(app);
+}
+
+void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_addr_entry *entry;
+ struct list_head *ptr, *storage;
+
+ mutex_lock(&priv->nfp_ipv4_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+ if (entry->ipv4_addr == ipv4) {
+ entry->ref_count--;
+ if (!entry->ref_count) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ break;
+ }
+ }
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+ nfp_tun_write_ipv4_list(app);
+}
+
+void nfp_tunnel_write_macs(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_mac_offload_entry *entry;
+ struct nfp_tun_mac_addr *payload;
+ struct list_head *ptr, *storage;
+ int mac_count, err, pay_size;
+
+ mutex_lock(&priv->nfp_mac_off_lock);
+ if (!priv->nfp_mac_off_count) {
+ mutex_unlock(&priv->nfp_mac_off_lock);
+ return;
+ }
+
+ pay_size = sizeof(struct nfp_tun_mac_addr) +
+ sizeof(struct index_mac_addr) * priv->nfp_mac_off_count;
+
+ payload = kzalloc(pay_size, GFP_KERNEL);
+ if (!payload) {
+ mutex_unlock(&priv->nfp_mac_off_lock);
+ return;
+ }
+
+ payload->count = cpu_to_be16(priv->nfp_mac_off_count);
+
+ mac_count = 0;
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
+ entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
+ list);
+ payload->addresses[mac_count].index = entry->index;
+ ether_addr_copy(payload->addresses[mac_count].addr,
+ entry->addr);
+ mac_count++;
+ }
+
+ err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
+ pay_size, payload, GFP_KERNEL);
+
+ kfree(payload);
+
+ if (err) {
+ mutex_unlock(&priv->nfp_mac_off_lock);
+ /* Write failed so retain list for future retry. */
+ return;
+ }
+
+ /* If list was successfully offloaded, flush it. */
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
+ entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
+ list);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+
+ priv->nfp_mac_off_count = 0;
+ mutex_unlock(&priv->nfp_mac_off_lock);
+}
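+
+/* Payload size illustration: with two queued MACs the message sent above
+ * is 4 + 2 * 8 = 20 bytes - a 4-byte header (reserved + count) followed
+ * by one 8-byte index/address pair per MAC.
+ */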
+
+static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_mac_non_nfp_idx *entry;
+ struct list_head *ptr, *storage;
+ int idx;
+
+ mutex_lock(&priv->nfp_mac_index_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
+ entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
+ if (entry->ifindex == ifindex) {
+ idx = entry->index;
+ mutex_unlock(&priv->nfp_mac_index_lock);
+ return idx;
+ }
+ }
+
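+	/* ida_simple_get() allocates in [0, NFP_MAX_MAC_INDEX), so the id
+	 * returned below is at most 0xfe and always fits the u8 index.
+	 */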
+ idx = ida_simple_get(&priv->nfp_mac_off_ids, 0,
+ NFP_MAX_MAC_INDEX, GFP_KERNEL);
+ if (idx < 0) {
+ mutex_unlock(&priv->nfp_mac_index_lock);
+ return idx;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ mutex_unlock(&priv->nfp_mac_index_lock);
+ return -ENOMEM;
+ }
+ entry->ifindex = ifindex;
+ entry->index = idx;
+ list_add_tail(&entry->list, &priv->nfp_mac_index_list);
+ mutex_unlock(&priv->nfp_mac_index_lock);
+
+ return idx;
+}
+
+static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_mac_non_nfp_idx *entry;
+ struct list_head *ptr, *storage;
+
+ mutex_lock(&priv->nfp_mac_index_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
+ entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
+ if (entry->ifindex == ifindex) {
+ ida_simple_remove(&priv->nfp_mac_off_ids,
+ entry->index);
+ list_del(&entry->list);
+ kfree(entry);
+ break;
+ }
+ }
+ mutex_unlock(&priv->nfp_mac_index_lock);
+}
+
+static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
+ struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_mac_offload_entry *entry;
+ u16 nfp_mac_idx;
+ int port = 0;
+
+ /* Check if MAC should be offloaded. */
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ return;
+
+ if (nfp_netdev_is_nfp_repr(netdev))
+ port = nfp_repr_get_port_id(netdev);
+ else if (!nfp_tun_is_netdev_to_offload(netdev))
+ return;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ nfp_flower_cmsg_warn(app, "Mem fail when offloading MAC.\n");
+ return;
+ }
+
+ if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
+ NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) {
+ nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
+ } else if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
+ NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT) {
+ port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port);
+ nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT;
+ } else {
+ /* Must assign our own unique 8-bit index. */
+ int idx = nfp_tun_get_mac_idx(app, netdev->ifindex);
+
+ if (idx < 0) {
+ nfp_flower_cmsg_warn(app, "Can't assign non-repr MAC index.\n");
+ kfree(entry);
+ return;
+ }
+ nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
+ }
+
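+	/* In all three branches the low byte of nfp_mac_idx carries the
+	 * port type and the high byte the 8-bit index.
+	 */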
+ entry->index = cpu_to_be16(nfp_mac_idx);
+ ether_addr_copy(entry->addr, netdev->dev_addr);
+
+ mutex_lock(&priv->nfp_mac_off_lock);
+ priv->nfp_mac_off_count++;
+ list_add_tail(&entry->list, &priv->nfp_mac_off_list);
+ mutex_unlock(&priv->nfp_mac_off_lock);
+}
+
+static int nfp_tun_mac_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct nfp_flower_priv *app_priv;
+ struct net_device *netdev;
+ struct nfp_app *app;
+
+ if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
+ app_priv = container_of(nb, struct nfp_flower_priv,
+ nfp_tun_mac_nb);
+ app = app_priv->app;
+ netdev = netdev_notifier_info_to_dev(ptr);
+
+		/* If it is a non-NFP netdev, free its offload index. */
+ if (nfp_tun_is_netdev_to_offload(netdev))
+ nfp_tun_del_mac_idx(app, netdev->ifindex);
+ } else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
+ event == NETDEV_REGISTER) {
+ app_priv = container_of(nb, struct nfp_flower_priv,
+ nfp_tun_mac_nb);
+ app = app_priv->app;
+ netdev = netdev_notifier_info_to_dev(ptr);
+
+ nfp_tun_add_to_mac_offload_list(netdev, app);
+
+ /* Force a list write to keep NFP up to date. */
+ nfp_tunnel_write_macs(app);
+ }
+ return NOTIFY_OK;
+}
+
+int nfp_tunnel_config_start(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct net_device *netdev;
+ int err;
+
+ /* Initialise priv data for MAC offloading. */
+ priv->nfp_mac_off_count = 0;
+ mutex_init(&priv->nfp_mac_off_lock);
+ INIT_LIST_HEAD(&priv->nfp_mac_off_list);
+ priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler;
+ mutex_init(&priv->nfp_mac_index_lock);
+ INIT_LIST_HEAD(&priv->nfp_mac_index_list);
+ ida_init(&priv->nfp_mac_off_ids);
+
+ /* Initialise priv data for IPv4 offloading. */
+ mutex_init(&priv->nfp_ipv4_off_lock);
+ INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);
+
+ /* Initialise priv data for neighbour offloading. */
+ spin_lock_init(&priv->nfp_neigh_off_lock);
+ INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
+ priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
+
+ err = register_netdevice_notifier(&priv->nfp_tun_mac_nb);
+ if (err)
+ goto err_free_mac_ida;
+
+ err = register_netevent_notifier(&priv->nfp_tun_neigh_nb);
+ if (err)
+ goto err_unreg_mac_nb;
+
+	/* Parse netdevs already registered for MACs that need to be offloaded. */
+ rtnl_lock();
+ for_each_netdev(&init_net, netdev)
+ nfp_tun_add_to_mac_offload_list(netdev, app);
+ rtnl_unlock();
+
+ return 0;
+
+err_unreg_mac_nb:
+ unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
+err_free_mac_ida:
+ ida_destroy(&priv->nfp_mac_off_ids);
+ return err;
+}
+
+void nfp_tunnel_config_stop(struct nfp_app *app)
+{
+ struct nfp_tun_mac_offload_entry *mac_entry;
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_route_entry *route_entry;
+ struct nfp_tun_mac_non_nfp_idx *mac_idx;
+ struct nfp_ipv4_addr_entry *ip_entry;
+ struct list_head *ptr, *storage;
+
+ unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
+ unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);
+
+ /* Free any memory that may be occupied by MAC list. */
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
+ mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
+ list);
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+
+ /* Free any memory that may be occupied by MAC index list. */
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
+ mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
+ list);
+ list_del(&mac_idx->list);
+ kfree(mac_idx);
+ }
+
+ ida_destroy(&priv->nfp_mac_off_ids);
+
+ /* Free any memory that may be occupied by ipv4 list. */
+ list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+ list_del(&ip_entry->list);
+ kfree(ip_entry);
+ }
+
+ /* Free any memory that may be occupied by the route list. */
+ list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
+ list);
+ list_del(&route_entry->list);
+ kfree(route_entry);
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 82c290763529..955a9f44d244 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/bug.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
@@ -42,10 +43,14 @@
#include "nfp_net_repr.h"
static const struct nfp_app_type *apps[] = {
- &app_nic,
- &app_bpf,
+ [NFP_APP_CORE_NIC] = &app_nic,
+#ifdef CONFIG_BPF_SYSCALL
+ [NFP_APP_BPF_NIC] = &app_bpf,
+#else
+ [NFP_APP_BPF_NIC] = &app_nic,
+#endif
#ifdef CONFIG_NFP_APP_FLOWER
- &app_flower,
+ [NFP_APP_FLOWER_NIC] = &app_flower,
#endif
};
@@ -101,31 +106,21 @@ nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
old = rcu_dereference_protected(app->reprs[type],
lockdep_is_held(&app->pf->lock));
- if (reprs && old) {
- old = ERR_PTR(-EBUSY);
- goto exit_unlock;
- }
-
rcu_assign_pointer(app->reprs[type], reprs);
-exit_unlock:
return old;
}
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
{
struct nfp_app *app;
- unsigned int i;
- for (i = 0; i < ARRAY_SIZE(apps); i++)
- if (apps[i]->id == id)
- break;
- if (i == ARRAY_SIZE(apps)) {
+ if (id >= ARRAY_SIZE(apps) || !apps[id]) {
nfp_err(pf->cpp, "failed to find app with ID 0x%02hhx\n", id);
return ERR_PTR(-EINVAL);
}
- if (WARN_ON(!apps[i]->name || !apps[i]->vnic_alloc))
+ if (WARN_ON(!apps[id]->name || !apps[id]->vnic_alloc))
return ERR_PTR(-EINVAL);
app = kzalloc(sizeof(*app), GFP_KERNEL);
@@ -135,7 +130,7 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
app->pf = pf;
app->cpp = pf->cpp;
app->pdev = pf->pdev;
- app->type = apps[i];
+ app->type = apps[id];
return app;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index af640b5c2108..54b67c9b8d5b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -36,10 +36,13 @@
#include <net/devlink.h>
+#include <trace/events/devlink.h>
+
#include "nfp_net_repr.h"
struct bpf_prog;
struct net_device;
+struct netdev_bpf;
struct pci_dev;
struct sk_buff;
@@ -81,6 +84,9 @@ extern const struct nfp_app_type app_flower;
* @setup_tc: setup TC ndo
* @tc_busy: TC HW offload busy (rules loaded)
* @xdp_offload: offload an XDP program
+ * @bpf_verifier_prep: verifier prep for dev-specific BPF programs
+ * @bpf_translate: translate call for dev-specific BPF programs
+ * @bpf_destroy: destroy for dev-specific BPF programs
* @eswitch_mode_get: get SR-IOV eswitch mode
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
@@ -116,6 +122,12 @@ struct nfp_app_type {
bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
+ int (*bpf_verifier_prep)(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf);
+ int (*bpf_translate)(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog);
+ int (*bpf_destroy)(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog);
int (*sriov_enable)(struct nfp_app *app, int num_vfs);
void (*sriov_disable)(struct nfp_app *app);
@@ -269,13 +281,46 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
return app->type->xdp_offload(app, nn, prog);
}
+static inline int
+nfp_app_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf)
+{
+ if (!app || !app->type->bpf_verifier_prep)
+ return -EOPNOTSUPP;
+ return app->type->bpf_verifier_prep(app, nn, bpf);
+}
+
+static inline int
+nfp_app_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog)
+{
+ if (!app || !app->type->bpf_translate)
+ return -EOPNOTSUPP;
+ return app->type->bpf_translate(app, nn, prog);
+}
+
+static inline int
+nfp_app_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog)
+{
+ if (!app || !app->type->bpf_destroy)
+ return -EOPNOTSUPP;
+ return app->type->bpf_destroy(app, nn, prog);
+}
+
static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
{
+ trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
+ skb->data, skb->len);
+
return nfp_ctrl_tx(app->ctrl, skb);
}
static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb)
{
+ trace_devlink_hwmsg(priv_to_devlink(app->pf), true, 0,
+ skb->data, skb->len);
+
app->type->ctrl_msg_rx(app, skb);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
new file mode 100644
index 000000000000..830f6de25f47
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "nfp_asm.h"
+
+const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
+ [CMD_TGT_WRITE8_SWAP] = { 0x02, 0x42 },
+ [CMD_TGT_READ8] = { 0x01, 0x43 },
+ [CMD_TGT_READ32] = { 0x00, 0x5c },
+ [CMD_TGT_READ32_LE] = { 0x01, 0x5c },
+ [CMD_TGT_READ32_SWAP] = { 0x02, 0x5c },
+ [CMD_TGT_READ_LE] = { 0x01, 0x40 },
+ [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
+};
+
+static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst)
+{
+ bool lm_id, lm_dec = false;
+ u16 val = swreg_value(reg);
+
+ switch (swreg_type(reg)) {
+ case NN_REG_GPR_A:
+ case NN_REG_GPR_B:
+ case NN_REG_GPR_BOTH:
+ return val;
+ case NN_REG_NNR:
+ return UR_REG_NN | val;
+ case NN_REG_XFER:
+ return UR_REG_XFR | val;
+ case NN_REG_LMEM:
+ lm_id = swreg_lm_idx(reg);
+
+ switch (swreg_lm_mode(reg)) {
+ case NN_LM_MOD_NONE:
+ if (val & ~UR_REG_LM_IDX_MAX) {
+ pr_err("LM offset too large\n");
+ return 0;
+ }
+ return UR_REG_LM | FIELD_PREP(UR_REG_LM_IDX, lm_id) |
+ val;
+ case NN_LM_MOD_DEC:
+ lm_dec = true;
+ /* fall through */
+ case NN_LM_MOD_INC:
+ if (val) {
+				pr_err("LM offset in inc/dec mode\n");
+ return 0;
+ }
+ return UR_REG_LM | UR_REG_LM_POST_MOD |
+ FIELD_PREP(UR_REG_LM_IDX, lm_id) |
+ FIELD_PREP(UR_REG_LM_POST_MOD_DEC, lm_dec);
+ default:
+ pr_err("bad LM mode for unrestricted operands %d\n",
+ swreg_lm_mode(reg));
+ return 0;
+ }
+ case NN_REG_IMM:
+ if (val & ~0xff) {
+ pr_err("immediate too large\n");
+ return 0;
+ }
+ return UR_REG_IMM_encode(val);
+ case NN_REG_NONE:
+ return is_dst ? UR_REG_NO_DST : REG_NONE;
+ }
+
+ pr_err("unrecognized reg encoding %08x\n", reg);
+ return 0;
+}
+
+int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
+ struct nfp_insn_ur_regs *reg)
+{
+ memset(reg, 0, sizeof(*reg));
+
+ /* Decode destination */
+ if (swreg_type(dst) == NN_REG_IMM)
+ return -EFAULT;
+
+ if (swreg_type(dst) == NN_REG_GPR_B)
+ reg->dst_ab = ALU_DST_B;
+ if (swreg_type(dst) == NN_REG_GPR_BOTH)
+ reg->wr_both = true;
+ reg->dst = nfp_swreg_to_unreg(dst, true);
+
+ /* Decode source operands */
+ if (swreg_type(lreg) == swreg_type(rreg))
+ return -EFAULT;
+
+ if (swreg_type(lreg) == NN_REG_GPR_B ||
+ swreg_type(rreg) == NN_REG_GPR_A) {
+ reg->areg = nfp_swreg_to_unreg(rreg, false);
+ reg->breg = nfp_swreg_to_unreg(lreg, false);
+ reg->swap = true;
+ } else {
+ reg->areg = nfp_swreg_to_unreg(lreg, false);
+ reg->breg = nfp_swreg_to_unreg(rreg, false);
+ }
+
+ reg->dst_lmextn = swreg_lmextn(dst);
+ reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+
+ return 0;
+}
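+
+/* Banking note: ALU operands read from separate A and B register banks,
+ * so a B-bank lreg or an A-bank rreg is exchanged here and reg->swap
+ * records the exchange for the instruction encoder.
+ */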
+
+static u16 nfp_swreg_to_rereg(swreg reg, bool is_dst, bool has_imm8, bool *i8)
+{
+ u16 val = swreg_value(reg);
+ bool lm_id;
+
+ switch (swreg_type(reg)) {
+ case NN_REG_GPR_A:
+ case NN_REG_GPR_B:
+ case NN_REG_GPR_BOTH:
+ return val;
+ case NN_REG_XFER:
+ return RE_REG_XFR | val;
+ case NN_REG_LMEM:
+ lm_id = swreg_lm_idx(reg);
+
+ if (swreg_lm_mode(reg) != NN_LM_MOD_NONE) {
+ pr_err("bad LM mode for restricted operands %d\n",
+ swreg_lm_mode(reg));
+ return 0;
+ }
+
+ if (val & ~RE_REG_LM_IDX_MAX) {
+ pr_err("LM offset too large\n");
+ return 0;
+ }
+
+ return RE_REG_LM | FIELD_PREP(RE_REG_LM_IDX, lm_id) | val;
+ case NN_REG_IMM:
+ if (val & ~(0x7f | has_imm8 << 7)) {
+ pr_err("immediate too large\n");
+ return 0;
+ }
+ *i8 = val & 0x80;
+ return RE_REG_IMM_encode(val & 0x7f);
+ case NN_REG_NONE:
+ return is_dst ? RE_REG_NO_DST : REG_NONE;
+ case NN_REG_NNR:
+ pr_err("NNRs used with restricted encoding\n");
+ return 0;
+ }
+
+ pr_err("unrecognized reg encoding\n");
+ return 0;
+}
+
+int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
+ struct nfp_insn_re_regs *reg, bool has_imm8)
+{
+ memset(reg, 0, sizeof(*reg));
+
+ /* Decode destination */
+ if (swreg_type(dst) == NN_REG_IMM)
+ return -EFAULT;
+
+ if (swreg_type(dst) == NN_REG_GPR_B)
+ reg->dst_ab = ALU_DST_B;
+ if (swreg_type(dst) == NN_REG_GPR_BOTH)
+ reg->wr_both = true;
+ reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);
+
+ /* Decode source operands */
+ if (swreg_type(lreg) == swreg_type(rreg))
+ return -EFAULT;
+
+ if (swreg_type(lreg) == NN_REG_GPR_B ||
+ swreg_type(rreg) == NN_REG_GPR_A) {
+ reg->areg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
+ reg->breg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
+ reg->swap = true;
+ } else {
+ reg->areg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
+ reg->breg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
+ }
+
+ reg->dst_lmextn = swreg_lmextn(dst);
+ reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+
+ return 0;
+}
+
+#define NFP_USTORE_ECC_POLY_WORDS 7
+#define NFP_USTORE_OP_BITS 45
+
+static const u64 nfp_ustore_ecc_polynomials[NFP_USTORE_ECC_POLY_WORDS] = {
+ 0x0ff800007fffULL,
+ 0x11f801ff801fULL,
+ 0x1e387e0781e1ULL,
+ 0x17cb8e388e22ULL,
+ 0x1af5b2c93244ULL,
+ 0x1f56d5525488ULL,
+ 0x0daf69a46910ULL,
+};
+
+static bool parity(u64 value)
+{
+ return hweight64(value) & 1;
+}
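+
+/* e.g. parity(0x3) == false (two bits set), parity(0x7) == true. */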
+
+int nfp_ustore_check_valid_no_ecc(u64 insn)
+{
+ if (insn & ~GENMASK_ULL(NFP_USTORE_OP_BITS, 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+u64 nfp_ustore_calc_ecc_insn(u64 insn)
+{
+ u8 ecc = 0;
+ int i;
+
+ for (i = 0; i < NFP_USTORE_ECC_POLY_WORDS; i++)
+ ecc |= parity(nfp_ustore_ecc_polynomials[i] & insn) << i;
+
+ return insn | (u64)ecc << NFP_USTORE_OP_BITS;
+}
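+
+/* The seven parity bits computed above land in bits 45-51 of the 64-bit
+ * instruction word, directly above the 45-bit opcode.
+ */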
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index d2b535739d2b..74d0c11ab2f9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -34,6 +34,8 @@
#ifndef __NFP_ASM_H__
#define __NFP_ASM_H__ 1
+#include <linux/bitfield.h>
+#include <linux/bug.h>
#include <linux/types.h>
#define REG_NONE 0
@@ -43,23 +45,31 @@
#define RE_REG_IMM_encode(x) \
(RE_REG_IMM | ((x) & 0x1f) | (((x) & 0x60) << 1))
#define RE_REG_IMM_MAX 0x07fULL
+#define RE_REG_LM 0x050
+#define RE_REG_LM_IDX 0x008
+#define RE_REG_LM_IDX_MAX 0x7
#define RE_REG_XFR 0x080
#define UR_REG_XFR 0x180
+#define UR_REG_LM 0x200
+#define UR_REG_LM_IDX 0x020
+#define UR_REG_LM_POST_MOD 0x010
+#define UR_REG_LM_POST_MOD_DEC 0x001
+#define UR_REG_LM_IDX_MAX 0xf
#define UR_REG_NN 0x280
#define UR_REG_NO_DST 0x300
#define UR_REG_IMM UR_REG_NO_DST
#define UR_REG_IMM_encode(x) (UR_REG_IMM | (x))
#define UR_REG_IMM_MAX 0x0ffULL
-#define OP_BR_BASE 0x0d800000020ULL
-#define OP_BR_BASE_MASK 0x0f8000c3ce0ULL
-#define OP_BR_MASK 0x0000000001fULL
-#define OP_BR_EV_PIP 0x00000000300ULL
-#define OP_BR_CSS 0x0000003c000ULL
-#define OP_BR_DEFBR 0x00000300000ULL
-#define OP_BR_ADDR_LO 0x007ffc00000ULL
-#define OP_BR_ADDR_HI 0x10000000000ULL
+#define OP_BR_BASE 0x0d800000020ULL
+#define OP_BR_BASE_MASK 0x0f8000c3ce0ULL
+#define OP_BR_MASK 0x0000000001fULL
+#define OP_BR_EV_PIP 0x00000000300ULL
+#define OP_BR_CSS 0x0000003c000ULL
+#define OP_BR_DEFBR 0x00000300000ULL
+#define OP_BR_ADDR_LO 0x007ffc00000ULL
+#define OP_BR_ADDR_HI 0x10000000000ULL
#define nfp_is_br(_insn) \
(((_insn) & OP_BR_BASE_MASK) == OP_BR_BASE)
@@ -82,30 +92,33 @@ enum br_ctx_signal_state {
BR_CSS_NONE = 2,
};
-#define OP_BBYTE_BASE 0x0c800000000ULL
-#define OP_BB_A_SRC 0x000000000ffULL
-#define OP_BB_BYTE 0x00000000300ULL
-#define OP_BB_B_SRC 0x0000003fc00ULL
-#define OP_BB_I8 0x00000040000ULL
-#define OP_BB_EQ 0x00000080000ULL
-#define OP_BB_DEFBR 0x00000300000ULL
-#define OP_BB_ADDR_LO 0x007ffc00000ULL
-#define OP_BB_ADDR_HI 0x10000000000ULL
-
-#define OP_BALU_BASE 0x0e800000000ULL
-#define OP_BA_A_SRC 0x000000003ffULL
-#define OP_BA_B_SRC 0x000000ffc00ULL
-#define OP_BA_DEFBR 0x00000300000ULL
-#define OP_BA_ADDR_HI 0x0007fc00000ULL
-
-#define OP_IMMED_A_SRC 0x000000003ffULL
-#define OP_IMMED_B_SRC 0x000000ffc00ULL
-#define OP_IMMED_IMM 0x0000ff00000ULL
-#define OP_IMMED_WIDTH 0x00060000000ULL
-#define OP_IMMED_INV 0x00080000000ULL
-#define OP_IMMED_SHIFT 0x00600000000ULL
-#define OP_IMMED_BASE 0x0f000000000ULL
-#define OP_IMMED_WR_AB 0x20000000000ULL
+#define OP_BBYTE_BASE 0x0c800000000ULL
+#define OP_BB_A_SRC 0x000000000ffULL
+#define OP_BB_BYTE 0x00000000300ULL
+#define OP_BB_B_SRC 0x0000003fc00ULL
+#define OP_BB_I8 0x00000040000ULL
+#define OP_BB_EQ 0x00000080000ULL
+#define OP_BB_DEFBR 0x00000300000ULL
+#define OP_BB_ADDR_LO 0x007ffc00000ULL
+#define OP_BB_ADDR_HI 0x10000000000ULL
+#define OP_BB_SRC_LMEXTN 0x40000000000ULL
+
+#define OP_BALU_BASE 0x0e800000000ULL
+#define OP_BA_A_SRC 0x000000003ffULL
+#define OP_BA_B_SRC 0x000000ffc00ULL
+#define OP_BA_DEFBR 0x00000300000ULL
+#define OP_BA_ADDR_HI 0x0007fc00000ULL
+
+#define OP_IMMED_A_SRC 0x000000003ffULL
+#define OP_IMMED_B_SRC 0x000000ffc00ULL
+#define OP_IMMED_IMM 0x0000ff00000ULL
+#define OP_IMMED_WIDTH 0x00060000000ULL
+#define OP_IMMED_INV 0x00080000000ULL
+#define OP_IMMED_SHIFT 0x00600000000ULL
+#define OP_IMMED_BASE 0x0f000000000ULL
+#define OP_IMMED_WR_AB 0x20000000000ULL
+#define OP_IMMED_SRC_LMEXTN 0x40000000000ULL
+#define OP_IMMED_DST_LMEXTN 0x80000000000ULL
enum immed_width {
IMMED_WIDTH_ALL = 0,
@@ -119,17 +132,19 @@ enum immed_shift {
IMMED_SHIFT_2B = 2,
};
-#define OP_SHF_BASE 0x08000000000ULL
-#define OP_SHF_A_SRC 0x000000000ffULL
-#define OP_SHF_SC 0x00000000300ULL
-#define OP_SHF_B_SRC 0x0000003fc00ULL
-#define OP_SHF_I8 0x00000040000ULL
-#define OP_SHF_SW 0x00000080000ULL
-#define OP_SHF_DST 0x0000ff00000ULL
-#define OP_SHF_SHIFT 0x001f0000000ULL
-#define OP_SHF_OP 0x00e00000000ULL
-#define OP_SHF_DST_AB 0x01000000000ULL
-#define OP_SHF_WR_AB 0x20000000000ULL
+#define OP_SHF_BASE 0x08000000000ULL
+#define OP_SHF_A_SRC 0x000000000ffULL
+#define OP_SHF_SC 0x00000000300ULL
+#define OP_SHF_B_SRC 0x0000003fc00ULL
+#define OP_SHF_I8 0x00000040000ULL
+#define OP_SHF_SW 0x00000080000ULL
+#define OP_SHF_DST 0x0000ff00000ULL
+#define OP_SHF_SHIFT 0x001f0000000ULL
+#define OP_SHF_OP 0x00e00000000ULL
+#define OP_SHF_DST_AB 0x01000000000ULL
+#define OP_SHF_WR_AB 0x20000000000ULL
+#define OP_SHF_SRC_LMEXTN 0x40000000000ULL
+#define OP_SHF_DST_LMEXTN 0x80000000000ULL
enum shf_op {
SHF_OP_NONE = 0,
@@ -139,24 +154,27 @@ enum shf_op {
enum shf_sc {
SHF_SC_R_ROT = 0,
+ SHF_SC_NONE = SHF_SC_R_ROT,
SHF_SC_R_SHF = 1,
SHF_SC_L_SHF = 2,
SHF_SC_R_DSHF = 3,
};
-#define OP_ALU_A_SRC 0x000000003ffULL
-#define OP_ALU_B_SRC 0x000000ffc00ULL
-#define OP_ALU_DST 0x0003ff00000ULL
-#define OP_ALU_SW 0x00040000000ULL
-#define OP_ALU_OP 0x00f80000000ULL
-#define OP_ALU_DST_AB 0x01000000000ULL
-#define OP_ALU_BASE 0x0a000000000ULL
-#define OP_ALU_WR_AB 0x20000000000ULL
+#define OP_ALU_A_SRC 0x000000003ffULL
+#define OP_ALU_B_SRC 0x000000ffc00ULL
+#define OP_ALU_DST 0x0003ff00000ULL
+#define OP_ALU_SW 0x00040000000ULL
+#define OP_ALU_OP 0x00f80000000ULL
+#define OP_ALU_DST_AB 0x01000000000ULL
+#define OP_ALU_BASE 0x0a000000000ULL
+#define OP_ALU_WR_AB 0x20000000000ULL
+#define OP_ALU_SRC_LMEXTN 0x40000000000ULL
+#define OP_ALU_DST_LMEXTN 0x80000000000ULL
enum alu_op {
ALU_OP_NONE = 0x00,
ALU_OP_ADD = 0x01,
- ALU_OP_NEG = 0x04,
+ ALU_OP_NOT = 0x04,
ALU_OP_AND = 0x08,
ALU_OP_SUB_C = 0x0d,
ALU_OP_ADD_C = 0x11,
@@ -170,26 +188,28 @@ enum alu_dst_ab {
ALU_DST_B = 1,
};
-#define OP_LDF_BASE 0x0c000000000ULL
-#define OP_LDF_A_SRC 0x000000000ffULL
-#define OP_LDF_SC 0x00000000300ULL
-#define OP_LDF_B_SRC 0x0000003fc00ULL
-#define OP_LDF_I8 0x00000040000ULL
-#define OP_LDF_SW 0x00000080000ULL
-#define OP_LDF_ZF 0x00000100000ULL
-#define OP_LDF_BMASK 0x0000f000000ULL
-#define OP_LDF_SHF 0x001f0000000ULL
-#define OP_LDF_WR_AB 0x20000000000ULL
-
-#define OP_CMD_A_SRC 0x000000000ffULL
-#define OP_CMD_CTX 0x00000000300ULL
-#define OP_CMD_B_SRC 0x0000003fc00ULL
-#define OP_CMD_TOKEN 0x000000c0000ULL
-#define OP_CMD_XFER 0x00001f00000ULL
-#define OP_CMD_CNT 0x0000e000000ULL
-#define OP_CMD_SIG 0x000f0000000ULL
-#define OP_CMD_TGT_CMD 0x07f00000000ULL
-#define OP_CMD_MODE 0x1c0000000000ULL
+#define OP_LDF_BASE 0x0c000000000ULL
+#define OP_LDF_A_SRC 0x000000000ffULL
+#define OP_LDF_SC 0x00000000300ULL
+#define OP_LDF_B_SRC 0x0000003fc00ULL
+#define OP_LDF_I8 0x00000040000ULL
+#define OP_LDF_SW 0x00000080000ULL
+#define OP_LDF_ZF 0x00000100000ULL
+#define OP_LDF_BMASK 0x0000f000000ULL
+#define OP_LDF_SHF 0x001f0000000ULL
+#define OP_LDF_WR_AB 0x20000000000ULL
+#define OP_LDF_SRC_LMEXTN 0x40000000000ULL
+#define OP_LDF_DST_LMEXTN 0x80000000000ULL
+
+#define OP_CMD_A_SRC 0x000000000ffULL
+#define OP_CMD_CTX 0x00000000300ULL
+#define OP_CMD_B_SRC 0x0000003fc00ULL
+#define OP_CMD_TOKEN 0x000000c0000ULL
+#define OP_CMD_XFER 0x00001f00000ULL
+#define OP_CMD_CNT 0x0000e000000ULL
+#define OP_CMD_SIG 0x000f0000000ULL
+#define OP_CMD_TGT_CMD 0x07f00000000ULL
+#define OP_CMD_MODE 0x1c0000000000ULL
struct cmd_tgt_act {
u8 token;
@@ -198,12 +218,17 @@ struct cmd_tgt_act {
enum cmd_tgt_map {
CMD_TGT_READ8,
- CMD_TGT_WRITE8,
+ CMD_TGT_WRITE8_SWAP,
+ CMD_TGT_READ32,
+ CMD_TGT_READ32_LE,
+ CMD_TGT_READ32_SWAP,
CMD_TGT_READ_LE,
CMD_TGT_READ_SWAP_LE,
__CMD_TGT_MAP_SIZE,
};
+extern const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE];
+
enum cmd_mode {
CMD_MODE_40b_AB = 0,
CMD_MODE_40b_BA = 1,
@@ -215,11 +240,13 @@ enum cmd_ctx_swap {
CMD_CTX_NO_SWAP = 3,
};
-#define OP_LCSR_BASE 0x0fc00000000ULL
-#define OP_LCSR_A_SRC 0x000000003ffULL
-#define OP_LCSR_B_SRC 0x000000ffc00ULL
-#define OP_LCSR_WRITE 0x00000200000ULL
-#define OP_LCSR_ADDR 0x001ffc00000ULL
+#define OP_LCSR_BASE 0x0fc00000000ULL
+#define OP_LCSR_A_SRC 0x000000003ffULL
+#define OP_LCSR_B_SRC 0x000000ffc00ULL
+#define OP_LCSR_WRITE 0x00000200000ULL
+#define OP_LCSR_ADDR 0x001ffc00000ULL
+#define OP_LCSR_SRC_LMEXTN 0x40000000000ULL
+#define OP_LCSR_DST_LMEXTN 0x80000000000ULL
enum lcsr_wr_src {
LCSR_WR_AREG,
@@ -227,7 +254,127 @@ enum lcsr_wr_src {
LCSR_WR_IMM,
};
-#define OP_CARB_BASE 0x0e000000000ULL
-#define OP_CARB_OR 0x00000010000ULL
+#define OP_CARB_BASE 0x0e000000000ULL
+#define OP_CARB_OR 0x00000010000ULL
+
+#define NFP_CSR_ACT_LM_ADDR0 0x64
+#define NFP_CSR_ACT_LM_ADDR1 0x6c
+#define NFP_CSR_ACT_LM_ADDR2 0x94
+#define NFP_CSR_ACT_LM_ADDR3 0x9c
+
+/* Software register representation, independent of operand type */
+#define NN_REG_TYPE GENMASK(31, 24)
+#define NN_REG_LM_IDX GENMASK(23, 22)
+#define NN_REG_LM_IDX_HI BIT(23)
+#define NN_REG_LM_IDX_LO BIT(22)
+#define NN_REG_LM_MOD GENMASK(21, 20)
+#define NN_REG_VAL GENMASK(7, 0)
+
+enum nfp_bpf_reg_type {
+ NN_REG_GPR_A = BIT(0),
+ NN_REG_GPR_B = BIT(1),
+ NN_REG_GPR_BOTH = NN_REG_GPR_A | NN_REG_GPR_B,
+ NN_REG_NNR = BIT(2),
+ NN_REG_XFER = BIT(3),
+ NN_REG_IMM = BIT(4),
+ NN_REG_NONE = BIT(5),
+ NN_REG_LMEM = BIT(6),
+};
+
+enum nfp_bpf_lm_mode {
+ NN_LM_MOD_NONE = 0,
+ NN_LM_MOD_INC,
+ NN_LM_MOD_DEC,
+};
+
+#define reg_both(x) __enc_swreg((x), NN_REG_GPR_BOTH)
+#define reg_a(x) __enc_swreg((x), NN_REG_GPR_A)
+#define reg_b(x) __enc_swreg((x), NN_REG_GPR_B)
+#define reg_nnr(x) __enc_swreg((x), NN_REG_NNR)
+#define reg_xfer(x) __enc_swreg((x), NN_REG_XFER)
+#define reg_imm(x) __enc_swreg((x), NN_REG_IMM)
+#define reg_none() __enc_swreg(0, NN_REG_NONE)
+#define reg_lm(x, off) __enc_swreg_lm((x), NN_LM_MOD_NONE, (off))
+#define reg_lm_inc(x) __enc_swreg_lm((x), NN_LM_MOD_INC, 0)
+#define reg_lm_dec(x) __enc_swreg_lm((x), NN_LM_MOD_DEC, 0)
+#define __reg_lm(x, mod, off) __enc_swreg_lm((x), (mod), (off))
+
+typedef __u32 __bitwise swreg;
+
+static inline swreg __enc_swreg(u16 id, u8 type)
+{
+ return (__force swreg)(id | FIELD_PREP(NN_REG_TYPE, type));
+}
+
+static inline swreg __enc_swreg_lm(u8 id, enum nfp_bpf_lm_mode mode, u8 off)
+{
+ WARN_ON(id > 3 || (off && mode != NN_LM_MOD_NONE));
+
+ return (__force swreg)(FIELD_PREP(NN_REG_TYPE, NN_REG_LMEM) |
+ FIELD_PREP(NN_REG_LM_IDX, id) |
+ FIELD_PREP(NN_REG_LM_MOD, mode) |
+ off);
+}
+
+static inline u32 swreg_raw(swreg reg)
+{
+ return (__force u32)reg;
+}
+
+static inline enum nfp_bpf_reg_type swreg_type(swreg reg)
+{
+ return FIELD_GET(NN_REG_TYPE, swreg_raw(reg));
+}
+
+static inline u16 swreg_value(swreg reg)
+{
+ return FIELD_GET(NN_REG_VAL, swreg_raw(reg));
+}
+
+static inline bool swreg_lm_idx(swreg reg)
+{
+ return FIELD_GET(NN_REG_LM_IDX_LO, swreg_raw(reg));
+}
+
+static inline bool swreg_lmextn(swreg reg)
+{
+ return FIELD_GET(NN_REG_LM_IDX_HI, swreg_raw(reg));
+}
+
+static inline enum nfp_bpf_lm_mode swreg_lm_mode(swreg reg)
+{
+ return FIELD_GET(NN_REG_LM_MOD, swreg_raw(reg));
+}
+
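To make the swreg encoding above concrete, here is a hedged, userspace-only sketch (not part of the patch) that mimics GENMASK()/FIELD_PREP() with explicit shifts; the values mirror the NN_REG_* layout defined in this hunk:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* reg_lm(1, 4): type NN_REG_LMEM (0x40) in bits 31:24,
		 * LM index 1 in bits 23:22, NN_LM_MOD_NONE (0) in
		 * bits 21:20, offset 4 in bits 7:0.
		 */
		uint32_t reg = (0x40u << 24) | (1u << 22) | (0u << 20) | 4u;

		printf("reg_lm(1, 4)   = %#x\n", (unsigned int)reg); /* 0x40400004 */
		printf("swreg_lm_idx() = %u\n", (reg >> 22) & 1);    /* 1, LM_IDX_LO */
		printf("swreg_lmextn() = %u\n", (reg >> 23) & 1);    /* 0, LM_IDX_HI */
		return 0;
	}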
+struct nfp_insn_ur_regs {
+ enum alu_dst_ab dst_ab;
+ u16 dst;
+ u16 areg, breg;
+ bool swap;
+ bool wr_both;
+ bool dst_lmextn;
+ bool src_lmextn;
+};
+
+struct nfp_insn_re_regs {
+ enum alu_dst_ab dst_ab;
+ u8 dst;
+ u8 areg, breg;
+ bool swap;
+ bool wr_both;
+ bool i8;
+ bool dst_lmextn;
+ bool src_lmextn;
+};
+
+int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
+ struct nfp_insn_ur_regs *reg);
+int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
+ struct nfp_insn_re_regs *reg, bool has_imm8);
+
+#define NFP_USTORE_PREFETCH_WINDOW 8
+
+int nfp_ustore_check_valid_no_ecc(u64 insn);
+u64 nfp_ustore_calc_ecc_insn(u64 insn);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index f8fa63b66739..35eaccbece36 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -346,6 +346,32 @@ exit_release_fw:
return err < 0 ? err : 1;
}
+static void
+nfp_nsp_init_ports(struct pci_dev *pdev, struct nfp_pf *pf,
+ struct nfp_nsp *nsp)
+{
+ bool needs_reinit = false;
+ int i;
+
+ pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
+ if (!pf->eth_tbl)
+ return;
+
+ if (!nfp_nsp_has_mac_reinit(nsp))
+ return;
+
+ for (i = 0; i < pf->eth_tbl->count; i++)
+ needs_reinit |= pf->eth_tbl->ports[i].override_changed;
+ if (!needs_reinit)
+ return;
+
+ kfree(pf->eth_tbl);
+ if (nfp_nsp_mac_reinit(nsp))
+ dev_warn(&pdev->dev, "MAC reinit failed\n");
+
+ pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
+}
+
static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf)
{
struct nfp_nsp *nsp;
@@ -366,7 +392,7 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf)
if (err < 0)
goto exit_close_nsp;
- pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
+ nfp_nsp_init_ports(pdev, pf, nsp);
pf->nspi = __nfp_nsp_identify(nsp);
if (pf->nspi)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index d51d8237b984..7f9857c276b1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -394,6 +394,7 @@ struct nfp_net_rx_ring {
* @tx_lso: Counter of LSO packets sent
* @tx_errors: How many TX errors were encountered
* @tx_busy: How often was TX busy (no space)?
+ * @rx_replace_buf_alloc_fail: Counter of RX buffer allocation failures
* @irq_vector: Interrupt vector number (use for talking to the OS)
* @handler: Interrupt handler for this ring vector
* @name: Name of the interrupt vector
@@ -437,6 +438,8 @@ struct nfp_net_r_vector {
u64 hw_csum_tx_inner;
u64 tx_gather;
u64 tx_lso;
+
+ u64 rx_replace_buf_alloc_fail;
u64 tx_errors;
u64 tx_busy;
@@ -473,7 +476,6 @@ struct nfp_stat_pair {
* @dev: Backpointer to struct device
* @netdev: Backpointer to net_device structure
* @is_vf: Is the driver attached to a VF?
- * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
* @bpf_offload_xdp: Offloaded BPF program is XDP
 * @chained_metadata_format: Firmware will use new metadata format
* @rx_dma_dir: Mapping direction for RX buffers
@@ -499,7 +501,6 @@ struct nfp_net_dp {
struct net_device *netdev;
u8 is_vf:1;
- u8 bpf_offload_skip_sw:1;
u8 bpf_offload_xdp:1;
u8 chained_metadata_format:1;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index e118b5f23996..232044b1b7aa 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -177,9 +177,9 @@ static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
return timed_out ? -EIO : 0;
}
-static void nfp_net_reconfig_timer(unsigned long data)
+static void nfp_net_reconfig_timer(struct timer_list *t)
{
- struct nfp_net *nn = (void *)data;
+ struct nfp_net *nn = from_timer(nn, t, reconfig_timer);
spin_lock_bh(&nn->reconfig_lock);
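A brief aside on the pattern above (not part of the patch): with the 4.15 timer API the callback receives the timer_list pointer and recovers its container via from_timer(), a container_of() wrapper keyed on the member name, while timer_setup() replaces setup_timer()'s opaque unsigned long cookie (see the nfp_net_alloc() hunk further down). A minimal kernel-style sketch on a hypothetical struct foo:

	#include <linux/timer.h>

	struct foo {
		struct timer_list timer;
		unsigned long ticks;
	};

	static void foo_timer_fn(struct timer_list *t)
	{
		struct foo *f = from_timer(f, t, timer); /* container_of() */

		f->ticks++;
	}

	/* at init, instead of setup_timer(&f->timer, fn, (unsigned long)f): */
	timer_setup(&f->timer, foo_timer_fn, 0);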
@@ -1209,15 +1209,15 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
if (!dp->xdp_prog) {
frag = napi_alloc_frag(dp->fl_bufsz);
+ if (unlikely(!frag))
+ return NULL;
} else {
struct page *page;
- page = alloc_page(GFP_ATOMIC | __GFP_COLD);
- frag = page ? page_address(page) : NULL;
- }
- if (!frag) {
- nn_dp_warn(dp, "Failed to alloc receive page frag\n");
- return NULL;
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return NULL;
+ frag = page_address(page);
}
*dma_addr = nfp_net_dma_map_rx(dp, frag);
@@ -1514,6 +1514,11 @@ nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
{
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_drops++;
+	/* If we have both skb and rxbuf, the replacement buffer allocation
+	 * must have failed, so count this as an alloc failure.
+ */
+ if (skb && rxbuf)
+ r_vec->rx_replace_buf_alloc_fail++;
u64_stats_update_end(&r_vec->rx_sync);
	/* skb is built based on the frag, free_skb() would free the frag
@@ -1582,26 +1587,6 @@ nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
return true;
}
-static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start,
- unsigned int *off, unsigned int *len)
-{
- struct xdp_buff xdp;
- void *orig_data;
- int ret;
-
- xdp.data_hard_start = hard_start;
- xdp.data = data + *off;
- xdp.data_end = data + *off + *len;
-
- orig_data = xdp.data;
- ret = bpf_prog_run_xdp(prog, &xdp);
-
- *len -= xdp.data - orig_data;
- *off += xdp.data - orig_data;
-
- return ret;
-}
-
/**
* nfp_net_rx() - receive up to @budget packets on @rx_ring
* @rx_ring: RX ring to receive from
@@ -1637,6 +1622,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_meta_parsed meta;
struct net_device *netdev;
dma_addr_t new_dma_addr;
+ u32 meta_len_xdp = 0;
void *new_frag;
idx = D_IDX(rx_ring, rx_ring->rd_p);
@@ -1715,16 +1701,24 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
dp->bpf_offload_xdp) && !meta.portid) {
+ void *orig_data = rxbuf->frag + pkt_off;
unsigned int dma_off;
- void *hard_start;
+ struct xdp_buff xdp;
int act;
- hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
+ xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
+ xdp.data = orig_data;
+ xdp.data_meta = orig_data;
+ xdp.data_end = orig_data + pkt_len;
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ pkt_len -= xdp.data - orig_data;
+ pkt_off += xdp.data - orig_data;
- act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start,
- &pkt_off, &pkt_len);
switch (act) {
case XDP_PASS:
+ meta_len_xdp = xdp.data - xdp.data_meta;
break;
case XDP_TX:
dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
@@ -1792,6 +1786,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(rxd->rxd.vlan));
+ if (meta_len_xdp)
+ skb_metadata_set(skb, meta_len_xdp);
napi_gro_receive(&rx_ring->r_vec->napi, skb);
}
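For orientation (an annotation, not patch content): the inlined XDP run above keeps the standard xdp_buff pointer invariants. The program may move xdp.data with bpf_xdp_adjust_head() and xdp.data_meta with bpf_xdp_adjust_meta(), both staying within the headroom:

	data_hard_start <= data_meta <= data <= data_end

pkt_len and pkt_off are then fixed up by the data delta, and on XDP_PASS the metadata length (xdp.data - xdp.data_meta) is carried in meta_len_xdp and handed to skb_metadata_set() once the skb is built.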
@@ -3382,7 +3378,7 @@ nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
return 0;
}
-static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
+static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
{
struct nfp_net *nn = netdev_priv(netdev);
@@ -3397,6 +3393,14 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
xdp->prog_attached = XDP_ATTACHED_HW;
xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
return 0;
+ case BPF_OFFLOAD_VERIFIER_PREP:
+ return nfp_app_bpf_verifier_prep(nn->app, nn, xdp);
+ case BPF_OFFLOAD_TRANSLATE:
+ return nfp_app_bpf_translate(nn->app, nn,
+ xdp->offload.prog);
+ case BPF_OFFLOAD_DESTROY:
+ return nfp_app_bpf_destroy(nn->app, nn,
+ xdp->offload.prog);
default:
return -EINVAL;
}
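A hedged note on the new commands, roughly in program-lifetime order as the core issues them: BPF_OFFLOAD_VERIFIER_PREP lets the driver attach its state before the kernel verifier walks the program, BPF_OFFLOAD_TRANSLATE asks it to translate the verified program for the NFP, and BPF_OFFLOAD_DESTROY tears the offloaded program down when it is freed. The ndo_xdp to ndo_bpf rename below reflects that struct netdev_bpf now multiplexes more than XDP attach/query.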
@@ -3445,7 +3449,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_get_phys_port_name = nfp_port_get_phys_port_name,
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
- .ndo_xdp = nfp_net_xdp,
+ .ndo_bpf = nfp_net_xdp,
};
/**
@@ -3546,8 +3550,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
spin_lock_init(&nn->reconfig_lock);
spin_lock_init(&nn->link_status_lock);
- setup_timer(&nn->reconfig_timer,
- nfp_net_reconfig_timer, (unsigned long)nn);
+ timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
return nn;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index b0a452ba9039..782d452e0fc2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -255,7 +255,7 @@
* @NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
*/
#define NFP_NET_CFG_BPF_ABI 0x0080
-#define NFP_NET_BPF_ABI 1
+#define NFP_NET_BPF_ABI 2
#define NFP_NET_CFG_BPF_CAP 0x0081
#define NFP_NET_BPF_CAP_RELO (1 << 0) /* seamless reload */
#define NFP_NET_CFG_BPF_MAX_LEN 0x0082
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index dc016dfec64d..60c8d733a37d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -181,7 +181,8 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
-#define NN_ET_RVEC_GATHER_STATS 7
+#define NN_RVEC_GATHER_STATS 8
+#define NN_RVEC_PER_Q_STATS 3
static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
@@ -243,6 +244,30 @@ nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
}
+static void
+nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
+ struct ethtool_link_ksettings *c)
+{
+ unsigned int modes;
+
+ ethtool_link_ksettings_add_link_mode(c, supported, FEC_NONE);
+ if (!nfp_eth_can_support_fec(eth_port)) {
+ ethtool_link_ksettings_add_link_mode(c, advertising, FEC_NONE);
+ return;
+ }
+
+ modes = nfp_eth_supported_fec_modes(eth_port);
+ if (modes & NFP_FEC_BASER) {
+ ethtool_link_ksettings_add_link_mode(c, supported, FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(c, advertising, FEC_BASER);
+ }
+
+ if (modes & NFP_FEC_REED_SOLOMON) {
+ ethtool_link_ksettings_add_link_mode(c, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(c, advertising, FEC_RS);
+ }
+}
+
/**
* nfp_net_get_link_ksettings - Get Link Speed settings
* @netdev: network interface device structure
@@ -277,9 +302,11 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
port = nfp_port_from_netdev(netdev);
eth_port = nfp_port_get_eth_port(port);
- if (eth_port)
+ if (eth_port) {
cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
AUTONEG_ENABLE : AUTONEG_DISABLE;
+ nfp_net_set_fec_link_mode(eth_port, cmd);
+ }
if (!netif_carrier_ok(netdev))
return 0;
@@ -327,7 +354,7 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
return -EOPNOTSUPP;
if (netif_running(netdev)) {
- netdev_warn(netdev, "Changing settings not allowed on an active interface. It may cause the port to be disabled until reboot.\n");
+ netdev_warn(netdev, "Changing settings not allowed on an active interface. It may cause the port to be disabled until driver reload.\n");
return -EBUSY;
}
@@ -427,7 +454,7 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- return NN_ET_RVEC_GATHER_STATS + nn->dp.num_r_vecs * 3;
+ return NN_RVEC_GATHER_STATS + nn->dp.num_r_vecs * NN_RVEC_PER_Q_STATS;
}
static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
@@ -444,6 +471,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
data = nfp_pr_et(data, "hw_rx_csum_ok");
data = nfp_pr_et(data, "hw_rx_csum_inner_ok");
data = nfp_pr_et(data, "hw_rx_csum_err");
+ data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
data = nfp_pr_et(data, "hw_tx_csum");
data = nfp_pr_et(data, "hw_tx_inner_csum");
data = nfp_pr_et(data, "tx_gather");
@@ -454,9 +482,9 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
{
- u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {};
+ u64 gathered_stats[NN_RVEC_GATHER_STATS] = {};
struct nfp_net *nn = netdev_priv(netdev);
- u64 tmp[NN_ET_RVEC_GATHER_STATS];
+ u64 tmp[NN_RVEC_GATHER_STATS];
unsigned int i, j;
for (i = 0; i < nn->dp.num_r_vecs; i++) {
@@ -468,25 +496,26 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
+ tmp[3] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
do {
start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
data[1] = nn->r_vecs[i].tx_pkts;
data[2] = nn->r_vecs[i].tx_busy;
- tmp[3] = nn->r_vecs[i].hw_csum_tx;
- tmp[4] = nn->r_vecs[i].hw_csum_tx_inner;
- tmp[5] = nn->r_vecs[i].tx_gather;
- tmp[6] = nn->r_vecs[i].tx_lso;
+ tmp[4] = nn->r_vecs[i].hw_csum_tx;
+ tmp[5] = nn->r_vecs[i].hw_csum_tx_inner;
+ tmp[6] = nn->r_vecs[i].tx_gather;
+ tmp[7] = nn->r_vecs[i].tx_lso;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
- data += 3;
+ data += NN_RVEC_PER_Q_STATS;
- for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
+ for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
gathered_stats[j] += tmp[j];
}
- for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
+ for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
*data++ = gathered_stats[j];
return data;
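The loops above are the reader side of the u64_stats seqcount pattern: snapshot the counters, then retry if a writer raced. A minimal sketch against the same r_vec fields (assumed usage, same API as in the hunk):

	unsigned int start;
	u64 pkts;

	do {
		start = u64_stats_fetch_begin(&r_vec->rx_sync);
		pkts = r_vec->rx_pkts;	/* consistent snapshot */
	} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));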
@@ -683,6 +712,91 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
}
}
+static int nfp_port_fec_ethtool_to_nsp(u32 fec)
+{
+ switch (fec) {
+ case ETHTOOL_FEC_AUTO:
+ return NFP_FEC_AUTO_BIT;
+ case ETHTOOL_FEC_OFF:
+ return NFP_FEC_DISABLED_BIT;
+ case ETHTOOL_FEC_RS:
+ return NFP_FEC_REED_SOLOMON_BIT;
+ case ETHTOOL_FEC_BASER:
+ return NFP_FEC_BASER_BIT;
+ default:
+ /* NSP only supports a single mode at a time */
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 nfp_port_fec_nsp_to_ethtool(u32 fec)
+{
+ u32 result = 0;
+
+ if (fec & NFP_FEC_AUTO)
+ result |= ETHTOOL_FEC_AUTO;
+ if (fec & NFP_FEC_BASER)
+ result |= ETHTOOL_FEC_BASER;
+ if (fec & NFP_FEC_REED_SOLOMON)
+ result |= ETHTOOL_FEC_RS;
+ if (fec & NFP_FEC_DISABLED)
+ result |= ETHTOOL_FEC_OFF;
+
+ return result ?: ETHTOOL_FEC_NONE;
+}
+
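A worked example of the two mappings above (derived from the bit definitions, not from the patch text): a port whose NSP advertises both FEC types carries fec_modes_supported = NFP_FEC_AUTO | NFP_FEC_BASER | NFP_FEC_REED_SOLOMON | NFP_FEC_DISABLED, which nfp_port_fec_nsp_to_ethtool() turns into ETHTOOL_FEC_AUTO | ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS | ETHTOOL_FEC_OFF, while a port with no FEC support falls back to ETHTOOL_FEC_NONE via the ?: shortcut. The reverse mapping is deliberately one-to-one because, per the comment, the NSP accepts only a single mode at a time.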
+static int
+nfp_port_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *param)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+
+ param->active_fec = ETHTOOL_FEC_NONE_BIT;
+ param->fec = ETHTOOL_FEC_NONE_BIT;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ if (!nfp_eth_can_support_fec(eth_port))
+ return 0;
+
+ param->fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec_modes_supported);
+ param->active_fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec);
+
+ return 0;
+}
+
+static int
+nfp_port_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *param)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ int err, fec;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ if (!nfp_eth_can_support_fec(eth_port))
+ return -EOPNOTSUPP;
+
+ fec = nfp_port_fec_ethtool_to_nsp(param->fec);
+ if (fec < 0)
+ return fec;
+
+ err = nfp_eth_set_fec(port->app->cpp, eth_port->index, fec);
+ if (!err)
+ /* Only refresh if we did something */
+ nfp_net_refresh_port_table(port);
+
+ return err < 0 ? err : 0;
+}
+
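With both hooks wired into the ethtool_ops tables below, the new paths should be reachable from userspace as ethtool --show-fec <dev> (get_fecparam) and ethtool --set-fec <dev> encoding rs|baser|auto|off (set_fecparam), assuming an ethtool build recent enough to carry FEC support.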
/* RX network flow classification (RSS, filters, etc)
*/
static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
@@ -1141,6 +1255,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_channels = nfp_net_set_channels,
.get_link_ksettings = nfp_net_get_link_ksettings,
.set_link_ksettings = nfp_net_set_link_ksettings,
+ .get_fecparam = nfp_port_get_fecparam,
+ .set_fecparam = nfp_port_set_fecparam,
};
const struct ethtool_ops nfp_port_ethtool_ops = {
@@ -1152,6 +1268,10 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
+ .get_link_ksettings = nfp_net_get_link_ksettings,
+ .set_link_ksettings = nfp_net_set_link_ksettings,
+ .get_fecparam = nfp_port_get_fecparam,
+ .set_fecparam = nfp_port_set_fecparam,
};
void nfp_net_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index ff373acd28f3..c505014121c4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -597,7 +597,7 @@ nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
return -EIO;
}
if (eth_port->override_changed) {
- nfp_warn(cpp, "Port #%d config changed, unregistering. Reboot required before port will be operational again.\n", port->eth_id);
+ nfp_warn(cpp, "Port #%d config changed, unregistering. Driver reload required before port will be operational again.\n", port->eth_id);
port->type = NFP_PORT_INVALID;
}
@@ -611,6 +611,7 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
struct nfp_eth_table *eth_table;
struct nfp_net *nn, *next;
struct nfp_port *port;
+ int err;
lockdep_assert_held(&pf->lock);
@@ -640,6 +641,11 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
kfree(eth_table);
+ /* Resync repr state. This may cause reprs to be removed. */
+ err = nfp_reprs_resync_phys_ports(pf->app);
+ if (err)
+ return err;
+
/* Shoot off the ports which became invalid */
list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
if (!nn->port || nn->port->type != NFP_PORT_INVALID)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index d540a9dc77b3..1bce8c131bb9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -390,3 +390,50 @@ struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
return reprs;
}
+
+int nfp_reprs_resync_phys_ports(struct nfp_app *app)
+{
+ struct nfp_reprs *reprs, *old_reprs;
+ struct nfp_repr *repr;
+ int i;
+
+ old_reprs =
+ rcu_dereference_protected(app->reprs[NFP_REPR_TYPE_PHYS_PORT],
+ lockdep_is_held(&app->pf->lock));
+ if (!old_reprs)
+ return 0;
+
+ reprs = nfp_reprs_alloc(old_reprs->num_reprs);
+ if (!reprs)
+ return -ENOMEM;
+
+ for (i = 0; i < old_reprs->num_reprs; i++) {
+ if (!old_reprs->reprs[i])
+ continue;
+
+ repr = netdev_priv(old_reprs->reprs[i]);
+ if (repr->port->type == NFP_PORT_INVALID)
+ continue;
+
+ reprs->reprs[i] = old_reprs->reprs[i];
+ }
+
+ old_reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
+ synchronize_rcu();
+
+ /* Now we free up removed representors */
+ for (i = 0; i < old_reprs->num_reprs; i++) {
+ if (!old_reprs->reprs[i])
+ continue;
+
+ repr = netdev_priv(old_reprs->reprs[i]);
+ if (repr->port->type != NFP_PORT_INVALID)
+ continue;
+
+ nfp_app_repr_stop(app, repr);
+ nfp_repr_clean(repr);
+ }
+
+ kfree(old_reprs);
+ return 0;
+}
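The function above is the usual RCU replace-and-reap shape; a hedged sketch of the skeleton (hypothetical helpers, real RCU primitives):

	new = copy_of_surviving_entries(old);	/* keep valid ports */
	rcu_assign_pointer(head, new);		/* publish; here via nfp_app_reprs_set() */
	synchronize_rcu();			/* wait out readers still on old */
	teardown_dropped_entries(old);		/* now safe: stop + clean reprs */
	kfree(old);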
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index 32179cad062a..5d4d897bc9c6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -124,5 +124,6 @@ void
nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
enum nfp_repr_type type);
struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs);
+int nfp_reprs_resync_phys_ports(struct nfp_app *app);
#endif /* NFP_NET_REPR_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
index e6d2e06b050c..8b1b962cf1d1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
@@ -112,7 +112,13 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
writew(get_unaligned_be16(mac + 4),
app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO);
- return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_MAC, "MAC");
+ err = nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_MAC, "MAC");
+ if (!err)
+ nfp_info(app->pf->cpp,
+ "MAC %pM set on VF %d, reload the VF driver to make this change effective.\n",
+ mac, vf);
+
+ return err;
}
int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 37364555c42b..14a6d1ba51a9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -477,6 +477,11 @@ int nfp_nsp_device_soft_reset(struct nfp_nsp *state)
return nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0);
}
+int nfp_nsp_mac_reinit(struct nfp_nsp *state)
+{
+ return nfp_nsp_command(state, SPCODE_MAC_INIT, 0, 0, 0);
+}
+
int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw)
{
return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, fw->size, fw->data,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index e2f028027c6f..650ca1a5bd21 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -48,6 +48,12 @@ u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
int nfp_nsp_wait(struct nfp_nsp *state);
int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
+int nfp_nsp_mac_reinit(struct nfp_nsp *state);
+
+static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
+{
+ return nfp_nsp_get_abi_ver_minor(state) > 20;
+}
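Note the two NSP ABI gates added in this series: MAC reinit is assumed available from ABI minor 21 upward (the > 20 check here), while the FEC fields in the port table are only parsed from minor 22 upward (the < 22 early return in nfp_eth_port_translate() below).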
enum nfp_eth_interface {
NFP_INTERFACE_NONE = 0,
@@ -73,6 +79,18 @@ enum nfp_eth_aneg {
NFP_ANEG_DISABLED,
};
+enum nfp_eth_fec {
+ NFP_FEC_AUTO_BIT = 0,
+ NFP_FEC_BASER_BIT,
+ NFP_FEC_REED_SOLOMON_BIT,
+ NFP_FEC_DISABLED_BIT,
+};
+
+#define NFP_FEC_AUTO BIT(NFP_FEC_AUTO_BIT)
+#define NFP_FEC_BASER BIT(NFP_FEC_BASER_BIT)
+#define NFP_FEC_REED_SOLOMON BIT(NFP_FEC_REED_SOLOMON_BIT)
+#define NFP_FEC_DISABLED BIT(NFP_FEC_DISABLED_BIT)
+
/**
* struct nfp_eth_table - ETH table information
* @count: number of table entries
@@ -87,6 +105,7 @@ enum nfp_eth_aneg {
* @speed: interface speed (in Mbps)
* @interface: interface (module) plugged in
* @media: media type of the @interface
+ * @fec: forward error correction mode
* @aneg: auto negotiation mode
* @mac_addr: interface MAC address
* @label_port: port id
@@ -99,6 +118,7 @@ enum nfp_eth_aneg {
* @port_type: one of %PORT_* defines for ethtool
* @port_lanes: total number of lanes on the port (sum of lanes of all subports)
* @is_split: is interface part of a split port
+ * @fec_modes_supported: bitmap of FEC modes supported
*/
struct nfp_eth_table {
unsigned int count;
@@ -114,6 +134,7 @@ struct nfp_eth_table {
unsigned int interface;
enum nfp_eth_media media;
+ enum nfp_eth_fec fec;
enum nfp_eth_aneg aneg;
u8 mac_addr[ETH_ALEN];
@@ -133,6 +154,8 @@ struct nfp_eth_table {
unsigned int port_lanes;
bool is_split;
+
+ unsigned int fec_modes_supported;
} ports[0];
};
@@ -143,6 +166,19 @@ __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp);
int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable);
int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx,
bool configed);
+int
+nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode);
+
+static inline bool nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port)
+{
+ return !!eth_port->fec_modes_supported;
+}
+
+static inline unsigned int
+nfp_eth_supported_fec_modes(struct nfp_eth_table_port *eth_port)
+{
+ return eth_port->fec_modes_supported;
+}
struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx);
int nfp_eth_config_commit_end(struct nfp_nsp *nsp);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index f6f7c085f8e0..7ca589660e4d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -55,6 +55,8 @@
#define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8)
#define NSP_ETH_PORT_LABEL GENMASK_ULL(53, 48)
#define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54)
+#define NSP_ETH_PORT_FEC_SUPP_BASER BIT_ULL(60)
+#define NSP_ETH_PORT_FEC_SUPP_RS BIT_ULL(61)
#define NSP_ETH_PORT_LANES_MASK cpu_to_le64(NSP_ETH_PORT_LANES)
@@ -67,6 +69,7 @@
#define NSP_ETH_STATE_MEDIA GENMASK_ULL(21, 20)
#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22)
#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23)
+#define NSP_ETH_STATE_FEC GENMASK_ULL(27, 26)
#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0)
#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
@@ -75,6 +78,7 @@
#define NSP_ETH_CTRL_SET_RATE BIT_ULL(4)
#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5)
#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6)
+#define NSP_ETH_CTRL_SET_FEC BIT_ULL(7)
enum nfp_eth_raw {
NSP_ETH_RAW_PORT = 0,
@@ -152,6 +156,7 @@ nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
unsigned int index, struct nfp_eth_table_port *dst)
{
unsigned int rate;
+ unsigned int fec;
u64 port, state;
port = le64_to_cpu(src->port);
@@ -183,6 +188,18 @@ nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
dst->override_changed = FIELD_GET(NSP_ETH_STATE_OVRD_CHNG, state);
dst->aneg = FIELD_GET(NSP_ETH_STATE_ANEG, state);
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 22)
+ return;
+
+ fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_BASER, port);
+ dst->fec_modes_supported |= fec << NFP_FEC_BASER_BIT;
+ fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_RS, port);
+ dst->fec_modes_supported |= fec << NFP_FEC_REED_SOLOMON_BIT;
+ if (dst->fec_modes_supported)
+ dst->fec_modes_supported |= NFP_FEC_AUTO | NFP_FEC_DISABLED;
+
+ dst->fec = 1 << FIELD_GET(NSP_ETH_STATE_FEC, state);
}
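A worked example for the bits above (not in the patch): on a port advertising only BASE-R support, FIELD_GET(NSP_ETH_PORT_FEC_SUPP_BASER, port) is 1, so fec_modes_supported becomes (1 << NFP_FEC_BASER_BIT) | NFP_FEC_AUTO | NFP_FEC_DISABLED = 0x2 | 0x1 | 0x8 = 0xb. And since the two-bit NSP_ETH_STATE_FEC field uses the same bit numbering as enum nfp_eth_fec, dst->fec = 1 << FIELD_GET(...) lands directly on the matching NFP_FEC_* flag.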
static void
@@ -469,10 +486,10 @@ int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed)
return nfp_eth_config_commit_end(nsp);
}
-/* Force inline, FIELD_* macroes require masks to be compilation-time known */
-static __always_inline int
+static int
nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
- const u64 mask, unsigned int val, const u64 ctrl_bit)
+ const u64 mask, const unsigned int shift,
+ unsigned int val, const u64 ctrl_bit)
{
union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
unsigned int idx = nfp_nsp_config_idx(nsp);
@@ -489,11 +506,11 @@ nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
/* Check if we are already in requested state */
reg = le64_to_cpu(entries[idx].raw[raw_idx]);
- if (val == FIELD_GET(mask, reg))
+ if (val == (reg & mask) >> shift)
return 0;
reg &= ~mask;
- reg |= FIELD_PREP(mask, val);
+ reg |= (val << shift) & mask;
entries[idx].raw[raw_idx] = cpu_to_le64(reg);
entries[idx].control |= cpu_to_le64(ctrl_bit);
@@ -503,6 +520,13 @@ nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
return 0;
}
+#define NFP_ETH_SET_BIT_CONFIG(nsp, raw_idx, mask, val, ctrl_bit) \
+ ({ \
+ __BF_FIELD_CHECK(mask, 0ULL, val, "NFP_ETH_SET_BIT_CONFIG: "); \
+ nfp_eth_set_bit_config(nsp, raw_idx, mask, __bf_shf(mask), \
+ val, ctrl_bit); \
+ })
+
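Concretely (a hedged expansion using the GENMASK values from the earlier hunk): FIELD_GET()/FIELD_PREP() demand compile-time-constant masks, so the wrapper computes the shift with __bf_shf() at each call site, where the mask is still a constant, and passes it to the now out-of-line function. For the autonegotiation caller below:

	/* NSP_ETH_STATE_ANEG == GENMASK_ULL(25, 23), so __bf_shf() yields 23 */
	NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
			       NSP_ETH_STATE_ANEG, mode, NSP_ETH_CTRL_SET_ANEG);
	/* roughly expands to: */
	nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE, NSP_ETH_STATE_ANEG, 23,
			       mode, NSP_ETH_CTRL_SET_ANEG);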
/**
* __nfp_eth_set_aneg() - set PHY autonegotiation control bit
* @nsp: NFP NSP handle returned from nfp_eth_config_start()
@@ -515,12 +539,59 @@ nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
*/
int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode)
{
- return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE,
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
NSP_ETH_STATE_ANEG, mode,
NSP_ETH_CTRL_SET_ANEG);
}
/**
+ * __nfp_eth_set_fec() - set PHY forward error correction control mode
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @mode:	Desired FEC mode
+ *
+ * Set the PHY module forward error correction mode.
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+static int __nfp_eth_set_fec(struct nfp_nsp *nsp, enum nfp_eth_fec mode)
+{
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
+ NSP_ETH_STATE_FEC, mode,
+ NSP_ETH_CTRL_SET_FEC);
+}
+
+/**
+ * nfp_eth_set_fec() - set PHY forward error correction control mode
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @mode:	Desired FEC mode
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int
+nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode)
+{
+ struct nfp_nsp *nsp;
+ int err;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ err = __nfp_eth_set_fec(nsp, mode);
+ if (err) {
+ nfp_eth_config_cleanup_end(nsp);
+ return err;
+ }
+
+ return nfp_eth_config_commit_end(nsp);
+}
+
+/**
* __nfp_eth_set_speed() - set interface speed/rate
* @nsp: NFP NSP handle returned from nfp_eth_config_start()
* @speed: Desired speed (per lane)
@@ -544,7 +615,7 @@ int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed)
return -EINVAL;
}
- return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE,
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
NSP_ETH_STATE_RATE, rate,
NSP_ETH_CTRL_SET_RATE);
}
@@ -561,6 +632,6 @@ int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed)
*/
int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes)
{
- return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES,
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES,
lanes, NSP_ETH_CTRL_SET_LANES);
}