16 files changed, 1092 insertions(+), 55 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index 532813d8d028..244de500963e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -12,4 +12,5 @@ rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \ rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o \ - rvu_rep.o cn20k/mbox_init.o + rvu_rep.o cn20k/mbox_init.o cn20k/nix.o cn20k/debugfs.o \ + cn20k/npa.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c new file mode 100644 index 000000000000..498968bf4cf5 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. + * + */ + +#include <linux/fs.h> +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "struct.h" +#include "debugfs.h" + +void print_nix_cn20k_sq_ctx(struct seq_file *m, + struct nix_cn20k_sq_ctx_s *sq_ctx) +{ + seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n", + sq_ctx->ena, sq_ctx->qint_idx); + seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n", + sq_ctx->substream, sq_ctx->sdp_mcast); + seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n", + sq_ctx->cq, sq_ctx->sqe_way_mask); + + seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n", + sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff); + seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n", + sq_ctx->sso_ena, sq_ctx->smq_rr_weight); + seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n", + sq_ctx->default_chan, sq_ctx->sqb_count); + + seq_printf(m, "W1: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb); + seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub); + seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n", + sq_ctx->sqb_aura, sq_ctx->sq_int); + seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n", + sq_ctx->sq_int_ena, sq_ctx->sqe_stype); + + seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n", + sq_ctx->max_sqe_size, sq_ctx->cq_limit); + seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n", + sq_ctx->lmt_dis, sq_ctx->mnq_dis); + seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n", + sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum); + seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n", + sq_ctx->tail_offset, sq_ctx->smenq_offset); + seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n", + sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld); + + seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n", + sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend); + seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb); + seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb); + seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb); + seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n", + sq_ctx->smenq_next_sqb); + + seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb); + + seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total); + seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n", + sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb); + seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: 
vfi_lso_vlan0_ins_ena\t%d\n", + sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena); + seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n", + sq_ctx->vfi_lso_vlan1_ins_ena, sq_ctx->vfi_lso_vld); + + seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n", + (u64)sq_ctx->scm_lso_rem); + seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs); + seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts); + seq_printf(m, "W13: aged_drop_octs \t\t\t%llu\n\n", + (u64)sq_ctx->aged_drop_octs); + seq_printf(m, "W13: aged_drop_pkts \t\t\t%llu\n\n", + (u64)sq_ctx->aged_drop_pkts); + seq_printf(m, "W14: dropped_octs \t\t%llu\n\n", + (u64)sq_ctx->dropped_octs); + seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n", + (u64)sq_ctx->dropped_pkts); +} + +void print_nix_cn20k_cq_ctx(struct seq_file *m, + struct nix_cn20k_aq_enq_rsp *rsp) +{ + struct nix_cn20k_cq_ctx_s *cq_ctx = &rsp->cq; + + seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base); + + seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr); + seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n", + cq_ctx->avg_con, cq_ctx->cint_idx); + seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n", + cq_ctx->cq_err, cq_ctx->qint_idx); + seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n", + cq_ctx->bpid, cq_ctx->bp_ena); + + seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high); + seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med); + seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low); + seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n", + cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 | + cq_ctx->lbpid_low); + seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena); + + seq_printf(m, "W2: update_time \t\t%d\nW2: avg_level \t\t\t%d\n", + cq_ctx->update_time, cq_ctx->avg_level); + seq_printf(m, "W2: head \t\t\t%d\nW2: tail \t\t\t%d\n\n", + cq_ctx->head, cq_ctx->tail); + + seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3: cq_err_int \t\t\t%d\n", + cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int); + seq_printf(m, "W3: qsize \t\t\t%d\nW3: stashing \t\t\t%d\n", + cq_ctx->qsize, cq_ctx->stashing); + + seq_printf(m, "W3: caching \t\t\t%d\n", cq_ctx->caching); + seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac); + seq_printf(m, "W3: stash_thresh \t\t\t%d\n", + cq_ctx->stash_thresh); + + seq_printf(m, "W3: msh_valid \t\t\t%d\nW3: msh_dst \t\t\t%d\n", + cq_ctx->msh_valid, cq_ctx->msh_dst); + + seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n", + cq_ctx->cpt_drop_err_en); + seq_printf(m, "W3: ena \t\t\t%d\n", + cq_ctx->ena); + seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n", + cq_ctx->drop_ena, cq_ctx->drop); + seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp); + + seq_printf(m, "W4: lbpid_ext \t\t\t\t%d\n\n", cq_ctx->lbpid_ext); + seq_printf(m, "W4: bpid_ext \t\t\t\t%d\n\n", cq_ctx->bpid_ext); +} + +void print_npa_cn20k_aura_ctx(struct seq_file *m, + struct npa_cn20k_aq_enq_rsp *rsp) +{ + struct npa_cn20k_aura_s *aura = &rsp->aura; + + seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr); + + seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n", + aura->ena, aura->pool_caching); + seq_printf(m, "W1: avg con\t\t%d\n", aura->avg_con); + seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n", + aura->pool_drop_ena, aura->aura_drop_ena); + seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n", + aura->bp_ena, aura->aura_drop); + seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n", + aura->shift, aura->avg_level); + + seq_printf(m, "W2: count\t\t%llu\nW2: 
nix_bpid\t\t%d\n", + (u64)aura->count, aura->bpid); + + seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n", + (u64)aura->limit, aura->bp, aura->fc_ena); + + seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n", + aura->fc_up_crossing, aura->fc_stype); + seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits); + + seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr); + + seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n", + aura->pool_drop, aura->update_time); + seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n", + aura->err_int, aura->err_int_ena); + seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n", + aura->thresh_int, aura->thresh_int_ena); + seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n", + aura->thresh_up, aura->thresh_qint_idx); + seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx); + + seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh); + seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst); +} + +void print_npa_cn20k_pool_ctx(struct seq_file *m, + struct npa_cn20k_aq_enq_rsp *rsp) +{ + struct npa_cn20k_pool_s *pool = &rsp->pool; + + seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base); + + seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n", + pool->ena, pool->nat_align); + seq_printf(m, "W1: stack_caching\t%d\n", + pool->stack_caching); + seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n", + pool->buf_offset, pool->buf_size); + + seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n", + pool->stack_max_pages, pool->stack_pages); + + seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n", + pool->stack_offset, pool->shift, pool->avg_level); + seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n", + pool->avg_con, pool->fc_ena, pool->fc_stype); + seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n", + pool->fc_hyst_bits, pool->fc_up_crossing); + seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time); + + seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr); + + seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start); + + seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end); + + seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n", + pool->err_int, pool->err_int_ena); + seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int); + seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n", + pool->thresh_int_ena, pool->thresh_up); + seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n", + pool->thresh_qint_idx, pool->err_qint_idx); + seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h new file mode 100644 index 000000000000..a2e3a2cd6edb --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#ifndef DEBUGFS_H +#define DEBUGFS_H + +#include <linux/fs.h> +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "struct.h" +#include "../mbox.h" + +void print_nix_cn20k_sq_ctx(struct seq_file *m, + struct nix_cn20k_sq_ctx_s *sq_ctx); +void print_nix_cn20k_cq_ctx(struct seq_file *m, + struct nix_cn20k_aq_enq_rsp *rsp); +void print_npa_cn20k_aura_ctx(struct seq_file *m, + struct npa_cn20k_aq_enq_rsp *rsp); +void print_npa_cn20k_pool_ctx(struct seq_file *m, + struct npa_cn20k_aq_enq_rsp *rsp); + +#endif diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c new file mode 100644 index 000000000000..aa2016fd1bba --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. + * + */ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "struct.h" +#include "../rvu.h" + +int rvu_mbox_handler_nix_cn20k_aq_enq(struct rvu *rvu, + struct nix_cn20k_aq_enq_req *req, + struct nix_cn20k_aq_enq_rsp *rsp) +{ + return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req, + (struct nix_aq_enq_rsp *)rsp); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c new file mode 100644 index 000000000000..fe8f926c8b75 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. + * + */ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "struct.h" +#include "../rvu.h" + +int rvu_mbox_handler_npa_cn20k_aq_enq(struct rvu *rvu, + struct npa_cn20k_aq_enq_req *req, + struct npa_cn20k_aq_enq_rsp *rsp) +{ + return rvu_npa_aq_enq_inst(rvu, (struct npa_aq_enq_req *)req, + (struct npa_aq_enq_rsp *)rsp); +} +EXPORT_SYMBOL(rvu_mbox_handler_npa_cn20k_aq_enq); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h index 76ce3ec6da9c..763f6cabd7c2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h @@ -8,6 +8,8 @@ #ifndef STRUCT_H #define STRUCT_H +#define NIX_MAX_CTX_SIZE 128 + /* * CN20k RVU PF MBOX Interrupt Vector Enumeration * @@ -37,4 +39,342 @@ enum rvu_af_cn20k_int_vec_e { RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1 = 0x9, RVU_AF_CN20K_INT_VEC_CNT = 0xa, }; + +struct nix_cn20k_sq_ctx_s { + u64 ena : 1; /* W0 */ + u64 qint_idx : 6; + u64 substream : 20; + u64 sdp_mcast : 1; + u64 cq : 20; + u64 sqe_way_mask : 16; + u64 smq : 11; /* W1 */ + u64 cq_ena : 1; + u64 xoff : 1; + u64 sso_ena : 1; + u64 smq_rr_weight : 14; + u64 default_chan : 12; + u64 sqb_count : 16; + u64 reserved_120_120 : 1; + u64 smq_rr_count_lb : 7; + u64 smq_rr_count_ub : 25; /* W2 */ + u64 sqb_aura : 20; + u64 sq_int : 8; + u64 sq_int_ena : 8; + u64 sqe_stype : 2; + u64 reserved_191_191 : 1; + u64 max_sqe_size : 2; /* W3 */ + u64 cq_limit : 8; + u64 lmt_dis : 1; + u64 mnq_dis : 1; + u64 smq_next_sq : 20; + u64 smq_lso_segnum : 8; + u64 tail_offset : 6; + u64 smenq_offset : 6; + u64 head_offset : 6; + u64 smenq_next_sqb_vld : 1; + u64 smq_pend : 1; + u64 smq_next_sq_vld : 1; + u64 reserved_253_255 : 3; + u64 next_sqb : 64; /* W4 */ + u64 tail_sqb : 64; /* W5 */ + u64 smenq_sqb : 64; /* W6 */ + u64 smenq_next_sqb : 64; /* W7 */ 
+ u64 head_sqb : 64; /* W8 */ + u64 reserved_576_583 : 8; /* W9 */ + u64 vfi_lso_total : 18; + u64 vfi_lso_sizem1 : 3; + u64 vfi_lso_sb : 8; + u64 vfi_lso_mps : 14; + u64 vfi_lso_vlan0_ins_ena : 1; + u64 vfi_lso_vlan1_ins_ena : 1; + u64 vfi_lso_vld : 1; + u64 reserved_630_639 : 10; + u64 scm_lso_rem : 18; /* W10 */ + u64 reserved_658_703 : 46; + u64 octs : 48; /* W11 */ + u64 reserved_752_767 : 16; + u64 pkts : 48; /* W12 */ + u64 reserved_816_831 : 16; + u64 aged_drop_octs : 32; /* W13 */ + u64 aged_drop_pkts : 32; + u64 dropped_octs : 48; /* W14 */ + u64 reserved_944_959 : 16; + u64 dropped_pkts : 48; /* W15 */ + u64 reserved_1008_1023 : 16; +}; + +static_assert(sizeof(struct nix_cn20k_sq_ctx_s) == NIX_MAX_CTX_SIZE); + +struct nix_cn20k_cq_ctx_s { + u64 base : 64; /* W0 */ + u64 lbp_ena : 1; /* W1 */ + u64 lbpid_low : 3; + u64 bp_ena : 1; + u64 lbpid_med : 3; + u64 bpid : 9; + u64 lbpid_high : 3; + u64 qint_idx : 7; + u64 cq_err : 1; + u64 cint_idx : 7; + u64 avg_con : 9; + u64 wrptr : 20; + u64 tail : 20; /* W2 */ + u64 head : 20; + u64 avg_level : 8; + u64 update_time : 16; + u64 bp : 8; /* W3 */ + u64 drop : 8; + u64 drop_ena : 1; + u64 ena : 1; + u64 cpt_drop_err_en : 1; + u64 reserved_211_211 : 1; + u64 msh_dst : 11; + u64 msh_valid : 1; + u64 stash_thresh : 4; + u64 lbp_frac : 4; + u64 caching : 1; + u64 stashing : 1; + u64 reserved_234_235 : 2; + u64 qsize : 4; + u64 cq_err_int : 8; + u64 cq_err_int_ena : 8; + u64 bpid_ext : 2; /* W4 */ + u64 reserved_258_259 : 2; + u64 lbpid_ext : 2; + u64 reserved_262_319 : 58; + u64 reserved_320_383 : 64; /* W5 */ + u64 reserved_384_447 : 64; /* W6 */ + u64 reserved_448_511 : 64; /* W7 */ + u64 padding[8]; +}; + +static_assert(sizeof(struct nix_cn20k_cq_ctx_s) == NIX_MAX_CTX_SIZE); + +struct nix_cn20k_rq_ctx_s { + u64 ena : 1; + u64 sso_ena : 1; + u64 ipsech_ena : 1; + u64 ena_wqwd : 1; + u64 cq : 20; + u64 reserved_24_34 : 11; + u64 port_il4_dis : 1; + u64 port_ol4_dis : 1; + u64 lenerr_dis : 1; + u64 csum_il4_dis : 1; + u64 csum_ol4_dis : 1; + u64 len_il4_dis : 1; + u64 len_il3_dis : 1; + u64 len_ol4_dis : 1; + u64 len_ol3_dis : 1; + u64 wqe_aura : 20; + u64 spb_aura : 20; + u64 lpb_aura : 20; + u64 sso_grp : 10; + u64 sso_tt : 2; + u64 pb_caching : 2; + u64 wqe_caching : 1; + u64 xqe_drop_ena : 1; + u64 spb_drop_ena : 1; + u64 lpb_drop_ena : 1; + u64 pb_stashing : 1; + u64 ipsecd_drop_en : 1; + u64 chi_ena : 1; + u64 reserved_125_127 : 3; + u64 band_prof_id_l : 10; + u64 sso_fc_ena : 1; + u64 policer_ena : 1; + u64 spb_sizem1 : 6; + u64 wqe_skip : 2; + u64 spb_high_sizem1 : 3; + u64 spb_ena : 1; + u64 lpb_sizem1 : 12; + u64 first_skip : 7; + u64 reserved_171_171 : 1; + u64 later_skip : 6; + u64 xqe_imm_size : 6; + u64 band_prof_id_h : 4; + u64 reserved_188_189 : 2; + u64 xqe_imm_copy : 1; + u64 xqe_hdr_split : 1; + u64 xqe_drop : 8; + u64 xqe_pass : 8; + u64 wqe_pool_drop : 8; + u64 wqe_pool_pass : 8; + u64 spb_aura_drop : 8; + u64 spb_aura_pass : 8; + u64 spb_pool_drop : 8; + u64 spb_pool_pass : 8; + u64 lpb_aura_drop : 8; + u64 lpb_aura_pass : 8; + u64 lpb_pool_drop : 8; + u64 lpb_pool_pass : 8; + u64 reserved_288_291 : 4; + u64 rq_int : 8; + u64 rq_int_ena : 8; + u64 qint_idx : 7; + u64 reserved_315_319 : 5; + u64 ltag : 24; + u64 good_utag : 8; + u64 bad_utag : 8; + u64 flow_tagw : 6; + u64 ipsec_vwqe : 1; + u64 vwqe_ena : 1; + u64 vtime_wait : 8; + u64 max_vsize_exp : 4; + u64 vwqe_skip : 2; + u64 reserved_382_383 : 2; + u64 octs : 48; + u64 reserved_432_447 : 16; + u64 pkts : 48; + u64 reserved_496_511 : 16; + u64 drop_octs : 48; + u64 
reserved_560_575 : 16; + u64 drop_pkts : 48; + u64 reserved_624_639 : 16; + u64 re_pkts : 48; + u64 reserved_688_703 : 16; + u64 reserved_704_767 : 64; + u64 reserved_768_831 : 64; + u64 reserved_832_895 : 64; + u64 reserved_896_959 : 64; + u64 reserved_960_1023 : 64; +}; + +static_assert(sizeof(struct nix_cn20k_rq_ctx_s) == NIX_MAX_CTX_SIZE); + +struct npa_cn20k_aura_s { + u64 pool_addr; /* W0 */ + u64 ena : 1; /* W1 */ + u64 reserved_65 : 2; + u64 pool_caching : 1; + u64 reserved_68 : 16; + u64 avg_con : 9; + u64 reserved_93 : 1; + u64 pool_drop_ena : 1; + u64 aura_drop_ena : 1; + u64 bp_ena : 1; + u64 reserved_97_103 : 7; + u64 aura_drop : 8; + u64 shift : 6; + u64 reserved_118_119 : 2; + u64 avg_level : 8; + u64 count : 36; /* W2 */ + u64 reserved_164_167 : 4; + u64 bpid : 12; + u64 reserved_180_191 : 12; + u64 limit : 36; /* W3 */ + u64 reserved_228_231 : 4; + u64 bp : 7; + u64 reserved_239_243 : 5; + u64 fc_ena : 1; + u64 fc_up_crossing : 1; + u64 fc_stype : 2; + u64 fc_hyst_bits : 4; + u64 reserved_252_255 : 4; + u64 fc_addr; /* W4 */ + u64 pool_drop : 8; /* W5 */ + u64 update_time : 16; + u64 err_int : 8; + u64 err_int_ena : 8; + u64 thresh_int : 1; + u64 thresh_int_ena : 1; + u64 thresh_up : 1; + u64 reserved_363 : 1; + u64 thresh_qint_idx : 7; + u64 reserved_371 : 1; + u64 err_qint_idx : 7; + u64 reserved_379_383 : 5; + u64 thresh : 36; /* W6*/ + u64 rsvd_423_420 : 4; + u64 fc_msh_dst : 11; + u64 reserved_435_438 : 4; + u64 op_dpc_ena : 1; + u64 op_dpc_set : 5; + u64 reserved_445_445 : 1; + u64 stream_ctx : 1; + u64 unified_ctx : 1; + u64 reserved_448_511; /* W7 */ + u64 padding[8]; +}; + +static_assert(sizeof(struct npa_cn20k_aura_s) == NIX_MAX_CTX_SIZE); + +struct npa_cn20k_pool_s { + u64 stack_base; /* W0 */ + u64 ena : 1; + u64 nat_align : 1; + u64 reserved_66_67 : 2; + u64 stack_caching : 1; + u64 reserved_69_87 : 19; + u64 buf_offset : 12; + u64 reserved_100_103 : 4; + u64 buf_size : 12; + u64 reserved_116_119 : 4; + u64 ref_cnt_prof : 3; + u64 reserved_123_127 : 5; + u64 stack_max_pages : 32; + u64 stack_pages : 32; + uint64_t bp_0 : 7; + uint64_t bp_1 : 7; + uint64_t bp_2 : 7; + uint64_t bp_3 : 7; + uint64_t bp_4 : 7; + uint64_t bp_5 : 7; + uint64_t bp_6 : 7; + uint64_t bp_7 : 7; + uint64_t bp_ena_0 : 1; + uint64_t bp_ena_1 : 1; + uint64_t bp_ena_2 : 1; + uint64_t bp_ena_3 : 1; + uint64_t bp_ena_4 : 1; + uint64_t bp_ena_5 : 1; + uint64_t bp_ena_6 : 1; + uint64_t bp_ena_7 : 1; + u64 stack_offset : 4; + u64 reserved_260_263 : 4; + u64 shift : 6; + u64 reserved_270_271 : 2; + u64 avg_level : 8; + u64 avg_con : 9; + u64 fc_ena : 1; + u64 fc_stype : 2; + u64 fc_hyst_bits : 4; + u64 fc_up_crossing : 1; + u64 reserved_297_299 : 3; + u64 update_time : 16; + u64 reserved_316_319 : 4; + u64 fc_addr; /* W5 */ + u64 ptr_start; /* W6 */ + u64 ptr_end; /* W7 */ + u64 bpid_0 : 12; + u64 reserved_524_535 : 12; + u64 err_int : 8; + u64 err_int_ena : 8; + u64 thresh_int : 1; + u64 thresh_int_ena : 1; + u64 thresh_up : 1; + u64 reserved_555 : 1; + u64 thresh_qint_idx : 7; + u64 reserved_563 : 1; + u64 err_qint_idx : 7; + u64 reserved_571_575 : 5; + u64 thresh : 36; + u64 rsvd_612_615 : 4; + u64 fc_msh_dst : 11; + u64 reserved_627_630 : 4; + u64 op_dpc_ena : 1; + u64 op_dpc_set : 5; + u64 reserved_637_637 : 1; + u64 stream_ctx : 1; + u64 reserved_639 : 1; + u64 reserved_640_703; /* W10 */ + u64 reserved_704_767; /* W11 */ + u64 reserved_768_831; /* W12 */ + u64 reserved_832_895; /* W13 */ + u64 reserved_896_959; /* W14 */ + u64 reserved_960_1023; /* W15 */ +}; + +static_assert(sizeof(struct 
npa_cn20k_pool_s) == NIX_MAX_CTX_SIZE); + #endif diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 933073cd2280..a3e273126e4e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -203,6 +203,8 @@ M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \ M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \ M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp) \ M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\ +M(NPA_CN20K_AQ_ENQ, 0x404, npa_cn20k_aq_enq, npa_cn20k_aq_enq_req, \ + npa_cn20k_aq_enq_rsp) \ /* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \ /* TIM mbox IDs (range 0x800 - 0x9FF) */ \ /* CPT mbox IDs (range 0xA00 - 0xBFF) */ \ @@ -336,6 +338,8 @@ M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, \ nix_mcast_grp_update_req, \ nix_mcast_grp_update_rsp) \ M(NIX_LF_STATS, 0x802e, nix_lf_stats, nix_stats_req, nix_stats_rsp) \ +M(NIX_CN20K_AQ_ENQ, 0x802f, nix_cn20k_aq_enq, nix_cn20k_aq_enq_req, \ + nix_cn20k_aq_enq_rsp) \ /* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \ M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \ mcs_alloc_rsrc_rsp) \ @@ -832,6 +836,39 @@ struct npa_aq_enq_rsp { }; }; +struct npa_cn20k_aq_enq_req { + struct mbox_msghdr hdr; + u32 aura_id; + u8 ctype; + u8 op; + union { + /* Valid when op == WRITE/INIT and ctype == AURA. + * LF fills the pool_id in aura.pool_addr. AF will translate + * the pool_id to pool context pointer. + */ + struct npa_cn20k_aura_s aura; + /* Valid when op == WRITE/INIT and ctype == POOL */ + struct npa_cn20k_pool_s pool; + }; + /* Mask data when op == WRITE (1=write, 0=don't write) */ + union { + /* Valid when op == WRITE and ctype == AURA */ + struct npa_cn20k_aura_s aura_mask; + /* Valid when op == WRITE and ctype == POOL */ + struct npa_cn20k_pool_s pool_mask; + }; +}; + +struct npa_cn20k_aq_enq_rsp { + struct mbox_msghdr hdr; + union { + /* Valid when op == READ and ctype == AURA */ + struct npa_cn20k_aura_s aura; + /* Valid when op == READ and ctype == POOL */ + struct npa_cn20k_pool_s pool; + }; +}; + /* Disable all contexts of type 'ctype' */ struct hwctx_disable_req { struct mbox_msghdr hdr; @@ -940,6 +977,42 @@ struct nix_lf_free_req { u64 flags; }; +/* CN20K NIX AQ enqueue msg */ +struct nix_cn20k_aq_enq_req { + struct mbox_msghdr hdr; + u32 qidx; + u8 ctype; + u8 op; + union { + struct nix_cn20k_rq_ctx_s rq; + struct nix_cn20k_sq_ctx_s sq; + struct nix_cn20k_cq_ctx_s cq; + struct nix_rsse_s rss; + struct nix_rx_mce_s mce; + struct nix_bandprof_s prof; + }; + union { + struct nix_cn20k_rq_ctx_s rq_mask; + struct nix_cn20k_sq_ctx_s sq_mask; + struct nix_cn20k_cq_ctx_s cq_mask; + struct nix_rsse_s rss_mask; + struct nix_rx_mce_s mce_mask; + struct nix_bandprof_s prof_mask; + }; +}; + +struct nix_cn20k_aq_enq_rsp { + struct mbox_msghdr hdr; + union { + struct nix_cn20k_rq_ctx_s rq; + struct nix_cn20k_sq_ctx_s sq; + struct nix_cn20k_cq_ctx_s cq; + struct nix_rsse_s rss; + struct nix_rx_mce_s mce; + struct nix_bandprof_s prof; + }; +}; + /* CN10K NIX AQ enqueue msg */ struct nix_cn10k_aq_enq_req { struct mbox_msghdr hdr; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index b58283341923..e85dac2c806d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -498,6 +498,14 @@ struct channel_fwdata { u8 reserved[RVU_CHANL_INFO_RESERVED]; }; 
+struct altaf_intr_notify { + unsigned long flr_pf_bmap[2]; + unsigned long flr_vf_bmap[2]; + unsigned long gint_paddr; + unsigned long gint_iova_addr; + unsigned long reserved[6]; +}; + struct rvu_fwdata { #define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/ #define RVU_FWDATA_VERSION 0x0001 @@ -517,7 +525,8 @@ struct rvu_fwdata { u32 ptp_ext_clk_rate; u32 ptp_ext_tstamp; struct channel_fwdata channel_data; -#define FWDATA_RESERVED_MEM 958 + struct altaf_intr_notify altaf_intr_info; +#define FWDATA_RESERVED_MEM 946 u64 reserved[FWDATA_RESERVED_MEM]; #define CGX_MAX 9 #define CGX_LMACS_MAX 4 @@ -648,6 +657,7 @@ struct rvu { struct mutex mbox_lock; /* Serialize mbox up and down msgs */ u16 rep_pcifunc; + bool altaf_ready; int rep_cnt; u16 *rep2pfvf_map; u8 rep_mode; @@ -1032,6 +1042,9 @@ void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc); int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf, int blkaddr, int nixlf); void rvu_block_bcast_xon(struct rvu *rvu, int blkaddr); +int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp); + /* NPC APIs */ void rvu_npc_freemem(struct rvu *rvu); int rvu_npc_get_pkind(struct rvu *rvu, u16 pf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 8375f18c8e07..7370812ece2a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -21,6 +21,8 @@ #include "rvu_npc_hash.h" #include "mcs.h" +#include "cn20k/debugfs.h" + #define DEBUGFS_DIR_NAME "octeontx2" enum { @@ -1101,6 +1103,11 @@ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) struct npa_aura_s *aura = &rsp->aura; struct rvu *rvu = m->private; + if (is_cn20k(rvu->pdev)) { + print_npa_cn20k_aura_ctx(m, (struct npa_cn20k_aq_enq_rsp *)rsp); + return; + } + seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr); seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n", @@ -1149,6 +1156,11 @@ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) struct npa_pool_s *pool = &rsp->pool; struct rvu *rvu = m->private; + if (is_cn20k(rvu->pdev)) { + print_npa_cn20k_pool_ctx(m, (struct npa_cn20k_aq_enq_rsp *)rsp); + return; + } + seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base); seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n", @@ -2009,10 +2021,16 @@ static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) struct nix_hw *nix_hw = m->private; struct rvu *rvu = nix_hw->rvu; + if (is_cn20k(rvu->pdev)) { + print_nix_cn20k_sq_ctx(m, (struct nix_cn20k_sq_ctx_s *)sq_ctx); + return; + } + if (!is_rvu_otx2(rvu)) { print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx); return; } + seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n", sq_ctx->sqe_way_mask, sq_ctx->cq); seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n", @@ -2103,7 +2121,9 @@ static void print_nix_cn10k_rq_ctx(struct seq_file *m, seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n", rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena); - seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id); + seq_printf(m, "W2: band_prof_id \t\t%d\n", + (u16)rq_ctx->band_prof_id_h << 10 | rq_ctx->band_prof_id); + seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena); seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1); seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n", @@ 
-2225,6 +2245,11 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) struct nix_hw *nix_hw = m->private; struct rvu *rvu = nix_hw->rvu; + if (is_cn20k(rvu->pdev)) { + print_nix_cn20k_cq_ctx(m, (struct nix_cn20k_aq_enq_rsp *)rsp); + return; + } + seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base); @@ -2254,6 +2279,7 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int); seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n", cq_ctx->qsize, cq_ctx->caching); + seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n", cq_ctx->substream, cq_ctx->ena); if (!is_rvu_otx2(rvu)) { @@ -2615,7 +2641,10 @@ static void print_band_prof_ctx(struct seq_file *m, (prof->rc_action == 1) ? "DROP" : "RED"; seq_printf(m, "W1: rc_action\t\t%s\n", str); seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo); - seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id); + + seq_printf(m, "W1: band_prof_id\t%d\n", + (u16)prof->band_prof_id_h << 7 | prof->band_prof_id); + seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en); seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts); @@ -2784,6 +2813,9 @@ static void rvu_dbg_npa_init(struct rvu *rvu) &rvu_dbg_npa_aura_ctx_fops); debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu, &rvu_dbg_npa_pool_ctx_fops); + + if (is_cn20k(rvu->pdev)) /* NDC not applicable for cn20k */ + return; debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu, &rvu_dbg_npa_ndc_cache_fops); debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu, @@ -3950,6 +3982,9 @@ static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr) static const char *rvu_get_dbg_dir_name(struct rvu *rvu) { + if (is_cn20k(rvu->pdev)) + return "cn20k"; + if (!is_rvu_otx2(rvu)) return "cn10k"; else diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 828316211b24..2f485a930edd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -1019,6 +1019,12 @@ static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req, { struct nix_cn10k_aq_enq_req *aq_req; + if (is_cn20k(rvu->pdev)) { + *smq = ((struct nix_cn20k_aq_enq_req *)req)->sq.smq; + *smq_mask = ((struct nix_cn20k_aq_enq_req *)req)->sq_mask.smq; + return; + } + if (!is_rvu_otx2(rvu)) { aq_req = (struct nix_cn10k_aq_enq_req *)req; *smq = aq_req->sq.smq; @@ -1149,36 +1155,36 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, case NIX_AQ_INSTOP_WRITE: if (req->ctype == NIX_AQ_CTYPE_RQ) memcpy(mask, &req->rq_mask, - sizeof(struct nix_rq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_SQ) memcpy(mask, &req->sq_mask, - sizeof(struct nix_sq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_CQ) memcpy(mask, &req->cq_mask, - sizeof(struct nix_cq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_RSS) memcpy(mask, &req->rss_mask, - sizeof(struct nix_rsse_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(mask, &req->mce_mask, - sizeof(struct nix_rx_mce_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) memcpy(mask, &req->prof_mask, - sizeof(struct nix_bandprof_s)); + NIX_MAX_CTX_SIZE); fallthrough; case NIX_AQ_INSTOP_INIT: if (req->ctype == NIX_AQ_CTYPE_RQ) - memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); + 
memcpy(ctx, &req->rq, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_SQ) - memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); + memcpy(ctx, &req->sq, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_CQ) - memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); + memcpy(ctx, &req->cq, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_RSS) - memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); + memcpy(ctx, &req->rss, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_MCE) - memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); + memcpy(ctx, &req->mce, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) - memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s)); + memcpy(ctx, &req->prof, NIX_MAX_CTX_SIZE); break; case NIX_AQ_INSTOP_NOP: case NIX_AQ_INSTOP_READ: @@ -1243,22 +1249,22 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, if (req->op == NIX_AQ_INSTOP_READ) { if (req->ctype == NIX_AQ_CTYPE_RQ) memcpy(&rsp->rq, ctx, - sizeof(struct nix_rq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_SQ) memcpy(&rsp->sq, ctx, - sizeof(struct nix_sq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_CQ) memcpy(&rsp->cq, ctx, - sizeof(struct nix_cq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_RSS) memcpy(&rsp->rss, ctx, - sizeof(struct nix_rsse_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(&rsp->mce, ctx, - sizeof(struct nix_rx_mce_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) memcpy(&rsp->prof, ctx, - sizeof(struct nix_bandprof_s)); + NIX_MAX_CTX_SIZE); } } @@ -1289,8 +1295,8 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, /* Make copy of original context & mask which are required * for resubmission */ - memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s)); - memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s)); + memcpy(&aq_req.cq_mask, &req->cq_mask, NIX_MAX_CTX_SIZE); + memcpy(&aq_req.cq, &req->cq, NIX_MAX_CTX_SIZE); /* exclude fields which HW can update */ aq_req.cq_mask.cq_err = 0; @@ -1309,7 +1315,7 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, * updated fields are masked out for request and response * comparison */ - for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64); + for (word = 0; word < NIX_MAX_CTX_SIZE / sizeof(u64); word++) { *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &= (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); @@ -1317,14 +1323,14 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); } - if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s))) + if (memcmp(&aq_req.cq, &aq_rsp.cq, NIX_MAX_CTX_SIZE)) return NIX_AF_ERR_AQ_CTX_RETRY_WRITE; return 0; } -static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, - struct nix_aq_enq_rsp *rsp) +int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp) { struct nix_hw *nix_hw; int err, retries = 5; @@ -5812,6 +5818,8 @@ static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw) } } +#define NIX_BW_PROF_HI_MASK GENMASK(10, 7) + static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, struct nix_hw *nix_hw, u16 pcifunc) { @@ -5850,7 +5858,8 @@ static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, return -EINVAL; ipolicer = &nix_hw->ipolicer[hi_layer]; - prof_idx = req->prof.band_prof_id; + prof_idx = FIELD_PREP(NIX_BW_PROF_HI_MASK, 
req->prof.band_prof_id_h); + prof_idx |= req->prof.band_prof_id; if (prof_idx >= ipolicer->band_prof.max || ipolicer->pfvf_map[prof_idx] != pcifunc) return -EINVAL; @@ -6015,8 +6024,10 @@ static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu, aq_req->op = NIX_AQ_INSTOP_WRITE; aq_req->qidx = leaf_prof; - aq_req->prof.band_prof_id = mid_prof; + aq_req->prof.band_prof_id = mid_prof & 0x7F; aq_req->prof_mask.band_prof_id = GENMASK(6, 0); + aq_req->prof.band_prof_id_h = FIELD_GET(NIX_BW_PROF_HI_MASK, mid_prof); + aq_req->prof_mask.band_prof_id_h = GENMASK(3, 0); aq_req->prof.hl_en = 1; aq_req->prof_mask.hl_en = 1; @@ -6025,6 +6036,8 @@ static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu, (struct nix_aq_enq_rsp *)aq_rsp); } +#define NIX_RQ_PROF_HI_MASK GENMASK(13, 10) + int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, u16 rq_idx, u16 match_id) { @@ -6056,7 +6069,8 @@ int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, return 0; /* Get the bandwidth profile ID mapped to this RQ */ - leaf_prof = aq_rsp.rq.band_prof_id; + leaf_prof = FIELD_PREP(NIX_RQ_PROF_HI_MASK, aq_rsp.rq.band_prof_id_h); + leaf_prof |= aq_rsp.rq.band_prof_id; ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER]; ipolicer->match_id[leaf_prof] = match_id; @@ -6094,7 +6108,10 @@ int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, * to different RQs and marked with same match_id * are rate limited in a aggregate fashion */ - mid_prof = aq_rsp.prof.band_prof_id; + mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK, + aq_rsp.prof.band_prof_id_h); + mid_prof |= aq_rsp.prof.band_prof_id; + rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, &aq_req, &aq_rsp, leaf_prof, mid_prof); @@ -6216,7 +6233,8 @@ static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, if (!aq_rsp.prof.hl_en) return; - mid_prof = aq_rsp.prof.band_prof_id; + mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK, aq_rsp.prof.band_prof_id_h); + mid_prof |= aq_rsp.prof.band_prof_id; ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; ipolicer->ref_count[mid_prof]--; /* If ref_count is zero, free mid layer profile */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c index 4f5ca5ab13a4..e2a33e46b48a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c @@ -464,6 +464,23 @@ int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req, return 0; } +static void npa_aq_ndc_config(struct rvu *rvu, struct rvu_block *block) +{ + u64 cfg; + + if (is_cn20k(rvu->pdev)) /* NDC not applicable to cn20k */ + return; + + /* Do not bypass NDC cache */ + cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG); + cfg &= ~0x03DULL; +#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING + /* Disable caching of stack pages */ + cfg |= 0x10ULL; +#endif + rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg); +} + static int npa_aq_init(struct rvu *rvu, struct rvu_block *block) { u64 cfg; @@ -479,14 +496,7 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block) rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg); #endif - /* Do not bypass NDC cache */ - cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG); - cfg &= ~0x03DULL; -#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING - /* Disable caching of stack pages */ - cfg |= 0x10ULL; -#endif - rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg); + npa_aq_ndc_config(rvu, block); /* For CN10K NPA BATCH DMA set 35 cache lines */ if (!is_rvu_otx2(rvu)) { @@ -567,6 +577,9 @@ int 
rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr) int bank, max_bank, line, max_line, err; u64 reg, ndc_af_const; + if (is_cn20k(rvu->pdev)) /* NDC not applicable to cn20k */ + return 0; + /* Set the ENABLE bit(63) to '0' */ reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL); rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0)); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index 0596a3ac4c12..8e868f815de1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -13,6 +13,8 @@ #define RVU_MULTI_BLK_VER 0x7ULL +#define NIX_MAX_CTX_SIZE 128 + /* RVU Block Address Enumeration */ enum rvu_block_addr_e { BLKADDR_RVUM = 0x0ULL, @@ -370,8 +372,12 @@ struct nix_cq_ctx_s { u64 qsize : 4; u64 cq_err_int : 8; u64 cq_err_int_ena : 8; + /* Ensure all context sizes are 128 bytes */ + u64 padding[12]; }; +static_assert(sizeof(struct nix_cq_ctx_s) == NIX_MAX_CTX_SIZE); + /* CN10K NIX Receive queue context structure */ struct nix_cn10k_rq_ctx_s { u64 ena : 1; @@ -413,7 +419,8 @@ struct nix_cn10k_rq_ctx_s { u64 rsvd_171 : 1; u64 later_skip : 6; u64 xqe_imm_size : 6; - u64 rsvd_189_184 : 6; + u64 band_prof_id_h : 4; + u64 rsvd_189_188 : 2; u64 xqe_imm_copy : 1; u64 xqe_hdr_split : 1; u64 xqe_drop : 8; /* W3 */ @@ -460,6 +467,8 @@ struct nix_cn10k_rq_ctx_s { u64 rsvd_1023_960; /* W15 */ }; +static_assert(sizeof(struct nix_cn10k_rq_ctx_s) == NIX_MAX_CTX_SIZE); + /* CN10K NIX Send queue context structure */ struct nix_cn10k_sq_ctx_s { u64 ena : 1; @@ -523,6 +532,8 @@ struct nix_cn10k_sq_ctx_s { u64 rsvd_1023_1008 : 16; }; +static_assert(sizeof(struct nix_cn10k_sq_ctx_s) == NIX_MAX_CTX_SIZE); + /* NIX Receive queue context structure */ struct nix_rq_ctx_s { u64 ena : 1; @@ -594,6 +605,8 @@ struct nix_rq_ctx_s { u64 rsvd_1023_960; /* W15 */ }; +static_assert(sizeof(struct nix_rq_ctx_s) == NIX_MAX_CTX_SIZE); + /* NIX sqe sizes */ enum nix_maxsqesz { NIX_MAXSQESZ_W16 = 0x0, @@ -668,13 +681,18 @@ struct nix_sq_ctx_s { u64 rsvd_1023_1008 : 16; }; +static_assert(sizeof(struct nix_sq_ctx_s) == NIX_MAX_CTX_SIZE); + /* NIX Receive side scaling entry structure*/ struct nix_rsse_s { uint32_t rq : 20; uint32_t reserved_20_31 : 12; - + /* Ensure all context sizes are minimum 128 bytes */ + u64 padding[15]; }; +static_assert(sizeof(struct nix_rsse_s) == NIX_MAX_CTX_SIZE); + /* NIX receive multicast/mirror entry structure */ struct nix_rx_mce_s { uint64_t op : 2; @@ -684,8 +702,12 @@ struct nix_rx_mce_s { uint64_t rsvd_31_24 : 8; uint64_t pf_func : 16; uint64_t next : 16; + /* Ensure all context sizes are minimum 128 bytes */ + u64 padding[15]; }; +static_assert(sizeof(struct nix_rx_mce_s) == NIX_MAX_CTX_SIZE); + enum nix_band_prof_layers { BAND_PROF_LEAF_LAYER = 0, BAND_PROF_INVAL_LAYER = 1, @@ -736,7 +758,8 @@ struct nix_bandprof_s { uint64_t rc_action : 2; uint64_t meter_algo : 2; uint64_t band_prof_id : 7; - uint64_t reserved_111_118 : 8; + uint64_t band_prof_id_h : 4; + uint64_t reserved_115_118 : 4; uint64_t hl_en : 1; uint64_t reserved_120_127 : 8; uint64_t ts : 48; /* W2 */ @@ -769,6 +792,8 @@ struct nix_bandprof_s { uint64_t reserved_1008_1023 : 16; }; +static_assert(sizeof(struct nix_bandprof_s) == NIX_MAX_CTX_SIZE); + enum nix_lsoalg { NIX_LSOALG_NOP, NIX_LSOALG_ADD_SEGNUM, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index bec7d5b4d7cc..3e1bf22cba69 100644 
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -15,6 +15,8 @@ static struct dev_hw_ops otx2_hw_ops = { .aura_freeptr = otx2_aura_freeptr, .refill_pool_ptrs = otx2_refill_pool_ptrs, .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler, + .aura_aq_init = otx2_aura_aq_init, + .pool_aq_init = otx2_pool_aq_init, }; static struct dev_hw_ops cn10k_hw_ops = { @@ -23,6 +25,8 @@ static struct dev_hw_ops cn10k_hw_ops = { .aura_freeptr = cn10k_aura_freeptr, .refill_pool_ptrs = cn10k_refill_pool_ptrs, .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler, + .aura_aq_init = otx2_aura_aq_init, + .pool_aq_init = otx2_pool_aq_init, }; void otx2_init_hw_ops(struct otx2_nic *pfvf) @@ -337,6 +341,12 @@ int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx, aq->rq.band_prof_id = policer; aq->rq_mask.band_prof_id = GENMASK(9, 0); + /* If policer id is greater than 1023 then it implies hardware supports + * more leaf profiles. In that case use band_prof_id_h for 4 MSBs. + */ + aq->rq.band_prof_id_h = policer >> 10; + aq->rq_mask.band_prof_id_h = GENMASK(3, 0); + /* Fill AQ info */ aq->qidx = rq_idx; aq->ctype = NIX_AQ_CTYPE_RQ; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c index ec8cde98076d..a60f8cf53feb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c @@ -10,17 +10,6 @@ #include "otx2_struct.h" #include "cn10k.h" -static struct dev_hw_ops cn20k_hw_ops = { - .pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler, - .vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler, - .pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler, -}; - -void cn20k_init(struct otx2_nic *pfvf) -{ - pfvf->hw_ops = &cn20k_hw_ops; -} -EXPORT_SYMBOL(cn20k_init); /* CN20K mbox AF => PFx irq handler */ irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq) { @@ -250,3 +239,212 @@ int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) return 0; } + +#define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */ + +static u8 cn20k_aura_bpid_idx(struct otx2_nic *pfvf, int aura_id) +{ +#ifdef CONFIG_DCB + return pfvf->queue_to_pfc_map[aura_id]; +#else + return 0; +#endif +} + +static int cn20k_aura_aq_init(struct otx2_nic *pfvf, int aura_id, + int pool_id, int numptrs) +{ + struct npa_cn20k_aq_enq_req *aq; + struct otx2_pool *pool; + u8 bpid_idx; + int err; + + pool = &pfvf->qset.pool[pool_id]; + + /* Allocate memory for HW to update Aura count. + * Alloc one cache line, so that it fits all FC_STYPE modes. 
+ */ + if (!pool->fc_addr) { + err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN); + if (err) + return err; + } + + /* Initialize this aura's context via AF */ + aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox); + if (!aq) { + /* Shared mbox memory buffer is full, flush it and retry */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + return err; + aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox); + if (!aq) + return -ENOMEM; + } + + aq->aura_id = aura_id; + + /* Will be filled by AF with correct pool context address */ + aq->aura.pool_addr = pool_id; + aq->aura.pool_caching = 1; + aq->aura.shift = ilog2(numptrs) - 8; + aq->aura.count = numptrs; + aq->aura.limit = numptrs; + aq->aura.avg_level = 255; + aq->aura.ena = 1; + aq->aura.fc_ena = 1; + aq->aura.fc_addr = pool->fc_addr->iova; + aq->aura.fc_hyst_bits = 0; /* Store count on all updates */ + + /* Enable backpressure for RQ aura */ + if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) { + aq->aura.bp_ena = 0; + /* If NIX1 LF is attached then specify NIX1_RX. + * + * Below NPA_AURA_S[BP_ENA] is set according to the + * NPA_BPINTF_E enumeration given as: + * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so + * NIX0_RX is 0x0 + 0*0x1 = 0 + * NIX1_RX is 0x0 + 1*0x1 = 1 + * But in HRM it is given that + * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to + * NIX-RX based on [BP] level. One bit per NIX-RX; index + * enumerated by NPA_BPINTF_E." + */ + if (pfvf->nix_blkaddr == BLKADDR_NIX1) + aq->aura.bp_ena = 1; + + bpid_idx = cn20k_aura_bpid_idx(pfvf, aura_id); + aq->aura.bpid = pfvf->bpid[bpid_idx]; + + /* Set backpressure level for RQ's Aura */ + aq->aura.bp = RQ_BP_LVL_AURA; + } + + /* Fill AQ info */ + aq->ctype = NPA_AQ_CTYPE_AURA; + aq->op = NPA_AQ_INSTOP_INIT; + + return 0; +} + +static int cn20k_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id, + int stack_pages, int numptrs, int buf_size, + int type) +{ + struct page_pool_params pp_params = { 0 }; + struct npa_cn20k_aq_enq_req *aq; + struct otx2_pool *pool; + int err, sz; + + pool = &pfvf->qset.pool[pool_id]; + /* Alloc memory for stack which is used to store buffer pointers */ + err = qmem_alloc(pfvf->dev, &pool->stack, + stack_pages, pfvf->hw.stack_pg_bytes); + if (err) + return err; + + pool->rbsize = buf_size; + + /* Initialize this pool's context via AF */ + aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox); + if (!aq) { + /* Shared mbox memory buffer is full, flush it and retry */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + qmem_free(pfvf->dev, pool->stack); + return err; + } + aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox); + if (!aq) { + qmem_free(pfvf->dev, pool->stack); + return -ENOMEM; + } + } + + aq->aura_id = pool_id; + aq->pool.stack_base = pool->stack->iova; + aq->pool.stack_caching = 1; + aq->pool.ena = 1; + aq->pool.buf_size = buf_size / 128; + aq->pool.stack_max_pages = stack_pages; + aq->pool.shift = ilog2(numptrs) - 8; + aq->pool.ptr_start = 0; + aq->pool.ptr_end = ~0ULL; + + /* Fill AQ info */ + aq->ctype = NPA_AQ_CTYPE_POOL; + aq->op = NPA_AQ_INSTOP_INIT; + + if (type != AURA_NIX_RQ) { + pool->page_pool = NULL; + return 0; + } + + sz = ALIGN(ALIGN(SKB_DATA_ALIGN(buf_size), OTX2_ALIGN), PAGE_SIZE); + pp_params.order = get_order(sz); + pp_params.flags = PP_FLAG_DMA_MAP; + pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs); + pp_params.nid = NUMA_NO_NODE; + pp_params.dev = pfvf->dev; + pp_params.dma_dir = DMA_FROM_DEVICE; + pool->page_pool = page_pool_create(&pp_params); + if 
(IS_ERR(pool->page_pool)) { + netdev_err(pfvf->netdev, "Creation of page pool failed\n"); + return PTR_ERR(pool->page_pool); + } + + return 0; +} + +static int cn20k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura) +{ + struct nix_cn20k_aq_enq_req *aq; + struct otx2_nic *pfvf = dev; + + /* Get memory to put this msg */ + aq = otx2_mbox_alloc_msg_nix_cn20k_aq_enq(&pfvf->mbox); + if (!aq) + return -ENOMEM; + + aq->sq.cq = pfvf->hw.rx_queues + qidx; + aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */ + aq->sq.cq_ena = 1; + aq->sq.ena = 1; + aq->sq.smq = otx2_get_smq_idx(pfvf, qidx); + aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); + aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset; + aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ + aq->sq.sqb_aura = sqb_aura; + aq->sq.sq_int_ena = NIX_SQINT_BITS; + aq->sq.qint_idx = 0; + /* Due to pipelining impact, a minimum of 2000 unused SQ CQEs + * must be maintained to avoid CQ overflow. + */ + aq->sq.cq_limit = (SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt); + + /* Fill AQ info */ + aq->qidx = qidx; + aq->ctype = NIX_AQ_CTYPE_SQ; + aq->op = NIX_AQ_INSTOP_INIT; + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +static struct dev_hw_ops cn20k_hw_ops = { + .pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler, + .vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler, + .pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler, + .sq_aq_init = cn20k_sq_aq_init, + .sqe_flush = cn10k_sqe_flush, + .aura_freeptr = cn10k_aura_freeptr, + .refill_pool_ptrs = cn10k_refill_pool_ptrs, + .aura_aq_init = cn20k_aura_aq_init, + .pool_aq_init = cn20k_pool_aq_init, +}; + +void cn20k_init(struct otx2_nic *pfvf) +{ + pfvf->hw_ops = &cn20k_hw_ops; +} +EXPORT_SYMBOL(cn20k_init); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index aff17c37ddde..3378be87a473 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -1369,6 +1369,13 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf) int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, int pool_id, int numptrs) { + return pfvf->hw_ops->aura_aq_init(pfvf, aura_id, pool_id, + numptrs); +} + +int otx2_aura_aq_init(struct otx2_nic *pfvf, int aura_id, + int pool_id, int numptrs) +{ struct npa_aq_enq_req *aq; struct otx2_pool *pool; int err; @@ -1446,6 +1453,13 @@ int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, int stack_pages, int numptrs, int buf_size, int type) { + return pfvf->hw_ops->pool_aq_init(pfvf, pool_id, stack_pages, numptrs, + buf_size, type); +} + +int otx2_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id, + int stack_pages, int numptrs, int buf_size, int type) +{ struct page_pool_params pp_params = { 0 }; struct xsk_buff_pool *xsk_pool; struct npa_aq_enq_req *aq; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index ec26d1b6c789..e616a727a3a9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -14,6 +14,7 @@ #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> #include <linux/timecounter.h> +#include <linux/soc/marvell/silicons.h> #include <linux/soc/marvell/octeontx2/asm.h> #include <net/macsec.h> #include <net/pkt_cls.h> @@ -375,6 +376,11 @@ struct dev_hw_ops { irqreturn_t 
(*pfaf_mbox_intr_handler)(int irq, void *pf_irq); irqreturn_t (*vfaf_mbox_intr_handler)(int irq, void *pf_irq); irqreturn_t (*pfvf_mbox_intr_handler)(int irq, void *pf_irq); + int (*aura_aq_init)(struct otx2_nic *pfvf, int aura_id, + int pool_id, int numptrs); + int (*pool_aq_init)(struct otx2_nic *pfvf, u16 pool_id, + int stack_pages, int numptrs, int buf_size, + int type); }; #define CN10K_MCS_SA_PER_SC 4 @@ -1059,6 +1065,10 @@ irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq); int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura); int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx); int otx2_set_hw_capabilities(struct otx2_nic *pfvf); +int otx2_aura_aq_init(struct otx2_nic *pfvf, int aura_id, + int pool_id, int numptrs); +int otx2_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id, + int stack_pages, int numptrs, int buf_size, int type); /* RSS configuration APIs */ int otx2_rss_init(struct otx2_nic *pfvf);
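
Note on the masked-write semantics: the new npa_cn20k_aq_enq_req and nix_cn20k_aq_enq_req messages in mbox.h pair each context union with a mask union, and per the comment there an AQ WRITE only updates bits whose mask bit is 1. A minimal standalone sketch of that read-modify-write (illustrative only; the merge is performed by the admin queue hardware, and aq_masked_write() is a hypothetical helper, not a driver function):

#include <stdint.h>
#include <stddef.h>

/* Model of an AQ WRITE: mask bit 1 = take the bit from the new
 * context, mask bit 0 = keep the bit currently held in hardware.
 */
static void aq_masked_write(uint64_t *hw_ctx, const uint64_t *new_ctx,
			    const uint64_t *mask, size_t ctx_size)
{
	for (size_t i = 0; i < ctx_size / sizeof(uint64_t); i++)
		hw_ctx[i] = (hw_ctx[i] & ~mask[i]) | (new_ctx[i] & mask[i]);
}

This is also why rvu_nix_blk_aq_enq_inst() can switch every memcpy() from sizeof(struct nix_*_ctx_s) to the flat NIX_MAX_CTX_SIZE: once all context layouts are padded to the same 128 bytes, a single copy size serves otx2, cn10k and cn20k alike.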

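On the bandwidth-profile changes: several hunks widen the profile index from 7 to 11 bits by splitting it across band_prof_id (bits 6:0) and the new band_prof_id_h field (bits 10:7, hence NIX_BW_PROF_HI_MASK = GENMASK(10, 7)). A standalone sketch of the split and join that rvu_nix.c performs with FIELD_PREP()/FIELD_GET(), written with plain shifts so it compiles outside the kernel:

#include <stdint.h>

#define BW_PROF_LO_BITS	7	/* width of band_prof_id */
#define BW_PROF_LO_MASK	((1u << BW_PROF_LO_BITS) - 1)

/* Split an 11-bit profile index into the two context fields. */
static inline void bw_prof_split(uint16_t prof, uint8_t *lo, uint8_t *hi)
{
	*lo = prof & BW_PROF_LO_MASK;			/* bits 6:0  */
	*hi = (uint8_t)(prof >> BW_PROF_LO_BITS);	/* bits 10:7 */
}

/* Recombine, as rvu_nix_setup_ratelimit_aggr() does. */
static inline uint16_t bw_prof_join(uint8_t lo, uint8_t hi)
{
	return (uint16_t)(hi << BW_PROF_LO_BITS) | lo;
}

The RQ context keeps its own split (band_prof_id_l is 10 bits there, so NIX_RQ_PROF_HI_MASK is GENMASK(13, 10)), but the arithmetic is the same.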
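On the size asserts: every cn20k context structure in cn20k/struct.h ends in explicit padding plus a static_assert against NIX_MAX_CTX_SIZE, so a size mismatch becomes a build error rather than silent AQ corruption. The pattern, reduced to a self-contained example (demo_ctx is hypothetical):

#include <assert.h>	/* static_assert (C11) */
#include <stdint.h>

#define MAX_CTX_SIZE 128	/* mirrors NIX_MAX_CTX_SIZE */

struct demo_ctx {
	uint64_t ena  : 1;	/* live fields: one 64-bit word */
	uint64_t qidx : 20;
	uint64_t rsvd : 43;
	uint64_t padding[15];	/* 8 + 120 = 128 bytes */
};

/* Compilation fails if a field is added without shrinking the padding. */
static_assert(sizeof(struct demo_ctx) == MAX_CTX_SIZE,
	      "AQ context must stay 128 bytes");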
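On the cq_limit programming: cn20k_sq_aq_init() sets cq_limit to (SEND_CQ_SKID * 256) / sqe_cnt, which reads as encoding the 2000-CQE skid as a fraction of the ring in 1/256 units; that encoding is an inference from the expression, not something stated in this patch. A worked check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

#define SEND_CQ_SKID 2000	/* unused CQEs kept in reserve */

static uint8_t cq_limit_for(uint32_t sqe_cnt)
{
	/* assumed encoding: skid expressed in 1/256ths of the ring */
	return (uint8_t)((SEND_CQ_SKID * 256) / sqe_cnt);
}

int main(void)
{
	/* 16384-entry ring: 2000/16384 ~= 12.2%, i.e. 31/256 */
	printf("cq_limit(16384) = %u\n", cq_limit_for(16384));
	return 0;
}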