Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/nic')
17 files changed, 3383 insertions, 277 deletions
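A recurring change in the queue-setup hunks below is replacing the old "only one SMQ is allocated, map all SQ's to that SMQ" scheme with a per-queue lookup, otx2_get_smq_idx(), added to otx2_common.h further down in this diff. The standalone C sketch here mirrors only that selection logic; the array sizes, the MAX_PRIO name and the NIX_TXSCH_LVL_SMQ value are illustrative stand-ins, not the driver's definitions.

#include <stdbool.h>
#include <stdint.h>

#define NIX_TXSCH_LVL_SMQ 0   /* illustrative stand-in for the AF header value */
#define MAX_PRIO 8            /* illustrative: one slot per PFC priority */

/* Stand-ins for the otx2_nic fields consulted by otx2_get_smq_idx() */
static bool pfc_alloc_status[MAX_PRIO];          /* priority owns a PFC schq tree? */
static uint16_t pfc_schq_list[1][MAX_PRIO];      /* per-priority PFC SMQ ids */
static uint16_t default_txschq_list[1][MAX_PRIO];/* default tree, SMQ 0 shared */

static uint16_t get_smq_idx(uint16_t qidx)
{
	/* With DCB/PFC enabled, a queue whose priority has a dedicated
	 * scheduler tree transmits through its own SMQ so it can be
	 * paused per priority; all other queues keep the shared SMQ 0.
	 */
	if (pfc_alloc_status[qidx])
		return pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];

	return default_txschq_list[NIX_TXSCH_LVL_SMQ][0];
}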
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index 0048b5946712..73fdb8798614 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -11,4 +11,8 @@ rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ otx2_devlink.o rvu_nicvf-y := otx2_vf.o otx2_devlink.o +rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o +rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o +rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o + ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index fd4f083c699e..826f691de259 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -86,8 +86,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */ aq->sq.cq_ena = 1; aq->sq.ena = 1; - /* Only one SMQ is allocated, map all SQ's to that SMQ */ - aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; + aq->sq.smq = otx2_get_smq_idx(pfvf, qidx); aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); aq->sq.default_chan = pfvf->hw.tx_chan_base; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c new file mode 100644 index 000000000000..9ec5f38d38a8 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -0,0 +1,1671 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell MACSEC hardware offload driver + * + * Copyright (C) 2022 Marvell. + */ + +#include <linux/rtnetlink.h> +#include <linux/bitfield.h> +#include <net/macsec.h> +#include "otx2_common.h" + +#define MCS_TCAM0_MAC_SA_MASK GENMASK_ULL(63, 48) +#define MCS_TCAM1_MAC_SA_MASK GENMASK_ULL(31, 0) +#define MCS_TCAM1_ETYPE_MASK GENMASK_ULL(47, 32) + +#define MCS_SA_MAP_MEM_SA_USE BIT_ULL(9) + +#define MCS_RX_SECY_PLCY_RW_MASK GENMASK_ULL(49, 18) +#define MCS_RX_SECY_PLCY_RP BIT_ULL(17) +#define MCS_RX_SECY_PLCY_AUTH_ENA BIT_ULL(16) +#define MCS_RX_SECY_PLCY_CIP GENMASK_ULL(8, 5) +#define MCS_RX_SECY_PLCY_VAL GENMASK_ULL(2, 1) +#define MCS_RX_SECY_PLCY_ENA BIT_ULL(0) + +#define MCS_TX_SECY_PLCY_MTU GENMASK_ULL(43, 28) +#define MCS_TX_SECY_PLCY_ST_TCI GENMASK_ULL(27, 22) +#define MCS_TX_SECY_PLCY_ST_OFFSET GENMASK_ULL(21, 15) +#define MCS_TX_SECY_PLCY_INS_MODE BIT_ULL(14) +#define MCS_TX_SECY_PLCY_AUTH_ENA BIT_ULL(13) +#define MCS_TX_SECY_PLCY_CIP GENMASK_ULL(5, 2) +#define MCS_TX_SECY_PLCY_PROTECT BIT_ULL(1) +#define MCS_TX_SECY_PLCY_ENA BIT_ULL(0) + +#define MCS_GCM_AES_128 0 +#define MCS_GCM_AES_256 1 +#define MCS_GCM_AES_XPN_128 2 +#define MCS_GCM_AES_XPN_256 3 + +#define MCS_TCI_ES 0x40 /* end station */ +#define MCS_TCI_SC 0x20 /* SCI present */ +#define MCS_TCI_SCB 0x10 /* epon */ +#define MCS_TCI_E 0x08 /* encryption */ +#define MCS_TCI_C 0x04 /* changed text */ + +static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg, + struct macsec_secy *secy) +{ + struct cn10k_mcs_txsc *txsc; + + list_for_each_entry(txsc, &cfg->txsc_list, entry) { + if (txsc->sw_secy == secy) + return txsc; + } + + return NULL; +} + +static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg, + struct macsec_secy *secy, + struct macsec_rx_sc *rx_sc) +{ + struct cn10k_mcs_rxsc *rxsc; + + list_for_each_entry(rxsc, 
&cfg->rxsc_list, entry) { + if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy) + return rxsc; + } + + return NULL; +} + +static const char *rsrc_name(enum mcs_rsrc_type rsrc_type) +{ + switch (rsrc_type) { + case MCS_RSRC_TYPE_FLOWID: + return "FLOW"; + case MCS_RSRC_TYPE_SC: + return "SC"; + case MCS_RSRC_TYPE_SECY: + return "SECY"; + case MCS_RSRC_TYPE_SA: + return "SA"; + default: + return "Unknown"; + }; + + return "Unknown"; +} + +static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir, + enum mcs_rsrc_type type, u16 *rsrc_id) +{ + struct mbox *mbox = &pfvf->mbox; + struct mcs_alloc_rsrc_req *req; + struct mcs_alloc_rsrc_rsp *rsp; + int ret = -ENOMEM; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox); + if (!req) + goto fail; + + req->rsrc_type = type; + req->rsrc_cnt = 1; + req->dir = dir; + + ret = otx2_sync_mbox_msg(mbox); + if (ret) + goto fail; + + rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, + 0, &req->hdr); + if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt || + req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) { + ret = -EINVAL; + goto fail; + } + + switch (rsp->rsrc_type) { + case MCS_RSRC_TYPE_FLOWID: + *rsrc_id = rsp->flow_ids[0]; + break; + case MCS_RSRC_TYPE_SC: + *rsrc_id = rsp->sc_ids[0]; + break; + case MCS_RSRC_TYPE_SECY: + *rsrc_id = rsp->secy_ids[0]; + break; + case MCS_RSRC_TYPE_SA: + *rsrc_id = rsp->sa_ids[0]; + break; + default: + ret = -EINVAL; + goto fail; + } + + mutex_unlock(&mbox->lock); + + return 0; +fail: + dev_err(pfvf->dev, "Failed to allocate %s %s resource\n", + dir == MCS_TX ? "TX" : "RX", rsrc_name(type)); + mutex_unlock(&mbox->lock); + return ret; +} + +static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir, + enum mcs_rsrc_type type, u16 hw_rsrc_id, + bool all) +{ + struct mbox *mbox = &pfvf->mbox; + struct mcs_free_rsrc_req *req; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_free_resources(mbox); + if (!req) + goto fail; + + req->rsrc_id = hw_rsrc_id; + req->rsrc_type = type; + req->dir = dir; + if (all) + req->all = 1; + + if (otx2_sync_mbox_msg(&pfvf->mbox)) + goto fail; + + mutex_unlock(&mbox->lock); + + return; +fail: + dev_err(pfvf->dev, "Failed to free %s %s resource\n", + dir == MCS_TX ? 
"TX" : "RX", rsrc_name(type)); + mutex_unlock(&mbox->lock); +} + +static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id) +{ + return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id); +} + +static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id) +{ + return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id); +} + +static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id) +{ + cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false); +} + +static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id) +{ + cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false); +} + +static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf, + struct macsec_secy *secy, u8 hw_secy_id) +{ + struct mcs_secy_plcy_write_req *req; + struct mbox *mbox = &pfvf->mbox; + u64 policy; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window); + if (secy->replay_protect) + policy |= MCS_RX_SECY_PLCY_RP; + + policy |= MCS_RX_SECY_PLCY_AUTH_ENA; + policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, MCS_GCM_AES_128); + policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames); + + policy |= MCS_RX_SECY_PLCY_ENA; + + req->plcy = policy; + req->secy_id = hw_secy_id; + req->dir = MCS_RX; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf, + struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id) +{ + struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc; + struct mcs_flowid_entry_write_req *req; + struct mbox *mbox = &pfvf->mbox; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC); + req->mask[1] = ~0ULL; + req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK; + + req->mask[0] = ~0ULL; + req->mask[2] = ~0ULL; + req->mask[3] = ~0ULL; + + req->flow_id = rxsc->hw_flow_id; + req->secy_id = hw_secy_id; + req->sc_id = rxsc->hw_sc_id; + req->dir = MCS_RX; + + if (sw_rx_sc->active) + req->ena = 1; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf, + struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id) +{ + struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc; + struct mcs_rx_sc_cam_write_req *sc_req; + struct mbox *mbox = &pfvf->mbox; + int ret; + + mutex_lock(&mbox->lock); + + sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox); + if (!sc_req) { + ret = -ENOMEM; + goto fail; + } + + sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci); + sc_req->sc_id = rxsc->hw_sc_id; + sc_req->secy_id = hw_secy_id; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf, + struct macsec_secy *secy, + struct cn10k_mcs_rxsc *rxsc, + u8 assoc_num, bool sa_in_use) +{ + unsigned char *src = rxsc->sa_key[assoc_num]; + struct mcs_sa_plcy_write_req *plcy_req; + struct mcs_rx_sc_sa_map *map_req; + struct mbox *mbox = &pfvf->mbox; + u8 reg, key_len; + int ret; + + mutex_lock(&mbox->lock); + + plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox); + if (!plcy_req) { + ret = -ENOMEM; + goto fail; + } + + map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox); + if (!map_req) { + 
otx2_mbox_reset(&mbox->mbox, 0); + ret = -ENOMEM; + goto fail; + } + + for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) { + memcpy((u8 *)&plcy_req->plcy[0][reg], + (src + reg * 8), 8); + reg++; + } + + plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num]; + plcy_req->sa_cnt = 1; + plcy_req->dir = MCS_RX; + + map_req->sa_index = rxsc->hw_sa_id[assoc_num]; + map_req->sa_in_use = sa_in_use; + map_req->sc_id = rxsc->hw_sc_id; + map_req->an = assoc_num; + + /* Send two messages together */ + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf, + struct cn10k_mcs_rxsc *rxsc, + u8 assoc_num, u64 next_pn) +{ + struct mcs_pn_table_write_req *req; + struct mbox *mbox = &pfvf->mbox; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + req->pn_id = rxsc->hw_sa_id[assoc_num]; + req->next_pn = next_pn; + req->dir = MCS_RX; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf, + struct macsec_secy *secy, + struct cn10k_mcs_txsc *txsc) +{ + struct mcs_secy_plcy_write_req *req; + struct mbox *mbox = &pfvf->mbox; + struct macsec_tx_sc *sw_tx_sc; + /* Insert SecTag after 12 bytes (DA+SA) */ + u8 tag_offset = 12; + u8 sectag_tci = 0; + u64 policy; + int ret; + + sw_tx_sc = &secy->tx_sc; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + if (sw_tx_sc->send_sci) { + sectag_tci |= MCS_TCI_SC; + } else { + if (sw_tx_sc->end_station) + sectag_tci |= MCS_TCI_ES; + if (sw_tx_sc->scb) + sectag_tci |= MCS_TCI_SCB; + } + + if (sw_tx_sc->encrypt) + sectag_tci |= (MCS_TCI_E | MCS_TCI_C); + + policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu); + /* Write SecTag excluding AN bits (1..0) */ + policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2); + policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset); + policy |= MCS_TX_SECY_PLCY_INS_MODE; + policy |= MCS_TX_SECY_PLCY_AUTH_ENA; + policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, MCS_GCM_AES_128); + + if (secy->protect_frames) + policy |= MCS_TX_SECY_PLCY_PROTECT; + + /* If the encoding SA does not exist or is not active, and protect is + * not set, then frames can be sent out as-is. Hence enable + * the policy irrespective of secy->operational when !protect.
+ */ + if (!secy->protect_frames || secy->operational) + policy |= MCS_TX_SECY_PLCY_ENA; + + req->plcy = policy; + req->secy_id = txsc->hw_secy_id_tx; + req->dir = MCS_TX; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf, + struct macsec_secy *secy, + struct cn10k_mcs_txsc *txsc) +{ + struct mcs_flowid_entry_write_req *req; + struct mbox *mbox = &pfvf->mbox; + u64 mac_sa; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + mac_sa = ether_addr_to_u64(secy->netdev->dev_addr); + + req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa); + req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16); + + req->mask[0] = ~0ULL; + req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK; + + req->mask[1] = ~0ULL; + req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK; + + req->mask[2] = ~0ULL; + req->mask[3] = ~0ULL; + + req->flow_id = txsc->hw_flow_id; + req->secy_id = txsc->hw_secy_id_tx; + req->sc_id = txsc->hw_sc_id; + req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci); + req->dir = MCS_TX; + /* This can be enabled since stack xmits packets only when interface is up */ + req->ena = 1; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf, + struct macsec_secy *secy, + struct cn10k_mcs_txsc *txsc, + u8 sa_num, bool sa_active) +{ + struct mcs_tx_sc_sa_map *map_req; + struct mbox *mbox = &pfvf->mbox; + int ret; + + /* Link the encoding_sa only to SC out of all SAs */ + if (txsc->encoding_sa != sa_num) + return 0; + + mutex_lock(&mbox->lock); + + map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox); + if (!map_req) { + otx2_mbox_reset(&mbox->mbox, 0); + ret = -ENOMEM; + goto fail; + } + + map_req->sa_index0 = txsc->hw_sa_id[sa_num]; + map_req->sa_index0_vld = sa_active; + map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci); + map_req->sc_id = txsc->hw_sc_id; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf, + struct macsec_secy *secy, + struct cn10k_mcs_txsc *txsc, + u8 assoc_num) +{ + unsigned char *src = txsc->sa_key[assoc_num]; + struct mcs_sa_plcy_write_req *plcy_req; + struct mbox *mbox = &pfvf->mbox; + u8 reg, key_len; + int ret; + + mutex_lock(&mbox->lock); + + plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox); + if (!plcy_req) { + ret = -ENOMEM; + goto fail; + } + + for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) { + memcpy((u8 *)&plcy_req->plcy[0][reg], (src + reg * 8), 8); + reg++; + } + + plcy_req->plcy[0][8] = assoc_num; + plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num]; + plcy_req->sa_cnt = 1; + plcy_req->dir = MCS_TX; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf, + struct cn10k_mcs_txsc *txsc, + u8 assoc_num, u64 next_pn) +{ + struct mcs_pn_table_write_req *req; + struct mbox *mbox = &pfvf->mbox; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + req->pn_id = txsc->hw_sa_id[assoc_num]; + req->next_pn = next_pn; + req->dir = MCS_TX; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int 
cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id, + bool enable, enum mcs_direction dir) +{ + struct mcs_flowid_ena_dis_entry *req; + struct mbox *mbox = &pfvf->mbox; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + req->flow_id = hw_flow_id; + req->ena = enable; + req->dir = dir; + + ret = otx2_sync_mbox_msg(mbox); + +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id, + struct mcs_sa_stats *rsp_p, + enum mcs_direction dir, bool clear) +{ + struct mcs_clear_stats *clear_req; + struct mbox *mbox = &pfvf->mbox; + struct mcs_stats_req *req; + struct mcs_sa_stats *rsp; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + req->id = hw_sa_id; + req->dir = dir; + + if (!clear) + goto send_msg; + + clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox); + if (!clear_req) { + ret = -ENOMEM; + goto fail; + } + clear_req->id = hw_sa_id; + clear_req->dir = dir; + clear_req->type = MCS_RSRC_TYPE_SA; + +send_msg: + ret = otx2_sync_mbox_msg(mbox); + if (ret) + goto fail; + + rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, + 0, &req->hdr); + if (IS_ERR(rsp)) { + ret = PTR_ERR(rsp); + goto fail; + } + + memcpy(rsp_p, rsp, sizeof(*rsp_p)); + + mutex_unlock(&mbox->lock); + + return 0; +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id, + struct mcs_sc_stats *rsp_p, + enum mcs_direction dir, bool clear) +{ + struct mcs_clear_stats *clear_req; + struct mbox *mbox = &pfvf->mbox; + struct mcs_stats_req *req; + struct mcs_sc_stats *rsp; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + req->id = hw_sc_id; + req->dir = dir; + + if (!clear) + goto send_msg; + + clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox); + if (!clear_req) { + ret = -ENOMEM; + goto fail; + } + clear_req->id = hw_sc_id; + clear_req->dir = dir; + clear_req->type = MCS_RSRC_TYPE_SC; + +send_msg: + ret = otx2_sync_mbox_msg(mbox); + if (ret) + goto fail; + + rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, + 0, &req->hdr); + if (IS_ERR(rsp)) { + ret = PTR_ERR(rsp); + goto fail; + } + + memcpy(rsp_p, rsp, sizeof(*rsp_p)); + + mutex_unlock(&mbox->lock); + + return 0; +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id, + struct mcs_secy_stats *rsp_p, + enum mcs_direction dir, bool clear) +{ + struct mcs_clear_stats *clear_req; + struct mbox *mbox = &pfvf->mbox; + struct mcs_secy_stats *rsp; + struct mcs_stats_req *req; + int ret; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox); + if (!req) { + ret = -ENOMEM; + goto fail; + } + + req->id = hw_secy_id; + req->dir = dir; + + if (!clear) + goto send_msg; + + clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox); + if (!clear_req) { + ret = -ENOMEM; + goto fail; + } + clear_req->id = hw_secy_id; + clear_req->dir = dir; + clear_req->type = MCS_RSRC_TYPE_SECY; + +send_msg: + ret = otx2_sync_mbox_msg(mbox); + if (ret) + goto fail; + + rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, + 0, &req->hdr); + if (IS_ERR(rsp)) { + ret = PTR_ERR(rsp); + goto fail; + } + + memcpy(rsp_p, rsp, sizeof(*rsp_p)); + + 
mutex_unlock(&mbox->lock); + + return 0; +fail: + mutex_unlock(&mbox->lock); + return ret; +} + +static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf) +{ + struct cn10k_mcs_txsc *txsc; + int ret; + + txsc = kzalloc(sizeof(*txsc), GFP_KERNEL); + if (!txsc) + return ERR_PTR(-ENOMEM); + + ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID, + &txsc->hw_flow_id); + if (ret) + goto fail; + + /* For a SecY, one TX secy and one RX secy HW resources are needed */ + ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, + &txsc->hw_secy_id_tx); + if (ret) + goto free_flowid; + + ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, + &txsc->hw_secy_id_rx); + if (ret) + goto free_tx_secy; + + ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC, + &txsc->hw_sc_id); + if (ret) + goto free_rx_secy; + + return txsc; +free_rx_secy: + cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, + txsc->hw_secy_id_rx, false); +free_tx_secy: + cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, + txsc->hw_secy_id_tx, false); +free_flowid: + cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID, + txsc->hw_flow_id, false); +fail: + kfree(txsc); + return ERR_PTR(ret); +} + +/* Free Tx SC and its SAs(if any) resources to AF + */ +static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf, + struct cn10k_mcs_txsc *txsc) +{ + u8 sa_bmap = txsc->sa_bmap; + u8 sa_num = 0; + + while (sa_bmap) { + if (sa_bmap & 1) { + cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy, + txsc, sa_num); + cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]); + } + sa_num++; + sa_bmap >>= 1; + } + + cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC, + txsc->hw_sc_id, false); + cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, + txsc->hw_secy_id_rx, false); + cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, + txsc->hw_secy_id_tx, false); + cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID, + txsc->hw_flow_id, false); +} + +static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf) +{ + struct cn10k_mcs_rxsc *rxsc; + int ret; + + rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL); + if (!rxsc) + return ERR_PTR(-ENOMEM); + + ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID, + &rxsc->hw_flow_id); + if (ret) + goto fail; + + ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC, + &rxsc->hw_sc_id); + if (ret) + goto free_flowid; + + return rxsc; +free_flowid: + cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID, + rxsc->hw_flow_id, false); +fail: + kfree(rxsc); + return ERR_PTR(ret); +} + +/* Free Rx SC and its SAs(if any) resources to AF + */ +static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf, + struct cn10k_mcs_rxsc *rxsc) +{ + u8 sa_bmap = rxsc->sa_bmap; + u8 sa_num = 0; + + while (sa_bmap) { + if (sa_bmap & 1) { + cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc, + sa_num, false); + cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]); + } + sa_num++; + sa_bmap >>= 1; + } + + cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC, + rxsc->hw_sc_id, false); + cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID, + rxsc->hw_flow_id, false); +} + +static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy, + struct cn10k_mcs_txsc *txsc, + struct macsec_tx_sa *sw_tx_sa, u8 sa_num) +{ + if (sw_tx_sa) { + cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num); + cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, + sw_tx_sa->next_pn_halves.lower); + cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, + sw_tx_sa->active); + } + + 
cn10k_mcs_write_tx_secy(pfvf, secy, txsc); + cn10k_mcs_write_tx_flowid(pfvf, secy, txsc); + /* When updating secy, change RX secy also */ + cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx); + + return 0; +} + +static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf, + struct macsec_secy *secy, u8 hw_secy_id) +{ + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct cn10k_mcs_rxsc *mcs_rx_sc; + struct macsec_rx_sc *sw_rx_sc; + struct macsec_rx_sa *sw_rx_sa; + u8 sa_num; + + for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active; + sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) { + mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc); + if (unlikely(!mcs_rx_sc)) + continue; + + for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) { + sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]); + if (!sw_rx_sa) + continue; + + cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc, + sa_num, sw_rx_sa->active); + cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num, + sw_rx_sa->next_pn_halves.lower); + } + + cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id); + cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id); + } + + return 0; +} + +static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf, + struct macsec_secy *secy, + bool delete) +{ + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct cn10k_mcs_rxsc *mcs_rx_sc; + struct macsec_rx_sc *sw_rx_sc; + int ret; + + for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active; + sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) { + mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc); + if (unlikely(!mcs_rx_sc)) + continue; + + ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id, + false, MCS_RX); + if (ret) + dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n", + mcs_rx_sc->hw_sc_id); + if (delete) { + cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc); + list_del(&mcs_rx_sc->entry); + kfree(mcs_rx_sc); + } + } + + return 0; +} + +static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy, + struct cn10k_mcs_txsc *txsc) +{ + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct mcs_secy_stats rx_rsp = { 0 }; + struct mcs_sc_stats sc_rsp = { 0 }; + struct cn10k_mcs_rxsc *rxsc; + + /* Because of shared counters for some stats in the hardware, when + * updating the secy policy, take a snapshot of the current stats and reset them. + * Below are the stats affected by shared counters.
+ */ + + /* Check if sync is really needed */ + if (secy->validate_frames == txsc->last_validate_frames && + secy->protect_frames == txsc->last_protect_frames) + return; + + cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true); + + txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt; + txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt; + txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt; + if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT) + txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt; + else + txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt; + + list_for_each_entry(rxsc, &cfg->rxsc_list, entry) { + cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true); + + rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt; + rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt; + + rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt; + rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt; + + if (txsc->last_protect_frames) + rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt; + else + rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt; + + if (txsc->last_validate_frames == MACSEC_VALIDATE_CHECK) + rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt; + else + rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt; + } + + txsc->last_validate_frames = secy->validate_frames; + txsc->last_protect_frames = secy->protect_frames; +} + +static int cn10k_mdo_open(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + struct macsec_tx_sa *sw_tx_sa; + struct cn10k_mcs_txsc *txsc; + u8 sa_num; + int err; + + txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); + if (!txsc) + return -ENOENT; + + sa_num = txsc->encoding_sa; + sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]); + + err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num); + if (err) + return err; + + return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx); +} + +static int cn10k_mdo_stop(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct cn10k_mcs_txsc *txsc; + int err; + + txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); + if (!txsc) + return -ENOENT; + + err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX); + if (err) + return err; + + return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false); +} + +static int cn10k_mdo_add_secy(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + struct cn10k_mcs_txsc *txsc; + + if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) + return -EOPNOTSUPP; + + /* Stick to 16 bytes key len until XPN support is added */ + if (secy->key_len != 16) + return -EOPNOTSUPP; + + if (secy->xpn) + return -EOPNOTSUPP; + + txsc = cn10k_mcs_create_txsc(pfvf); + if (IS_ERR(txsc)) + return -ENOSPC; + + txsc->sw_secy = secy; + txsc->encoding_sa = secy->tx_sc.encoding_sa; + txsc->last_validate_frames = secy->validate_frames; + txsc->last_protect_frames = secy->protect_frames; + + list_add(&txsc->entry, &cfg->txsc_list); + + if (netif_running(secy->netdev)) + return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0); + + return 0; +} + +static int cn10k_mdo_upd_secy(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + struct macsec_tx_sa 
*sw_tx_sa; + struct cn10k_mcs_txsc *txsc; + u8 sa_num; + int err; + + txsc = cn10k_mcs_get_txsc(cfg, secy); + if (!txsc) + return -ENOENT; + + txsc->encoding_sa = secy->tx_sc.encoding_sa; + + sa_num = txsc->encoding_sa; + sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]); + + if (netif_running(secy->netdev)) { + cn10k_mcs_sync_stats(pfvf, secy, txsc); + + err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num); + if (err) + return err; + } + + return 0; +} + +static int cn10k_mdo_del_secy(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct cn10k_mcs_txsc *txsc; + + txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); + if (!txsc) + return -ENOENT; + + cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX); + cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true); + cn10k_mcs_delete_txsc(pfvf, txsc); + list_del(&txsc->entry); + kfree(txsc); + + return 0; +} + +static int cn10k_mdo_add_txsa(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa; + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + u8 sa_num = ctx->sa.assoc_num; + struct cn10k_mcs_txsc *txsc; + int err; + + txsc = cn10k_mcs_get_txsc(cfg, secy); + if (!txsc) + return -ENOENT; + + if (sa_num >= CN10K_MCS_SA_PER_SC) + return -EOPNOTSUPP; + + if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num])) + return -ENOSPC; + + memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len); + txsc->sa_bmap |= 1 << sa_num; + + if (netif_running(secy->netdev)) { + err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num); + if (err) + return err; + + err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, + sw_tx_sa->next_pn_halves.lower); + if (err) + return err; + + err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, + sa_num, sw_tx_sa->active); + if (err) + return err; + } + + return 0; +} + +static int cn10k_mdo_upd_txsa(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa; + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + u8 sa_num = ctx->sa.assoc_num; + struct cn10k_mcs_txsc *txsc; + int err; + + txsc = cn10k_mcs_get_txsc(cfg, secy); + if (!txsc) + return -ENOENT; + + if (sa_num >= CN10K_MCS_SA_PER_SC) + return -EOPNOTSUPP; + + if (netif_running(secy->netdev)) { + /* Keys cannot be changed after creation */ + err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, + sw_tx_sa->next_pn_halves.lower); + if (err) + return err; + + err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, + sa_num, sw_tx_sa->active); + if (err) + return err; + } + + return 0; +} + +static int cn10k_mdo_del_txsa(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + u8 sa_num = ctx->sa.assoc_num; + struct cn10k_mcs_txsc *txsc; + + txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); + if (!txsc) + return -ENOENT; + + if (sa_num >= CN10K_MCS_SA_PER_SC) + return -EOPNOTSUPP; + + cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]); + txsc->sa_bmap &= ~(1 << sa_num); + + return 0; +} + +static int cn10k_mdo_add_rxsc(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + struct cn10k_mcs_rxsc *rxsc; + struct cn10k_mcs_txsc *txsc; + int err; + + txsc = cn10k_mcs_get_txsc(cfg, secy); + if (!txsc) + 
return -ENOENT; + + rxsc = cn10k_mcs_create_rxsc(pfvf); + if (IS_ERR(rxsc)) + return -ENOSPC; + + rxsc->sw_secy = ctx->secy; + rxsc->sw_rxsc = ctx->rx_sc; + list_add(&rxsc->entry, &cfg->rxsc_list); + + if (netif_running(secy->netdev)) { + err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx); + if (err) + return err; + + err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx); + if (err) + return err; + } + + return 0; +} + +static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + bool enable = ctx->rx_sc->active; + struct cn10k_mcs_rxsc *rxsc; + + rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc); + if (!rxsc) + return -ENOENT; + + if (netif_running(secy->netdev)) + return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, + enable, MCS_RX); + + return 0; +} + +static int cn10k_mdo_del_rxsc(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct cn10k_mcs_rxsc *rxsc; + + rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc); + if (!rxsc) + return -ENOENT; + + cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX); + cn10k_mcs_delete_rxsc(pfvf, rxsc); + list_del(&rxsc->entry); + kfree(rxsc); + + return 0; +} + +static int cn10k_mdo_add_rxsa(struct macsec_context *ctx) +{ + struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc; + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa; + u64 next_pn = rx_sa->next_pn_halves.lower; + struct macsec_secy *secy = ctx->secy; + bool sa_in_use = rx_sa->active; + u8 sa_num = ctx->sa.assoc_num; + struct cn10k_mcs_rxsc *rxsc; + int err; + + rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc); + if (!rxsc) + return -ENOENT; + + if (sa_num >= CN10K_MCS_SA_PER_SC) + return -EOPNOTSUPP; + + if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num])) + return -ENOSPC; + + memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len); + rxsc->sa_bmap |= 1 << sa_num; + + if (netif_running(secy->netdev)) { + err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, + sa_num, sa_in_use); + if (err) + return err; + + err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn); + if (err) + return err; + } + + return 0; +} + +static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx) +{ + struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc; + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa; + u64 next_pn = rx_sa->next_pn_halves.lower; + struct macsec_secy *secy = ctx->secy; + bool sa_in_use = rx_sa->active; + u8 sa_num = ctx->sa.assoc_num; + struct cn10k_mcs_rxsc *rxsc; + int err; + + rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc); + if (!rxsc) + return -ENOENT; + + if (sa_num >= CN10K_MCS_SA_PER_SC) + return -EOPNOTSUPP; + + if (netif_running(secy->netdev)) { + err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use); + if (err) + return err; + + err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn); + if (err) + return err; + } + + return 0; +} + +static int cn10k_mdo_del_rxsa(struct macsec_context *ctx) +{ + struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc; + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + u8 sa_num = ctx->sa.assoc_num; + struct cn10k_mcs_rxsc *rxsc; + + rxsc = 
cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc); + if (!rxsc) + return -ENOENT; + + if (sa_num >= CN10K_MCS_SA_PER_SC) + return -EOPNOTSUPP; + + cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false); + cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]); + + rxsc->sa_bmap &= ~(1 << sa_num); + + return 0; +} + +static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx) +{ + struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 }; + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + struct cn10k_mcs_txsc *txsc; + + txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); + if (!txsc) + return -ENOENT; + + cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false); + ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt; + ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt; + + cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true); + txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt; + txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt; + txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt; + if (secy->validate_frames == MACSEC_VALIDATE_STRICT) + txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt; + else + txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt; + txsc->stats.InPktsOverrun = 0; + + ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag; + ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged; + ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag; + ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI; + ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI; + ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun; + + return 0; +} + +static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct mcs_sc_stats rsp = { 0 }; + struct cn10k_mcs_txsc *txsc; + + txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); + if (!txsc) + return -ENOENT; + + cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false); + + ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt; + ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt; + ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt; + ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt; + + return 0; +} + +static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct mcs_sa_stats rsp = { 0 }; + u8 sa_num = ctx->sa.assoc_num; + struct cn10k_mcs_txsc *txsc; + + txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); + if (!txsc) + return -ENOENT; + + if (sa_num >= CN10K_MCS_SA_PER_SC) + return -EOPNOTSUPP; + + cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false); + + ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt; + ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt; + + return 0; +} + +static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx) +{ + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_secy *secy = ctx->secy; + struct mcs_sc_stats rsp = { 0 }; + struct cn10k_mcs_rxsc *rxsc; + + rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc); + if (!rxsc) + return -ENOENT; + + cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true); + + rxsc->stats.InOctetsValidated += 
rsp.octet_validate_cnt; + rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt; + + rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt; + rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt; + + if (secy->protect_frames) + rxsc->stats.InPktsLate += rsp.pkt_late_cnt; + else + rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt; + + if (secy->validate_frames == MACSEC_VALIDATE_CHECK) + rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt; + else + rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt; + + ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated; + ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted; + ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid; + ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid; + ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate; + ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed; + ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked; + ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK; + + return 0; +} + +static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx) +{ + struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc; + struct otx2_nic *pfvf = netdev_priv(ctx->netdev); + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct mcs_sa_stats rsp = { 0 }; + u8 sa_num = ctx->sa.assoc_num; + struct cn10k_mcs_rxsc *rxsc; + + rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc); + if (!rxsc) + return -ENOENT; + + if (sa_num >= CN10K_MCS_SA_PER_SC) + return -EOPNOTSUPP; + + cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false); + + ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt; + ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt; + ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt; + ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt; + ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt; + + return 0; +} + +static const struct macsec_ops cn10k_mcs_ops = { + .mdo_dev_open = cn10k_mdo_open, + .mdo_dev_stop = cn10k_mdo_stop, + .mdo_add_secy = cn10k_mdo_add_secy, + .mdo_upd_secy = cn10k_mdo_upd_secy, + .mdo_del_secy = cn10k_mdo_del_secy, + .mdo_add_rxsc = cn10k_mdo_add_rxsc, + .mdo_upd_rxsc = cn10k_mdo_upd_rxsc, + .mdo_del_rxsc = cn10k_mdo_del_rxsc, + .mdo_add_rxsa = cn10k_mdo_add_rxsa, + .mdo_upd_rxsa = cn10k_mdo_upd_rxsa, + .mdo_del_rxsa = cn10k_mdo_del_rxsa, + .mdo_add_txsa = cn10k_mdo_add_txsa, + .mdo_upd_txsa = cn10k_mdo_upd_txsa, + .mdo_del_txsa = cn10k_mdo_del_txsa, + .mdo_get_dev_stats = cn10k_mdo_get_dev_stats, + .mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats, + .mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats, + .mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats, + .mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats, +}; + +void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event) +{ + struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; + struct macsec_tx_sa *sw_tx_sa = NULL; + struct macsec_secy *secy = NULL; + struct cn10k_mcs_txsc *txsc; + u8 an; + + if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag)) + return; + + if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT)) + return; + + /* Find the SecY to which the expired hardware SA is mapped */ + list_for_each_entry(txsc, &cfg->txsc_list, entry) { + for (an = 0; an < CN10K_MCS_SA_PER_SC; an++) + if (txsc->hw_sa_id[an] == event->sa_id) { + secy = txsc->sw_secy; + sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]); + } + } + + if (secy && sw_tx_sa) + macsec_pn_wrapped(secy, sw_tx_sa); +} + +int 
cn10k_mcs_init(struct otx2_nic *pfvf) +{ + struct mbox *mbox = &pfvf->mbox; + struct cn10k_mcs_cfg *cfg; + struct mcs_intr_cfg *req; + + if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag)) + return 0; + + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) + return -ENOMEM; + + INIT_LIST_HEAD(&cfg->txsc_list); + INIT_LIST_HEAD(&cfg->rxsc_list); + pfvf->macsec_cfg = cfg; + + pfvf->netdev->features |= NETIF_F_HW_MACSEC; + pfvf->netdev->macsec_ops = &cn10k_mcs_ops; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox); + if (!req) + goto fail; + + req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT; + + if (otx2_sync_mbox_msg(mbox)) + goto fail; + + mutex_unlock(&mbox->lock); + + return 0; +fail: + dev_err(pfvf->dev, "Cannot notify PN wrapped event\n"); + mutex_unlock(&mbox->lock); + return 0; +} + +void cn10k_mcs_free(struct otx2_nic *pfvf) +{ + if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag)) + return; + + cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true); + cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true); + kfree(pfvf->macsec_cfg); + pfvf->macsec_cfg = NULL; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 66da31f30d3e..9e10e7471b88 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -97,11 +97,6 @@ void otx2_get_dev_stats(struct otx2_nic *pfvf) { struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats; -#define OTX2_GET_RX_STATS(reg) \ - otx2_read64(pfvf, NIX_LF_RX_STATX(reg)) -#define OTX2_GET_TX_STATS(reg) \ - otx2_read64(pfvf, NIX_LF_TX_STATX(reg)) - dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS); dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP); dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST); @@ -222,8 +217,11 @@ EXPORT_SYMBOL(otx2_set_mac_address); int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu) { struct nix_frs_cfg *req; + u16 maxlen; int err; + maxlen = otx2_get_max_mtu(pfvf) + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; + mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox); if (!req) { @@ -233,6 +231,10 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu) req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; + /* Use max receive length supported by hardware for loopback devices */ + if (is_otx2_lbkvf(pfvf->pdev)) + req->maxlen = maxlen; + err = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); return err; @@ -262,6 +264,7 @@ unlock: mutex_unlock(&pfvf->mbox.lock); return err; } +EXPORT_SYMBOL(otx2_config_pause_frm); int otx2_set_flowkey_cfg(struct otx2_nic *pfvf) { @@ -583,8 +586,9 @@ void otx2_get_mac_from_af(struct net_device *netdev) } EXPORT_SYMBOL(otx2_get_mac_from_af); -int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) +int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for_pfc) { + u16 (*schq_list)[MAX_TXSCHQ_PER_FUNC]; struct otx2_hw *hw = &pfvf->hw; struct nix_txschq_config *req; u64 schq, parent; @@ -599,7 +603,13 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) req->lvl = lvl; req->num_regs = 1; - schq = hw->txschq_list[lvl][0]; + schq_list = hw->txschq_list; +#ifdef CONFIG_DCB + if (txschq_for_pfc) + schq_list = pfvf->pfc_schq_list; +#endif + + schq = schq_list[lvl][prio]; /* Set topology e.t.c configuration */ if (lvl == NIX_TXSCH_LVL_SMQ) { req->reg[0] = NIX_AF_SMQX_CFG(schq); @@ -608,7 +618,7 @@ int otx2_txschq_config(struct 
otx2_nic *pfvf, int lvl) (0x2ULL << 36); req->num_regs++; /* MDQ config */ - parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0]; + parent = schq_list[NIX_TXSCH_LVL_TL4][prio]; req->reg[1] = NIX_AF_MDQX_PARENT(schq); req->regval[1] = parent << 16; req->num_regs++; @@ -616,21 +626,29 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq); req->regval[2] = dwrr_val; } else if (lvl == NIX_TXSCH_LVL_TL4) { - parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0]; + parent = schq_list[NIX_TXSCH_LVL_TL3][prio]; req->reg[0] = NIX_AF_TL4X_PARENT(schq); req->regval[0] = parent << 16; req->num_regs++; req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq); req->regval[1] = dwrr_val; } else if (lvl == NIX_TXSCH_LVL_TL3) { - parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0]; + parent = schq_list[NIX_TXSCH_LVL_TL2][prio]; req->reg[0] = NIX_AF_TL3X_PARENT(schq); req->regval[0] = parent << 16; req->num_regs++; req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq); req->regval[1] = dwrr_val; + if (lvl == hw->txschq_link_cfg_lvl) { + req->num_regs++; + req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); + /* Enable this queue and backpressure + * and set relative channel + */ + req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio; + } } else if (lvl == NIX_TXSCH_LVL_TL2) { - parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0]; + parent = schq_list[NIX_TXSCH_LVL_TL1][prio]; req->reg[0] = NIX_AF_TL2X_PARENT(schq); req->regval[0] = parent << 16; @@ -638,11 +656,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq); req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val; - req->num_regs++; - req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); - /* Enable this queue and backpressure */ - req->regval[2] = BIT_ULL(13) | BIT_ULL(12); - + if (lvl == hw->txschq_link_cfg_lvl) { + req->num_regs++; + req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); + /* Enable this queue and backpressure + * and set relative channel + */ + req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio; + } } else if (lvl == NIX_TXSCH_LVL_TL1) { /* Default config for TL1. * For VF this is always ignored. 
@@ -666,6 +687,31 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) return otx2_sync_mbox_msg(&pfvf->mbox); } +EXPORT_SYMBOL(otx2_txschq_config); + +int otx2_smq_flush(struct otx2_nic *pfvf, int smq) +{ + struct nix_txschq_config *req; + int rc; + + mutex_lock(&pfvf->mbox.lock); + + req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + req->lvl = NIX_TXSCH_LVL_SMQ; + req->reg[0] = NIX_AF_SMQX_CFG(smq); + req->regval[0] |= BIT_ULL(49); + req->num_regs++; + + rc = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + return rc; +} +EXPORT_SYMBOL(otx2_smq_flush); int otx2_txsch_alloc(struct otx2_nic *pfvf) { @@ -796,8 +842,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */ aq->sq.cq_ena = 1; aq->sq.ena = 1; - /* Only one SMQ is allocated, map all SQ's to that SMQ */ - aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; + aq->sq.smq = otx2_get_smq_idx(pfvf, qidx); aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); aq->sq.default_chan = pfvf->hw.tx_chan_base; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ @@ -853,6 +898,7 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) } sq->head = 0; + sq->cons_head = 0; sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1; sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb; /* Set SQE threshold to 10% of total SQEs */ @@ -931,7 +977,11 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) if (!is_otx2_lbkvf(pfvf->pdev)) { /* Enable receive CQ backpressure */ aq->cq.bp_ena = 1; +#ifdef CONFIG_DCB + aq->cq.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[qidx]]; +#else aq->cq.bpid = pfvf->bpid[0]; +#endif /* Set backpressure level is same as cq pass level */ aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); @@ -1036,7 +1086,7 @@ int otx2_config_nix(struct otx2_nic *pfvf) struct nix_lf_alloc_rsp *rsp; int err; - pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512; + pfvf->qset.xqe_size = pfvf->hw.xqe_size; /* Get memory to put this msg */ nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox); @@ -1049,7 +1099,7 @@ int otx2_config_nix(struct otx2_nic *pfvf) nixlf->cq_cnt = pfvf->qset.cq_cnt; nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE; nixlf->rss_grps = MAX_RSS_GROUPS; - nixlf->xqe_sz = NIX_XQESZ_W16; + nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64; /* We don't know absolute NPA LF idx attached. * AF will replace 'RVU_DEFAULT_PF_FUNC' with * NPA LF attached to this RVU PF/VF. @@ -1211,7 +1261,11 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, */ if (pfvf->nix_blkaddr == BLKADDR_NIX1) aq->aura.bp_ena = 1; +#ifdef CONFIG_DCB + aq->aura.nix0_bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]]; +#else aq->aura.nix0_bpid = pfvf->bpid[0]; +#endif /* Set backpressure level for RQ's Aura */ aq->aura.bp = RQ_BP_LVL_AURA; @@ -1538,11 +1592,18 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable) return -ENOMEM; req->chan_base = 0; - req->chan_cnt = 1; +#ifdef CONFIG_DCB + req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1; + req->bpid_per_chan = pfvf->pfc_en ? 
1 : 0; +#else + req->chan_cnt = 1; req->bpid_per_chan = 0; +#endif + return otx2_sync_mbox_msg(&pfvf->mbox); } +EXPORT_SYMBOL(otx2_nix_config_bp); /* Mbox message handlers */ void mbox_handler_cgx_stats(struct otx2_nic *pfvf, @@ -1573,6 +1634,8 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf, for (schq = 0; schq < rsp->schq[lvl]; schq++) pf->hw.txschq_list[lvl][schq] = rsp->schq_list[lvl][schq]; + + pf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl; } EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc); @@ -1704,6 +1767,56 @@ out: } EXPORT_SYMBOL(otx2_get_max_mtu); +int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features) +{ + netdev_features_t changed = features ^ netdev->features; + struct otx2_nic *pfvf = netdev_priv(netdev); + bool ntuple = !!(features & NETIF_F_NTUPLE); + bool tc = !!(features & NETIF_F_HW_TC); + + if ((changed & NETIF_F_NTUPLE) && !ntuple) + otx2_destroy_ntuple_flows(pfvf); + + if ((changed & NETIF_F_NTUPLE) && ntuple) { + if (!pfvf->flow_cfg->max_flows) { + netdev_err(netdev, + "Can't enable NTUPLE, MCAM entries not allocated\n"); + return -EINVAL; + } + } + + if ((changed & NETIF_F_HW_TC) && tc) { + if (!pfvf->flow_cfg->max_flows) { + netdev_err(netdev, + "Can't enable TC, MCAM entries not allocated\n"); + return -EINVAL; + } + } + + if ((changed & NETIF_F_HW_TC) && !tc && + pfvf->flow_cfg && pfvf->flow_cfg->nr_flows) { + netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n"); + return -EBUSY; + } + + if ((changed & NETIF_F_NTUPLE) && ntuple && + (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) { + netdev_err(netdev, + "Can't enable NTUPLE when TC is active, disable TC and retry\n"); + return -EINVAL; + } + + if ((changed & NETIF_F_HW_TC) && tc && + (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) { + netdev_err(netdev, + "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n"); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(otx2_handle_ntuple_tc_features); + #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ int __weak \ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \ @@ -1715,4 +1828,5 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \ } \ EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name); MBOX_UP_CGX_MESSAGES +MBOX_UP_MCS_MESSAGES #undef M diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 61e52812983f..282db6fe3b08 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -17,6 +17,9 @@ #include <linux/soc/marvell/octeontx2/asm.h> #include <net/pkt_cls.h> #include <net/devlink.h> +#include <linux/time64.h> +#include <linux/dim.h> +#include <uapi/linux/if_macsec.h> #include <mbox.h> #include <npc.h> @@ -31,6 +34,7 @@ #define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8 #define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200 +#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00 /* PCI BAR nos */ #define PCI_CFG_REG_BAR_NUM 2 @@ -38,6 +42,11 @@ #define NAME_SIZE 32 +#ifdef CONFIG_DCB +/* Max priority supported for PFC */ +#define NIX_PF_PFC_PRIO_MAX 8 +#endif + enum arua_mapped_qtypes { AURA_NIX_RQ, AURA_NIX_SQ, @@ -53,6 +62,11 @@ enum arua_mapped_qtypes { /* Send skid of 2000 packets required for CQ size of 4K CQEs. 
*/ #define SEND_CQ_SKID 2000 +#define OTX2_GET_RX_STATS(reg) \ + otx2_read64(pfvf, NIX_LF_RX_STATX(reg)) +#define OTX2_GET_TX_STATS(reg) \ + otx2_read64(pfvf, NIX_LF_TX_STATX(reg)) + struct otx2_lmt_info { u64 lmt_addr; u16 lmt_id; @@ -178,13 +192,18 @@ struct otx2_hw { u16 rqpool_cnt; u16 sqpool_cnt; +#define OTX2_DEFAULT_RBUF_LEN 2048 + u16 rbuf_len; + u32 xqe_size; + /* NPA */ u32 stack_pg_ptrs; /* No of ptrs per stack page */ u32 stack_pg_bytes; /* Size of stack page */ u16 sqb_size; /* NIX */ - u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; + u8 txschq_link_cfg_lvl; + u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; u16 matchall_ipolicer; u32 dwrr_mtu; @@ -226,6 +245,8 @@ struct otx2_hw { #define CN10K_MBOX 1 #define CN10K_LMTST 2 #define CN10K_RPM 3 +#define CN10K_PTP_ONESTEP 4 +#define CN10K_HW_MACSEC 5 unsigned long cap_flag; #define LMT_LINE_SIZE 128 @@ -259,6 +280,13 @@ struct refill_work { struct otx2_nic *pf; }; +/* PTPv2 originTimestamp structure */ +struct ptpv2_tstamp { + __be16 seconds_msb; /* 16 bits + */ + __be32 seconds_lsb; /* 32 bits = 48 bits*/ + __be32 nanoseconds; +} __packed; + struct otx2_ptp { struct ptp_clock_info ptp_info; struct ptp_clock *ptp_clock; @@ -272,6 +300,11 @@ struct otx2_ptp { u64 thresh; struct ptp_pin_desc extts_config; + u64 (*convert_rx_ptp_tstmp)(u64 timestamp); + u64 (*convert_tx_ptp_tstmp)(u64 timestamp); + struct delayed_work synctstamp_work; + u64 tstamp; + u32 base_ns; }; #define OTX2_HW_TIMESTAMP_LEN 8 @@ -301,8 +334,8 @@ struct otx2_flow_config { #define OTX2_VF_VLAN_TX_INDEX 1 u16 max_flows; u8 dmacflt_max_flows; - u8 *bmap_to_dmacindex; - unsigned long dmacflt_bmap; + u32 *bmap_to_dmacindex; + unsigned long *dmacflt_bmap; struct list_head flow_list; }; @@ -321,6 +354,66 @@ struct dev_hw_ops { void (*aura_freeptr)(void *dev, int aura, u64 buf); }; +#define CN10K_MCS_SA_PER_SC 4 + +/* Stats which need to be accumulated in software because + * of shared counters in hardware. 
+ */ +struct cn10k_txsc_stats { + u64 InPktsUntagged; + u64 InPktsNoTag; + u64 InPktsBadTag; + u64 InPktsUnknownSCI; + u64 InPktsNoSCI; + u64 InPktsOverrun; +}; + +struct cn10k_rxsc_stats { + u64 InOctetsValidated; + u64 InOctetsDecrypted; + u64 InPktsUnchecked; + u64 InPktsDelayed; + u64 InPktsOK; + u64 InPktsInvalid; + u64 InPktsLate; + u64 InPktsNotValid; + u64 InPktsNotUsingSA; + u64 InPktsUnusedSA; +}; + +struct cn10k_mcs_txsc { + struct macsec_secy *sw_secy; + struct cn10k_txsc_stats stats; + struct list_head entry; + enum macsec_validation_type last_validate_frames; + bool last_protect_frames; + u16 hw_secy_id_tx; + u16 hw_secy_id_rx; + u16 hw_flow_id; + u16 hw_sc_id; + u16 hw_sa_id[CN10K_MCS_SA_PER_SC]; + u8 sa_bmap; + u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN]; + u8 encoding_sa; +}; + +struct cn10k_mcs_rxsc { + struct macsec_secy *sw_secy; + struct macsec_rx_sc *sw_rxsc; + struct cn10k_rxsc_stats stats; + struct list_head entry; + u16 hw_flow_id; + u16 hw_sc_id; + u16 hw_sa_id[CN10K_MCS_SA_PER_SC]; + u8 sa_bmap; + u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN]; +}; + +struct cn10k_mcs_cfg { + struct list_head txsc_list; + struct list_head rxsc_list; +}; + struct otx2_nic { void __iomem *reg_base; struct net_device *netdev; @@ -344,6 +437,8 @@ struct otx2_nic { #define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12) #define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13) #define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14) +#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15) +#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16) u64 flags; u64 *cq_op_addr; @@ -396,6 +491,20 @@ struct otx2_nic { /* Devlink */ struct otx2_devlink *dl; +#ifdef CONFIG_DCB + /* PFC */ + u8 pfc_en; + u8 *queue_to_pfc_map; + u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; + bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX]; +#endif + + /* napi event count. It is needed for adaptive irq coalescing. 
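The napi_events counter this comment introduces is the event input to the kernel's generic DIM (dynamic interrupt moderation) library: each CQ interrupt increments it (see otx2_cq_intr_handler later in this patch), and the poll loop is expected to feed it to DIM roughly as sketched below (the rx_frames/rx_bytes tallies are placeholders; the actual wiring lives in otx2_txrx.c, outside this diff):

struct dim_sample dim_sample = {};

dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample);
net_dim(&cq_poll->dim, dim_sample);	/* may queue cq_poll->dim.work */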
*/ + u32 napi_events; + +#if IS_ENABLED(CONFIG_MACSEC) + struct cn10k_mcs_cfg *macsec_cfg; +#endif }; static inline bool is_otx2_lbkvf(struct pci_dev *pdev) @@ -435,6 +544,11 @@ static inline bool is_dev_otx2(struct pci_dev *pdev) midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO); } +static inline bool is_dev_cn10kb(struct pci_dev *pdev) +{ + return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF; +} + static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) { struct otx2_hw *hw = &pfvf->hw; @@ -464,7 +578,11 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) __set_bit(CN10K_MBOX, &hw->cap_flag); __set_bit(CN10K_LMTST, &hw->cap_flag); __set_bit(CN10K_RPM, &hw->cap_flag); + __set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag); } + + if (is_dev_cn10kb(pfvf->pdev)) + __set_bit(CN10K_HW_MACSEC, &hw->cap_flag); } /* Register read/write APIs */ @@ -603,6 +721,7 @@ static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura, size++; tar_addr |= ((size - 1) & 0x7) << 4; } + dma_wmb(); memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs); /* Perform LMTST flush */ cn10k_lmt_flush(val, tar_addr); @@ -719,6 +838,7 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \ struct _rsp_type *rsp); \ MBOX_UP_CGX_MESSAGES +MBOX_UP_MCS_MESSAGES #undef M /* Time to wait before watchdog kicks off */ @@ -761,6 +881,16 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf, dir, DMA_ATTR_SKIP_CPU_SYNC); } +static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx) +{ +#ifdef CONFIG_DCB + if (pfvf->pfc_alloc_status[qidx]) + return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx]; +#endif + + return pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; +} + /* MSI-X APIs */ void otx2_free_cints(struct otx2_nic *pfvf, int n); void otx2_set_cints_affinity(struct otx2_nic *pfvf); @@ -783,7 +913,7 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type); void otx2_sq_free_sqbs(struct otx2_nic *pfvf); int otx2_config_nix(struct otx2_nic *pfvf); int otx2_config_nix_queues(struct otx2_nic *pfvf); -int otx2_txschq_config(struct otx2_nic *pfvf, int lvl); +int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en); int otx2_txsch_alloc(struct otx2_nic *pfvf); int otx2_txschq_stop(struct otx2_nic *pfvf); void otx2_sqb_flush(struct otx2_nic *pfvf); @@ -862,6 +992,10 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable); int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf); bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx); u16 otx2_get_max_mtu(struct otx2_nic *pfvf); +int otx2_handle_ntuple_tc_features(struct net_device *netdev, + netdev_features_t features); +int otx2_smq_flush(struct otx2_nic *pfvf, int smq); + /* tc support */ int otx2_init_tc(struct otx2_nic *nic); void otx2_shutdown_tc(struct otx2_nic *nic); @@ -870,9 +1004,35 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic); /* CGX/RPM DMAC filters support */ int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf); -int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos); -int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos); -int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos); +int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos); +int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos); +int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos); void 
otx2_dmacflt_reinstall_flows(struct otx2_nic *pf); void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf); + +#ifdef CONFIG_DCB +/* DCB support*/ +void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable); +int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf); +int otx2_dcbnl_set_ops(struct net_device *dev); +/* PFC support */ +int otx2_pfc_txschq_config(struct otx2_nic *pfvf); +int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf); +int otx2_pfc_txschq_update(struct otx2_nic *pfvf); +int otx2_pfc_txschq_stop(struct otx2_nic *pfvf); +#endif + +#if IS_ENABLED(CONFIG_MACSEC) +/* MACSEC offload support */ +int cn10k_mcs_init(struct otx2_nic *pfvf); +void cn10k_mcs_free(struct otx2_nic *pfvf); +void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event); +#else +static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; } +static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {} +static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf, + struct mcs_intr_info *event) +{} +#endif /* CONFIG_MACSEC */ + #endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c new file mode 100644 index 000000000000..ccaf97bb1ce0 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c @@ -0,0 +1,470 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2021 Marvell. + * + */ + +#include "otx2_common.h" + +static int otx2_check_pfc_config(struct otx2_nic *pfvf) +{ + u8 tx_queues = pfvf->hw.tx_queues, prio; + u8 pfc_en = pfvf->pfc_en; + + for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) { + if ((pfc_en & (1 << prio)) && + prio > tx_queues - 1) { + dev_warn(pfvf->dev, + "Increase number of tx queues from %d to %d to support PFC.\n", + tx_queues, prio + 1); + return -EINVAL; + } + } + + return 0; +} + +int otx2_pfc_txschq_config(struct otx2_nic *pfvf) +{ + u8 pfc_en, pfc_bit_set; + int prio, lvl, err; + + pfc_en = pfvf->pfc_en; + for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) { + pfc_bit_set = pfc_en & (1 << prio); + + /* Either PFC bit is not set + * or tx scheduler is not allocated for the priority + */ + if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio]) + continue; + + /* configure the scheduler for the tls*/ + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + err = otx2_txschq_config(pfvf, lvl, prio, true); + if (err) { + dev_err(pfvf->dev, + "%s configure PFC tx schq for lvl:%d, prio:%d failed!\n", + __func__, lvl, prio); + return err; + } + } + } + + return 0; +} + +static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio) +{ + struct nix_txsch_alloc_req *req; + struct nix_txsch_alloc_rsp *rsp; + int lvl, rc; + + /* Get memory to put this msg */ + req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox); + if (!req) + return -ENOMEM; + + /* Request one schq per level upto max level as configured + * link config level. These rest of the scheduler can be + * same as hw.txschq_list. 
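Concretely, only levels below the AF-reported link-config level need a private node per PFC priority; everything above hangs off the default hierarchy. Assuming txschq_link_cfg_lvl == NIX_TXSCH_LVL_TL3 (2), the per-priority list ends up as:

/*
 * level      pfc_schq_list[level][prio] source
 * SMQ (0)    newly allocated, one per PFC priority
 * TL4 (1)    newly allocated, one per PFC priority
 * TL3 (2)    hw.txschq_list[TL3][0], shared
 * TL2 (3)    hw.txschq_list[TL2][0], shared
 * TL1 (4)    hw.txschq_list[TL1][0], shared
 */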
+ */ + for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++) + req->schq[lvl] = 1; + + rc = otx2_sync_mbox_msg(&pfvf->mbox); + if (rc) + return rc; + + rsp = (struct nix_txsch_alloc_rsp *) + otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) + return PTR_ERR(rsp); + + /* Setup transmit scheduler list */ + for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++) { + if (!rsp->schq[lvl]) + return -ENOSPC; + + pfvf->pfc_schq_list[lvl][prio] = rsp->schq_list[lvl][0]; + } + + /* Set the Tx schedulers for rest of the levels same as + * hw.txschq_list as those will be common for all. + */ + for (; lvl < NIX_TXSCH_LVL_CNT; lvl++) + pfvf->pfc_schq_list[lvl][prio] = pfvf->hw.txschq_list[lvl][0]; + + pfvf->pfc_alloc_status[prio] = true; + return 0; +} + +int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf) +{ + u8 pfc_en = pfvf->pfc_en; + u8 pfc_bit_set; + int err, prio; + + for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) { + pfc_bit_set = pfc_en & (1 << prio); + + if (!pfc_bit_set || pfvf->pfc_alloc_status[prio]) + continue; + + /* Add new scheduler to the priority */ + err = otx2_pfc_txschq_alloc_one(pfvf, prio); + if (err) { + dev_err(pfvf->dev, "%s failed to allocate PFC TX schedulers\n", __func__); + return err; + } + } + + return 0; +} + +static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio) +{ + struct nix_txsch_free_req *free_req; + + mutex_lock(&pfvf->mbox.lock); + /* free PFC TLx nodes */ + free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox); + if (!free_req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + free_req->flags = TXSCHQ_FREE_ALL; + otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + + pfvf->pfc_alloc_status[prio] = false; + return 0; +} + +static int otx2_pfc_update_sq_smq_mapping(struct otx2_nic *pfvf, int prio) +{ + struct nix_cn10k_aq_enq_req *cn10k_sq_aq; + struct net_device *dev = pfvf->netdev; + bool if_up = netif_running(dev); + struct nix_aq_enq_req *sq_aq; + + if (if_up) { + if (pfvf->pfc_alloc_status[prio]) + netif_tx_stop_all_queues(pfvf->netdev); + else + netif_tx_stop_queue(netdev_get_tx_queue(dev, prio)); + } + + if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) { + cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox); + if (!cn10k_sq_aq) + return -ENOMEM; + + /* Fill AQ info */ + cn10k_sq_aq->qidx = prio; + cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ; + cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE; + + /* Fill fields to update */ + cn10k_sq_aq->sq.ena = 1; + cn10k_sq_aq->sq_mask.ena = 1; + cn10k_sq_aq->sq_mask.smq = GENMASK(9, 0); + cn10k_sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio); + } else { + sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); + if (!sq_aq) + return -ENOMEM; + + /* Fill AQ info */ + sq_aq->qidx = prio; + sq_aq->ctype = NIX_AQ_CTYPE_SQ; + sq_aq->op = NIX_AQ_INSTOP_WRITE; + + /* Fill fields to update */ + sq_aq->sq.ena = 1; + sq_aq->sq_mask.ena = 1; + sq_aq->sq_mask.smq = GENMASK(8, 0); + sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio); + } + + otx2_sync_mbox_msg(&pfvf->mbox); + + if (if_up) { + if (pfvf->pfc_alloc_status[prio]) + netif_tx_start_all_queues(pfvf->netdev); + else + netif_tx_start_queue(netdev_get_tx_queue(dev, prio)); + } + + return 0; +} + +int otx2_pfc_txschq_update(struct otx2_nic *pfvf) +{ + bool if_up = netif_running(pfvf->netdev); + u8 pfc_en = pfvf->pfc_en, pfc_bit_set; + struct mbox *mbox = &pfvf->mbox; + int err, prio; + + mutex_lock(&mbox->lock); + for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) { + pfc_bit_set = pfc_en & (1 << prio); + + /* tx scheduler 
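One detail worth calling out in the SQ remap above: NIX AQ INSTOP_WRITE is a masked read-modify-write, so a context field changes only if the matching bits are set in the *_mask companion structure. That is why the code pairs every value with a mask, using the field's full width — GENMASK(9, 0) for the 10-bit CN10K SMQ index versus GENMASK(8, 0) on earlier silicon. The idiom, condensed:

aq->qidx = qidx;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_WRITE;
aq->sq.smq = new_smq;			/* value to write          */
aq->sq_mask.smq = GENMASK(9, 0);	/* select only this field  */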
was created but user wants to disable now */ + if (!pfc_bit_set && pfvf->pfc_alloc_status[prio]) { + mutex_unlock(&mbox->lock); + if (if_up) + netif_tx_stop_all_queues(pfvf->netdev); + + otx2_smq_flush(pfvf, pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][prio]); + if (if_up) + netif_tx_start_all_queues(pfvf->netdev); + + /* delete the schq */ + err = otx2_pfc_txschq_stop_one(pfvf, prio); + if (err) { + dev_err(pfvf->dev, + "%s failed to stop PFC tx schedulers for priority: %d\n", + __func__, prio); + return err; + } + + mutex_lock(&mbox->lock); + goto update_sq_smq_map; + } + + /* Either PFC bit is not set + * or Tx scheduler is already mapped for the priority + */ + if (!pfc_bit_set || pfvf->pfc_alloc_status[prio]) + continue; + + /* Add new scheduler to the priority */ + err = otx2_pfc_txschq_alloc_one(pfvf, prio); + if (err) { + mutex_unlock(&mbox->lock); + dev_err(pfvf->dev, + "%s failed to allocate PFC tx schedulers for priority: %d\n", + __func__, prio); + return err; + } + +update_sq_smq_map: + err = otx2_pfc_update_sq_smq_mapping(pfvf, prio); + if (err) { + mutex_unlock(&mbox->lock); + dev_err(pfvf->dev, "%s failed PFC Tx schq sq:%d mapping", __func__, prio); + return err; + } + } + + err = otx2_pfc_txschq_config(pfvf); + mutex_unlock(&mbox->lock); + if (err) + return err; + + return 0; +} + +int otx2_pfc_txschq_stop(struct otx2_nic *pfvf) +{ + u8 pfc_en, pfc_bit_set; + int prio, err; + + pfc_en = pfvf->pfc_en; + for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) { + pfc_bit_set = pfc_en & (1 << prio); + if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio]) + continue; + + /* Delete the existing scheduler */ + err = otx2_pfc_txschq_stop_one(pfvf, prio); + if (err) { + dev_err(pfvf->dev, "%s failed to stop PFC TX schedulers\n", __func__); + return err; + } + } + + return 0; +} + +int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf) +{ + struct cgx_pfc_cfg *req; + struct cgx_pfc_rsp *rsp; + int err = 0; + + if (is_otx2_lbkvf(pfvf->pdev)) + return 0; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(&pfvf->mbox); + if (!req) { + err = -ENOMEM; + goto unlock; + } + + if (pfvf->pfc_en) { + req->rx_pause = true; + req->tx_pause = true; + } else { + req->rx_pause = false; + req->tx_pause = false; + } + req->pfc_en = pfvf->pfc_en; + + if (!otx2_sync_mbox_msg(&pfvf->mbox)) { + rsp = (struct cgx_pfc_rsp *) + otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) { + dev_warn(pfvf->dev, + "Failed to config PFC\n"); + err = -EPERM; + } + } +unlock: + mutex_unlock(&pfvf->mbox.lock); + return err; +} + +void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, + bool pfc_enable) +{ + bool if_up = netif_running(pfvf->netdev); + struct npa_aq_enq_req *npa_aq; + struct nix_aq_enq_req *aq; + int err = 0; + + if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) { + dev_warn(pfvf->dev, + "PFC enable not permitted as Priority %d already mapped to Queue %d\n", + pfvf->queue_to_pfc_map[qidx], qidx); + return; + } + + if (if_up) { + netif_tx_stop_all_queues(pfvf->netdev); + netif_carrier_off(pfvf->netdev); + } + + pfvf->queue_to_pfc_map[qidx] = vlan_prio; + + aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); + if (!aq) { + err = -ENOMEM; + goto out; + } + + aq->cq.bpid = pfvf->bpid[vlan_prio]; + aq->cq_mask.bpid = GENMASK(8, 0); + + /* Fill AQ info */ + aq->qidx = qidx; + aq->ctype = NIX_AQ_CTYPE_CQ; + aq->op = NIX_AQ_INSTOP_WRITE; + + otx2_sync_mbox_msg(&pfvf->mbox); + + npa_aq = 
otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); + if (!npa_aq) { + err = -ENOMEM; + goto out; + } + npa_aq->aura.nix0_bpid = pfvf->bpid[vlan_prio]; + npa_aq->aura_mask.nix0_bpid = GENMASK(8, 0); + + /* Fill NPA AQ info */ + npa_aq->aura_id = qidx; + npa_aq->ctype = NPA_AQ_CTYPE_AURA; + npa_aq->op = NPA_AQ_INSTOP_WRITE; + otx2_sync_mbox_msg(&pfvf->mbox); + +out: + if (if_up) { + netif_carrier_on(pfvf->netdev); + netif_tx_start_all_queues(pfvf->netdev); + } + + if (err) + dev_warn(pfvf->dev, + "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n", + qidx, err); +} + +static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + + pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS; + pfc->pfc_en = pfvf->pfc_en; + + return 0; +} + +static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + int err; + + /* Save PFC configuration to interface */ + pfvf->pfc_en = pfc->pfc_en; + + if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX) + goto process_pfc; + + /* Check if the PFC configuration can be + * supported by the tx queue configuration + */ + err = otx2_check_pfc_config(pfvf); + if (err) + return err; + +process_pfc: + err = otx2_config_priority_flow_ctrl(pfvf); + if (err) + return err; + + /* Request Per channel Bpids */ + if (pfc->pfc_en) + otx2_nix_config_bp(pfvf, true); + + err = otx2_pfc_txschq_update(pfvf); + if (err) { + dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__); + return err; + } + + return 0; +} + +static u8 otx2_dcbnl_getdcbx(struct net_device __always_unused *dev) +{ + return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; +} + +static u8 otx2_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode) +{ + return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 
1 : 0; +} + +static const struct dcbnl_rtnl_ops otx2_dcbnl_ops = { + .ieee_getpfc = otx2_dcbnl_ieee_getpfc, + .ieee_setpfc = otx2_dcbnl_ieee_setpfc, + .getdcbx = otx2_dcbnl_getdcbx, + .setdcbx = otx2_dcbnl_setdcbx, +}; + +int otx2_dcbnl_set_ops(struct net_device *dev) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + + pfvf->queue_to_pfc_map = devm_kzalloc(pfvf->dev, pfvf->hw.rx_queues, + GFP_KERNEL); + if (!pfvf->queue_to_pfc_map) + return -ENOMEM; + dev->dcbnl_ops = &otx2_dcbnl_ops; + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c index 2ec800f741d8..80d853b343f9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c @@ -8,7 +8,7 @@ #include "otx2_common.h" static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac, - u8 *dmac_index) + u32 *dmac_index) { struct cgx_mac_addr_add_req *req; struct cgx_mac_addr_add_rsp *rsp; @@ -35,9 +35,10 @@ static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac, return err; } -static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf) +static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf, u32 *dmac_index) { struct cgx_mac_addr_set_or_get *req; + struct cgx_mac_addr_set_or_get *rsp; int err; mutex_lock(&pf->mbox.lock); @@ -48,16 +49,31 @@ static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf) return -ENOMEM; } + req->index = *dmac_index; + ether_addr_copy(req->mac_addr, pf->netdev->dev_addr); err = otx2_sync_mbox_msg(&pf->mbox); + if (err) + goto out; + + rsp = (struct cgx_mac_addr_set_or_get *) + otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); + + if (IS_ERR_OR_NULL(rsp)) { + err = -EINVAL; + goto out; + } + + *dmac_index = rsp->index; +out: mutex_unlock(&pf->mbox.lock); return err; } -int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos) +int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos) { - u8 *dmacindex; + u32 *dmacindex; /* Store dmacindex returned by CGX/RPM driver which will * be used for macaddr update/remove @@ -65,13 +81,13 @@ int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos) dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos]; if (ether_addr_equal(mac, pf->netdev->dev_addr)) - return otx2_dmacflt_add_pfmac(pf); + return otx2_dmacflt_add_pfmac(pf, dmacindex); else return otx2_dmacflt_do_add(pf, mac, dmacindex); } static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac, - u8 dmac_index) + u32 dmac_index) { struct cgx_mac_addr_del_req *req; int err; @@ -91,9 +107,9 @@ static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac, return err; } -static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf) +static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf, u32 dmac_index) { - struct msg_req *req; + struct cgx_mac_addr_reset_req *req; int err; mutex_lock(&pf->mbox.lock); @@ -102,6 +118,7 @@ static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf) mutex_unlock(&pf->mbox.lock); return -ENOMEM; } + req->index = dmac_index; err = otx2_sync_mbox_msg(&pf->mbox); @@ -110,12 +127,12 @@ static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf) } int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, - u8 bit_pos) + u32 bit_pos) { - u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; + u32 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; if (ether_addr_equal(mac, pf->netdev->dev_addr)) - return otx2_dmacflt_remove_pfmac(pf); + return otx2_dmacflt_remove_pfmac(pf, 
dmacindex); else return otx2_dmacflt_do_remove(pf, mac, dmacindex); } @@ -144,6 +161,12 @@ int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf) rsp = (struct cgx_max_dmac_entries_get_rsp *) otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr); + + if (IS_ERR_OR_NULL(rsp)) { + err = -EINVAL; + goto out; + } + pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters; out: @@ -151,9 +174,10 @@ out: return err; } -int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos) +int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos) { struct cgx_mac_addr_update_req *req; + struct cgx_mac_addr_update_rsp *rsp; int rc; mutex_lock(&pf->mbox.lock); @@ -167,8 +191,19 @@ int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos) ether_addr_copy(req->mac_addr, mac); req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; + + /* check the response and change index */ + rc = otx2_sync_mbox_msg(&pf->mbox); + if (rc) + goto out; + + rsp = (struct cgx_mac_addr_update_rsp *) + otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); + pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index; + +out: mutex_unlock(&pf->mbox.lock); return rc; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 80d4ce61f442..0eb74e8c553d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -76,8 +76,8 @@ static void otx2_get_drvinfo(struct net_device *netdev, { struct otx2_nic *pfvf = netdev_priv(netdev); - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info)); + strscpy(info->driver, DRV_NAME, sizeof(info->driver)); + strscpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info)); } static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset) @@ -360,7 +360,9 @@ static int otx2_set_pauseparam(struct net_device *netdev, } static void otx2_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct otx2_nic *pfvf = netdev_priv(netdev); struct otx2_qset *qs = &pfvf->qset; @@ -369,12 +371,19 @@ static void otx2_get_ringparam(struct net_device *netdev, ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256); ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX); ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K); + kernel_ring->rx_buf_len = pfvf->hw.rbuf_len; + kernel_ring->cqe_size = pfvf->hw.xqe_size; } static int otx2_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct otx2_nic *pfvf = netdev_priv(netdev); + u32 rx_buf_len = kernel_ring->rx_buf_len; + u32 old_rx_buf_len = pfvf->hw.rbuf_len; + u32 xqe_size = kernel_ring->cqe_size; bool if_up = netif_running(netdev); struct otx2_qset *qs = &pfvf->qset; u32 rx_count, tx_count; @@ -382,6 +391,21 @@ static int otx2_set_ringparam(struct net_device *netdev, if (ring->rx_mini_pending || ring->rx_jumbo_pending) return -EINVAL; + /* Hardware supports max size of 32k for a receive buffer + * and 1536 is typical ethernet frame size. 
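Both new ring knobs are validated here before any queue teardown: the receive buffer length must fall in 1536..32768 bytes and the completion-entry size must be exactly 128 or 512. From userspace these correspond to the extended ring parameters (roughly `ethtool -G <dev> rx-buf-len 4096` and `ethtool -G <dev> cqe-size 512`; the option names assume a recent ethtool that understands the ETHTOOL_RING_USE_* params). A user-set length then short-circuits the MTU-derived sizing, as the otx2_pf.c hunk later in this patch shows:

/* in otx2_get_rbuf_size(): honour the ethtool override first */
if (pf->hw.rbuf_len)
	return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;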
+ */ + if (rx_buf_len && (rx_buf_len < 1536 || rx_buf_len > 32768)) { + netdev_err(netdev, + "Receive buffer range is 1536 - 32768"); + return -EINVAL; + } + + if (xqe_size != 128 && xqe_size != 512) { + netdev_err(netdev, + "Completion event size must be 128 or 512"); + return -EINVAL; + } + /* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M */ rx_count = ring->rx_pending; /* On some silicon variants a skid or reserved CQEs are @@ -399,7 +423,8 @@ static int otx2_set_ringparam(struct net_device *netdev, Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX)); tx_count = Q_COUNT(Q_SIZE(tx_count, 3)); - if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt) + if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt && + rx_buf_len == old_rx_buf_len && xqe_size == pfvf->hw.xqe_size) return 0; if (if_up) @@ -409,6 +434,9 @@ static int otx2_set_ringparam(struct net_device *netdev, qs->sqe_cnt = tx_count; qs->rqe_cnt = rx_count; + pfvf->hw.rbuf_len = rx_buf_len; + pfvf->hw.xqe_size = xqe_size; + if (if_up) return netdev->netdev_ops->ndo_open(netdev); @@ -427,6 +455,14 @@ static int otx2_get_coalesce(struct net_device *netdev, cmd->rx_max_coalesced_frames = hw->cq_ecount_wait; cmd->tx_coalesce_usecs = hw->cq_time_wait; cmd->tx_max_coalesced_frames = hw->cq_ecount_wait; + if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) == + OTX2_FLAG_ADPTV_INT_COAL_ENABLED) { + cmd->use_adaptive_rx_coalesce = 1; + cmd->use_adaptive_tx_coalesce = 1; + } else { + cmd->use_adaptive_rx_coalesce = 0; + cmd->use_adaptive_tx_coalesce = 0; + } return 0; } @@ -438,11 +474,30 @@ static int otx2_set_coalesce(struct net_device *netdev, { struct otx2_nic *pfvf = netdev_priv(netdev); struct otx2_hw *hw = &pfvf->hw; + u8 priv_coalesce_status; int qidx; if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames) return 0; + if (ec->use_adaptive_rx_coalesce != ec->use_adaptive_tx_coalesce) { + netdev_err(netdev, + "adaptive-rx should be same as adaptive-tx"); + return -EINVAL; + } + + /* Check and update coalesce status */ + if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) == + OTX2_FLAG_ADPTV_INT_COAL_ENABLED) { + priv_coalesce_status = 1; + if (!ec->use_adaptive_rx_coalesce) + pfvf->flags &= ~OTX2_FLAG_ADPTV_INT_COAL_ENABLED; + } else { + priv_coalesce_status = 0; + if (ec->use_adaptive_rx_coalesce) + pfvf->flags |= OTX2_FLAG_ADPTV_INT_COAL_ENABLED; + } + /* 'cq_time_wait' is 8bit and is in multiple of 100ns, * so clamp the user given value to the range of 1 to 25usec. */ @@ -466,9 +521,9 @@ static int otx2_set_coalesce(struct net_device *netdev, * so clamp the user given value to the range of 1 to 64k. */ ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames, - 1, U16_MAX); + 1, NAPI_POLL_WEIGHT); ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames, - 1, U16_MAX); + 1, NAPI_POLL_WEIGHT); /* Rx and Tx are mapped to same CQ, check which one * is changed, if both then choose the min. @@ -481,6 +536,17 @@ static int otx2_set_coalesce(struct net_device *netdev, hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames, ec->tx_max_coalesced_frames); + /* Reset 'cq_time_wait' and 'cq_ecount_wait' to + * default values if coalesce status changed from + * 'on' to 'off'. 
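The restore below handles the on-to-off edge of adaptive moderation: while DIM is active it continually rewrites cq_time_wait/cq_ecount_wait (see otx2_dim_work later in this patch), so switching it off must put the static defaults back or the last DIM-chosen profile would silently stick. A condensed sketch of the edge test, not the driver's exact control flow:

bool was_on = pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED;

if (was_on && !ec->use_adaptive_rx_coalesce) {
	hw->cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
	hw->cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
}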
+ */ + if (priv_coalesce_status && + ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) != + OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) { + hw->cq_time_wait = CQ_TIMER_THRESH_DEFAULT; + hw->cq_ecount_wait = CQ_CQE_THRESH_DEFAULT; + } + if (netif_running(netdev)) { for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++) otx2_config_irq_coalescing(pfvf, qidx); @@ -897,10 +963,12 @@ static int otx2_get_ts_info(struct net_device *netdev, info->phc_index = otx2_ptp_clock_index(pfvf); - info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); + if (test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag)) + info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_SYNC); - info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_ALL); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_ALL); return 0; } @@ -1202,7 +1270,10 @@ end: static const struct ethtool_ops otx2_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_MAX_FRAMES, + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE, + .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN | + ETHTOOL_RING_USE_CQE_SIZE, .get_link = otx2_get_link, .get_drvinfo = otx2_get_drvinfo, .get_strings = otx2_get_strings, @@ -1244,8 +1315,8 @@ static void otx2vf_get_drvinfo(struct net_device *netdev, { struct otx2_nic *vf = netdev_priv(netdev); - strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver)); - strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info)); + strscpy(info->driver, DRV_VF_NAME, sizeof(info->driver)); + strscpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info)); } static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data) @@ -1321,7 +1392,10 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev, static const struct ethtool_ops otx2vf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_MAX_FRAMES, + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE, + .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN | + ETHTOOL_RING_USE_CQE_SIZE, .get_link = otx2_get_link, .get_drvinfo = otx2vf_get_drvinfo, .get_strings = otx2vf_get_strings, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 77a13fb555fb..709fc0114fbd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -18,11 +18,13 @@ struct otx2_flow { struct ethtool_rx_flow_spec flow_spec; struct list_head list; u32 location; - u16 entry; + u32 entry; bool is_vf; u8 rss_ctx_id; +#define DMAC_FILTER_RULE BIT(0) +#define PFC_FLOWCTRL_RULE BIT(1) + u16 rule_type; int vf; - bool dmac_filter; }; enum dmac_req { @@ -230,6 +232,9 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf) return 0; } +/* TODO : revisit on size */ +#define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32) + int otx2vf_mcam_flow_init(struct otx2_nic *pfvf) { struct otx2_flow_config *flow_cfg; @@ -240,6 +245,12 @@ int otx2vf_mcam_flow_init(struct otx2_nic *pfvf) if (!pfvf->flow_cfg) return -ENOMEM; + pfvf->flow_cfg->dmacflt_bmap = devm_kcalloc(pfvf->dev, + BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ), + sizeof(long), GFP_KERNEL); + if (!pfvf->flow_cfg->dmacflt_bmap) + return -ENOMEM; + flow_cfg = pfvf->flow_cfg; INIT_LIST_HEAD(&flow_cfg->flow_list); flow_cfg->max_flows = 0; @@ -257,6 +268,12 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) if (!pf->flow_cfg) return -ENOMEM; 
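The allocation that follows backs the bitmap-pointer conversion seen throughout this patch (dmacflt_bmap goes from a single unsigned long to a heap-allocated bitmap). Sizing in longs via BITS_TO_LONGS() is the standard idiom that keeps set_bit()/find_first_zero_bit() within bounds for any bit count:

unsigned long *bmap;
unsigned long slot;

bmap = devm_kcalloc(dev, BITS_TO_LONGS(nbits), sizeof(long), GFP_KERNEL);
if (!bmap)
	return -ENOMEM;
set_bit(0, bmap);				/* mark slot 0 in use */
slot = find_first_zero_bit(bmap, nbits);	/* next free slot     */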
+ pf->flow_cfg->dmacflt_bmap = devm_kcalloc(pf->dev, + BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ), + sizeof(long), GFP_KERNEL); + if (!pf->flow_cfg->dmacflt_bmap) + return -ENOMEM; + INIT_LIST_HEAD(&pf->flow_cfg->flow_list); /* Allocate bare minimum number of MCAM entries needed for @@ -282,7 +299,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) return 0; pf->flow_cfg->bmap_to_dmacindex = - devm_kzalloc(pf->dev, sizeof(u8) * + devm_kzalloc(pf->dev, sizeof(u32) * pf->flow_cfg->dmacflt_max_flows, GFP_KERNEL); @@ -353,7 +370,7 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac) { struct otx2_nic *pf = netdev_priv(netdev); - if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap, + if (!bitmap_empty(pf->flow_cfg->dmacflt_bmap, pf->flow_cfg->dmacflt_max_flows)) netdev_warn(netdev, "Add %pM to CGX/RPM DMAC filters list as well\n", @@ -436,7 +453,7 @@ int otx2_get_maxflows(struct otx2_flow_config *flow_cfg) return 0; if (flow_cfg->nr_flows == flow_cfg->max_flows || - bitmap_weight(&flow_cfg->dmacflt_bmap, + !bitmap_empty(flow_cfg->dmacflt_bmap, flow_cfg->dmacflt_max_flows)) return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows; else @@ -899,6 +916,9 @@ static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf, static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) { u64 ring_cookie = flow->flow_spec.ring_cookie; +#ifdef CONFIG_DCB + int vlan_prio, qidx, pfc_rule = 0; +#endif struct npc_install_flow_req *req; int err, vf = 0; @@ -940,6 +960,24 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) mutex_unlock(&pfvf->mbox.lock); return -EINVAL; } + +#ifdef CONFIG_DCB + /* Identify PFC rule if PFC enabled and ntuple rule is vlan */ + if (!vf && (req->features & BIT_ULL(NPC_OUTER_VID)) && + pfvf->pfc_en && req->op != NIX_RX_ACTIONOP_RSS) { + vlan_prio = ntohs(req->packet.vlan_tci) & + ntohs(req->mask.vlan_tci); + + /* Get the priority */ + vlan_prio >>= 13; + flow->rule_type |= PFC_FLOWCTRL_RULE; + /* Check if PFC enabled for this priority */ + if (pfvf->pfc_en & BIT(vlan_prio)) { + pfc_rule = true; + qidx = req->index; + } + } +#endif } /* ethtool ring_cookie has (VF + 1) for VF */ @@ -951,6 +989,12 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) /* Send message to AF */ err = otx2_sync_mbox_msg(&pfvf->mbox); + +#ifdef CONFIG_DCB + if (!err && pfc_rule) + otx2_update_bpid_in_rqctx(pfvf, vlan_prio, qidx, true); +#endif + mutex_unlock(&pfvf->mbox.lock); return err; } @@ -966,7 +1010,7 @@ static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf, return -ENOMEM; pf_mac->entry = 0; - pf_mac->dmac_filter = true; + pf_mac->rule_type |= DMAC_FILTER_RULE; pf_mac->location = pfvf->flow_cfg->max_flows; memcpy(&pf_mac->flow_spec, &flow->flow_spec, sizeof(struct ethtool_rx_flow_spec)); @@ -981,7 +1025,7 @@ static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf, otx2_add_flow_to_list(pfvf, pf_mac); pfvf->flow_cfg->nr_flows++; - set_bit(0, &pfvf->flow_cfg->dmacflt_bmap); + set_bit(0, pfvf->flow_cfg->dmacflt_bmap); return 0; } @@ -1031,11 +1075,11 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) eth_hdr = &flow->flow_spec.h_u.ether_spec; /* Sync dmac filter table with updated fields */ - if (flow->dmac_filter) + if (flow->rule_type & DMAC_FILTER_RULE) return otx2_dmacflt_update(pfvf, eth_hdr->h_dest, flow->entry); - if (bitmap_full(&flow_cfg->dmacflt_bmap, + if (bitmap_full(flow_cfg->dmacflt_bmap, flow_cfg->dmacflt_max_flows)) { netdev_warn(pfvf->netdev, "Can't insert the rule %d as max allowed dmac 
filters are %d\n", @@ -1049,17 +1093,17 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) } /* Install PF mac address to DMAC filter list */ - if (!test_bit(0, &flow_cfg->dmacflt_bmap)) + if (!test_bit(0, flow_cfg->dmacflt_bmap)) otx2_add_flow_with_pfmac(pfvf, flow); - flow->dmac_filter = true; - flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap, + flow->rule_type |= DMAC_FILTER_RULE; + flow->entry = find_first_zero_bit(flow_cfg->dmacflt_bmap, flow_cfg->dmacflt_max_flows); fsp->location = flow_cfg->max_flows + flow->entry; flow->flow_spec.location = fsp->location; flow->location = fsp->location; - set_bit(flow->entry, &flow_cfg->dmacflt_bmap); + set_bit(flow->entry, flow_cfg->dmacflt_bmap); otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry); } else { @@ -1120,16 +1164,17 @@ static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req) bool found = false; list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) { - if (iter->dmac_filter && iter->entry == 0) { + if ((iter->rule_type & DMAC_FILTER_RULE) && iter->entry == 0) { eth_hdr = &iter->flow_spec.h_u.ether_spec; if (req == DMAC_ADDR_DEL) { otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, 0); - clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap); + clear_bit(0, pfvf->flow_cfg->dmacflt_bmap); found = true; } else { ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr); + otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0); } break; @@ -1156,7 +1201,7 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location) if (!flow) return -ENOENT; - if (flow->dmac_filter) { + if (flow->rule_type & DMAC_FILTER_RULE) { struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec; /* user not allowed to remove dmac filter with interface mac */ @@ -1165,15 +1210,22 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location) err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, flow->entry); - clear_bit(flow->entry, &flow_cfg->dmacflt_bmap); + clear_bit(flow->entry, flow_cfg->dmacflt_bmap); /* If all dmac filters are removed delete macfilter with * interface mac address and configure CGX/RPM block in * promiscuous mode */ - if (bitmap_weight(&flow_cfg->dmacflt_bmap, + if (bitmap_weight(flow_cfg->dmacflt_bmap, flow_cfg->dmacflt_max_flows) == 1) otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL); } else { +#ifdef CONFIG_DCB + if (flow->rule_type & PFC_FLOWCTRL_RULE) + otx2_update_bpid_in_rqctx(pfvf, 0, + flow->flow_spec.ring_cookie, + false); +#endif + err = otx2_remove_flow_msg(pfvf, flow->entry, false); } @@ -1383,7 +1435,7 @@ void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf) struct ethhdr *eth_hdr; list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) { - if (iter->dmac_filter) { + if (iter->rule_type & DMAC_FILTER_RULE) { eth_hdr = &iter->flow_spec.h_u.ether_spec; otx2_dmacflt_add(pf, eth_hdr->h_dest, iter->entry); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 1e0d0c9c1dac..303930499a4c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -15,6 +15,7 @@ #include <net/ip.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> +#include <linux/bitfield.h> #include "otx2_reg.h" #include "otx2_common.h" @@ -394,7 +395,12 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf, dst_mdev->msg_size = mbox_hdr->msg_size; dst_mdev->num_msgs = num_msgs; err = otx2_sync_mbox_msg(dst_mbox); - if (err) { + /* Error code -EIO indicate there is a communication failure + * to the AF. 
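This comment draws a useful line between transport and application errors on the PF-to-AF mailbox: -EIO means the AF never replied (nothing exists to forward), while any other value was produced by the AF itself and is already encoded in the response messages destined for the VF. Condensed:

err = otx2_sync_mbox_msg(dst_mbox);
if (err == -EIO) {
	dev_warn(pf->dev, "AF not responding to VF%d messages\n", vf);
	return err;	/* timeout: no responses to forward */
}
/* otherwise fall through and forward AF's responses to the VF */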
Rest of the error codes indicate that AF processed + * VF messages and set the error codes in response messages + * (if any) so simply forward responses to VF. + */ + if (err == -EIO) { dev_warn(pf->dev, "AF not responding to VF%d messages\n", vf); /* restore PF mbase and exit */ @@ -853,6 +859,15 @@ static void otx2_handle_link_event(struct otx2_nic *pf) } } +int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf, + struct mcs_intr_info *event, + struct msg_rsp *rsp) +{ + cn10k_handle_mcs_event(pf, event); + + return 0; +} + int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf, struct cgx_link_info_msg *msg, struct msg_rsp *rsp) @@ -912,6 +927,7 @@ static int otx2_process_mbox_msg_up(struct otx2_nic *pf, return err; \ } MBOX_UP_CGX_MESSAGES +MBOX_UP_MCS_MESSAGES #undef M break; default: @@ -1115,7 +1131,7 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable) struct msg_req *msg; int err; - if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap, + if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap, pf->flow_cfg->dmacflt_max_flows)) netdev_warn(pf->netdev, "CGX/RPM internal loopback might not work as DMAC filters are active\n"); @@ -1156,6 +1172,59 @@ int otx2_set_real_num_queues(struct net_device *netdev, } EXPORT_SYMBOL(otx2_set_real_num_queues); +static char *nix_sqoperr_e_str[NIX_SQOPERR_MAX] = { + "NIX_SQOPERR_OOR", + "NIX_SQOPERR_CTX_FAULT", + "NIX_SQOPERR_CTX_POISON", + "NIX_SQOPERR_DISABLED", + "NIX_SQOPERR_SIZE_ERR", + "NIX_SQOPERR_OFLOW", + "NIX_SQOPERR_SQB_NULL", + "NIX_SQOPERR_SQB_FAULT", + "NIX_SQOPERR_SQE_SZ_ZERO", +}; + +static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = { + "NIX_MNQERR_SQ_CTX_FAULT", + "NIX_MNQERR_SQ_CTX_POISON", + "NIX_MNQERR_SQB_FAULT", + "NIX_MNQERR_SQB_POISON", + "NIX_MNQERR_TOTAL_ERR", + "NIX_MNQERR_LSO_ERR", + "NIX_MNQERR_CQ_QUERY_ERR", + "NIX_MNQERR_MAX_SQE_SIZE_ERR", + "NIX_MNQERR_MAXLEN_ERR", + "NIX_MNQERR_SQE_SIZEM1_ZERO", +}; + +static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = { + "NIX_SND_STATUS_GOOD", + "NIX_SND_STATUS_SQ_CTX_FAULT", + "NIX_SND_STATUS_SQ_CTX_POISON", + "NIX_SND_STATUS_SQB_FAULT", + "NIX_SND_STATUS_SQB_POISON", + "NIX_SND_STATUS_HDR_ERR", + "NIX_SND_STATUS_EXT_ERR", + "NIX_SND_STATUS_JUMP_FAULT", + "NIX_SND_STATUS_JUMP_POISON", + "NIX_SND_STATUS_CRC_ERR", + "NIX_SND_STATUS_IMM_ERR", + "NIX_SND_STATUS_SG_ERR", + "NIX_SND_STATUS_MEM_ERR", + "NIX_SND_STATUS_INVALID_SUBDC", + "NIX_SND_STATUS_SUBDC_ORDER_ERR", + "NIX_SND_STATUS_DATA_FAULT", + "NIX_SND_STATUS_DATA_POISON", + "NIX_SND_STATUS_NPC_DROP_ACTION", + "NIX_SND_STATUS_LOCK_VIOL", + "NIX_SND_STATUS_NPC_UCAST_CHAN_ERR", + "NIX_SND_STATUS_NPC_MCAST_CHAN_ERR", + "NIX_SND_STATUS_NPC_MCAST_ABORT", + "NIX_SND_STATUS_NPC_VTAG_PTR_ERR", + "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR", + "NIX_SND_STATUS_SEND_STATS_ERR", +}; + static irqreturn_t otx2_q_intr_handler(int irq, void *data) { struct otx2_nic *pf = data; @@ -1189,46 +1258,67 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data) /* SQ */ for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) { + u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg; + u8 sq_op_err_code, mnq_err_code, snd_err_code; + + /* Below debug registers captures first errors corresponding to + * those registers. We don't have to check against SQ qid as + * these are fatal errors. 
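The decode pattern used for all three debug registers below is the same: bit 44 is a latch-valid flag, bits 7:0 carry the error code (an index into the string tables above), and writing bit 44 back re-arms the latch (write-one-to-clear). In isolation:

u64 dbg = otx2_read64(pf, NIX_LF_SQ_OP_ERR_DBG);

if (dbg & BIT_ULL(44)) {
	u8 code = FIELD_GET(GENMASK_ULL(7, 0), dbg);

	netdev_err(pf->netdev, "err=%s\n", nix_sqoperr_e_str[code]);
	otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));	/* W1C */
}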
+ */ + ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT); val = otx2_atomic64_add((qidx << 44), ptr); otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) | (val & NIX_SQINT_BITS)); - if (!(val & (NIX_SQINT_BITS | BIT_ULL(42)))) - continue; - if (val & BIT_ULL(42)) { netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", qidx, otx2_read64(pf, NIX_LF_ERR_INT)); - } else { - if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) { - netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx", - qidx, - otx2_read64(pf, - NIX_LF_SQ_OP_ERR_DBG)); - otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, - BIT_ULL(44)); - } - if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) { - netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DGB:0x%llx\n", - qidx, - otx2_read64(pf, NIX_LF_MNQ_ERR_DBG)); - otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, - BIT_ULL(44)); - } - if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) { - netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx", - qidx, - otx2_read64(pf, - NIX_LF_SEND_ERR_DBG)); - otx2_write64(pf, NIX_LF_SEND_ERR_DBG, - BIT_ULL(44)); - } - if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) - netdev_err(pf->netdev, "SQ%lld: SQB allocation failed", - qidx); + goto done; + } + + sq_op_err_dbg = otx2_read64(pf, NIX_LF_SQ_OP_ERR_DBG); + if (!(sq_op_err_dbg & BIT(44))) + goto chk_mnq_err_dbg; + + sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg); + netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx) err=%s\n", + qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]); + + otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44)); + + if (sq_op_err_code == NIX_SQOPERR_SQB_NULL) + goto chk_mnq_err_dbg; + + /* Err is not NIX_SQOPERR_SQB_NULL, call aq function to read SQ structure. + * TODO: But we are in irq context. 
How to call mbox functions which does sleep + */ + +chk_mnq_err_dbg: + mnq_err_dbg = otx2_read64(pf, NIX_LF_MNQ_ERR_DBG); + if (!(mnq_err_dbg & BIT(44))) + goto chk_snd_err_dbg; + + mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg); + netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx) err=%s\n", + qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code]); + otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44)); + +chk_snd_err_dbg: + snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG); + if (snd_err_dbg & BIT(44)) { + snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg); + netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n", + qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]); + otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44)); } +done: + /* Print values and reset */ + if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) + netdev_err(pf->netdev, "SQ%lld: SQB allocation failed", + qidx); + schedule_work(&pf->reset_task); } @@ -1249,6 +1339,7 @@ static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq) otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0)); /* Schedule NAPI */ + pf->napi_events++; napi_schedule_irqoff(&cq_poll->napi); return IRQ_HANDLED; @@ -1262,6 +1353,7 @@ static void otx2_disable_napi(struct otx2_nic *pf) for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { cq_poll = &qset->napi[qidx]; + cancel_work_sync(&cq_poll->dim.work); napi_disable(&cq_poll->napi); netif_napi_del(&cq_poll->napi); } @@ -1306,6 +1398,9 @@ static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu) int total_size; int rbuf_size; + if (pf->hw.rbuf_len) + return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM; + /* The data transferred by NIX to memory consists of actual packet * plus additional data which has timestamp and/or EDSA/HIGIG2 * headers if interface is configured in corresponding modes. @@ -1379,18 +1474,40 @@ static int otx2_init_hw_resources(struct otx2_nic *pf) goto err_free_sq_ptrs; } +#ifdef CONFIG_DCB + if (pf->pfc_en) { + err = otx2_pfc_txschq_alloc(pf); + if (err) { + mutex_unlock(&mbox->lock); + goto err_free_sq_ptrs; + } + } +#endif + err = otx2_config_nix_queues(pf); if (err) { mutex_unlock(&mbox->lock); goto err_free_txsch; } + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { - err = otx2_txschq_config(pf, lvl); + err = otx2_txschq_config(pf, lvl, 0, false); if (err) { mutex_unlock(&mbox->lock); goto err_free_nix_queues; } } + +#ifdef CONFIG_DCB + if (pf->pfc_en) { + err = otx2_pfc_txschq_config(pf); + if (err) { + mutex_unlock(&mbox->lock); + goto err_free_nix_queues; + } + } +#endif + mutex_unlock(&mbox->lock); return err; @@ -1445,6 +1562,11 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) if (err) dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n"); +#ifdef CONFIG_DCB + if (pf->pfc_en) + otx2_pfc_txschq_stop(pf); +#endif + mutex_lock(&mbox->lock); /* Disable backpressure */ if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK)) @@ -1538,6 +1660,24 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf) mutex_unlock(&pf->mbox.lock); } +static void otx2_dim_work(struct work_struct *w) +{ + struct dim_cq_moder cur_moder; + struct otx2_cq_poll *cq_poll; + struct otx2_nic *pfvf; + struct dim *dim; + + dim = container_of(w, struct dim, work); + cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + cq_poll = container_of(dim, struct otx2_cq_poll, dim); + pfvf = (struct otx2_nic *)cq_poll->dev; + pfvf->hw.cq_time_wait = (cur_moder.usec > CQ_TIMER_THRESH_MAX) ? 
+ CQ_TIMER_THRESH_MAX : cur_moder.usec; + pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ? + NAPI_POLL_WEIGHT : cur_moder.pkts; + dim->state = DIM_START_MEASURE; +} + int otx2_open(struct net_device *netdev) { struct otx2_nic *pf = netdev_priv(netdev); @@ -1604,8 +1744,9 @@ int otx2_open(struct net_device *netdev) cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ; cq_poll->dev = (void *)pf; - netif_napi_add(netdev, &cq_poll->napi, - otx2_napi_handler, NAPI_POLL_WEIGHT); + cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE; + INIT_WORK(&cq_poll->dim.work, otx2_dim_work); + netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler); napi_enable(&cq_poll->napi); } @@ -1689,9 +1830,6 @@ int otx2_open(struct net_device *netdev) if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK)) otx2_handle_link_event(pf); - /* Restore pause frame settings */ - otx2_config_pause_frm(pf); - /* Install DMAC Filters */ if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) otx2_dmacflt_reinstall_flows(pf); @@ -1713,7 +1851,6 @@ err_free_cints: vec = pci_irq_vector(pf->pdev, pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0)); - synchronize_irq(vec); free_irq(vec, pf); err_disable_napi: otx2_disable_napi(pf); @@ -1757,7 +1894,6 @@ int otx2_stop(struct net_device *netdev) vec = pci_irq_vector(pf->pdev, pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0)); - synchronize_irq(vec); free_irq(vec, pf); /* Cleanup CQ NAPI and IRQ */ @@ -1791,8 +1927,7 @@ int otx2_stop(struct net_device *netdev) kfree(qset->rq); kfree(qset->napi); /* Do not clear RQ/SQ ringsize settings */ - memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0, - sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt)); + memset_startat(qset, 0, sqe_cnt); return 0; } EXPORT_SYMBOL(otx2_stop); @@ -1829,6 +1964,30 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; } +static u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev) +{ +#ifdef CONFIG_DCB + struct otx2_nic *pf = netdev_priv(netdev); + u8 vlan_prio; +#endif + +#ifdef CONFIG_DCB + if (!skb->vlan_present) + goto pick_tx; + + vlan_prio = skb->vlan_tci >> 13; + if ((vlan_prio > pf->hw.tx_queues - 1) || + !pf->pfc_alloc_status[vlan_prio]) + goto pick_tx; + + return vlan_prio; + +pick_tx: +#endif + return netdev_pick_tx(netdev, skb, NULL); +} + static netdev_features_t otx2_fix_features(struct net_device *dev, netdev_features_t features) { @@ -1858,9 +2017,7 @@ static int otx2_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t changed = features ^ netdev->features; - bool ntuple = !!(features & NETIF_F_NTUPLE); struct otx2_nic *pf = netdev_priv(netdev); - bool tc = !!(features & NETIF_F_HW_TC); if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev)) return otx2_cgx_config_loopback(pf, @@ -1870,46 +2027,7 @@ static int otx2_set_features(struct net_device *netdev, return otx2_enable_rxvlan(pf, features & NETIF_F_HW_VLAN_CTAG_RX); - if ((changed & NETIF_F_NTUPLE) && !ntuple) - otx2_destroy_ntuple_flows(pf); - - if ((changed & NETIF_F_NTUPLE) && ntuple) { - if (!pf->flow_cfg->max_flows) { - netdev_err(netdev, - "Can't enable NTUPLE, MCAM entries not allocated\n"); - return -EINVAL; - } - } - - if ((changed & NETIF_F_HW_TC) && tc) { - if (!pf->flow_cfg->max_flows) { - netdev_err(netdev, - "Can't enable TC, MCAM entries not allocated\n"); - return -EINVAL; - } - } - - if 
((changed & NETIF_F_HW_TC) && !tc && - pf->flow_cfg && pf->flow_cfg->nr_flows) { - netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n"); - return -EBUSY; - } - - if ((changed & NETIF_F_NTUPLE) && ntuple && - (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) { - netdev_err(netdev, - "Can't enable NTUPLE when TC is active, disable TC and retry\n"); - return -EINVAL; - } - - if ((changed & NETIF_F_HW_TC) && tc && - (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) { - netdev_err(netdev, - "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n"); - return -EINVAL; - } - - return 0; + return otx2_handle_ntuple_tc_features(netdev, features); } static void otx2_reset_task(struct work_struct *work) @@ -2002,14 +2120,21 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - switch (config.tx_type) { case HWTSTAMP_TX_OFF: + if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC) + pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC; + + cancel_delayed_work(&pfvf->ptp->synctstamp_work); otx2_config_hw_tx_tstamp(pfvf, false); break; + case HWTSTAMP_TX_ONESTEP_SYNC: + if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag)) + return -ERANGE; + pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC; + schedule_delayed_work(&pfvf->ptp->synctstamp_work, + msecs_to_jiffies(500)); + fallthrough; case HWTSTAMP_TX_ON: otx2_config_hw_tx_tstamp(pfvf, true); break; @@ -2468,6 +2593,7 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_open = otx2_open, .ndo_stop = otx2_stop, .ndo_start_xmit = otx2_xmit, + .ndo_select_queue = otx2_select_queue, .ndo_fix_features = otx2_fix_features, .ndo_set_mac_address = otx2_set_mac_address, .ndo_change_mtu = otx2_change_mtu, @@ -2624,6 +2750,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) hw->tx_queues = qcount; hw->tot_tx_queues = qcount; hw->max_queues = qcount; + hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN; + /* Use CQE of 128 byte descriptor size by default */ + hw->xqe_size = 128; num_vec = pci_msix_vec_count(pdev); hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE, @@ -2720,6 +2849,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_ptp_destroy; + err = cn10k_mcs_init(pf); + if (err) + goto err_del_mcam_entries; + if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT) netdev->hw_features |= NETIF_F_NTUPLE; @@ -2741,7 +2874,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL; - netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; + netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS); netdev->watchdog_timeo = OTX2_TX_TIMEOUT; netdev->netdev_ops = &otx2_netdev_ops; @@ -2752,7 +2885,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); - goto err_del_mcam_entries; + goto err_mcs_free; } err = otx2_wq_init(pf); @@ -2777,9 +2910,11 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Enable link notifications */ otx2_cgx_config_linkevents(pf, true); - /* Enable pause frames by default */ - pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED; - pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED; +#ifdef CONFIG_DCB + err = otx2_dcbnl_set_ops(netdev); + if (err) + goto err_pf_sriov_init; 
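For reference, the HWTSTAMP_TX_ONESTEP_SYNC path added above is exercised through the standard SIOCSHWTSTAMP ioctl: on silicon without CN10K_PTP_ONESTEP it fails with ERANGE, otherwise the driver enables TX timestamping (note the fallthrough) and starts the 500 ms synctstamp_work that keeps a clock snapshot fresh for one-step correction. A minimal userspace sketch (interface name is a placeholder):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_onestep(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ONESTEP_SYNC,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr); /* errno ERANGE on pre-CN10K */
}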
+#endif return 0; @@ -2789,6 +2924,8 @@ err_mcam_flow_del: otx2_mcam_flow_del(pf); err_unreg_netdev: unregister_netdev(netdev); +err_mcs_free: + cn10k_mcs_free(pf); err_del_mcam_entries: otx2_mcam_flow_del(pf); err_ptp_destroy: @@ -2924,6 +3061,23 @@ static void otx2_remove(struct pci_dev *pdev) if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) otx2_config_hw_rx_tstamp(pf, false); + /* Disable 802.3x pause frames */ + if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED || + (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) { + pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED; + pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED; + otx2_config_pause_frm(pf); + } + + cn10k_mcs_free(pf); + +#ifdef CONFIG_DCB + /* Disable PFC config */ + if (pf->pfc_en) { + pf->pfc_en = 0; + otx2_config_priority_flow_ctrl(pf); + } +#endif cancel_work_sync(&pf->reset_task); /* Disable link notifications */ otx2_cgx_config_linkevents(pf, false); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index 61c20907315f..896b2f9bac34 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -10,6 +10,33 @@ #include "otx2_common.h" #include "otx2_ptp.h" +static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp) +{ + struct ptp_req *req; + struct ptp_rsp *rsp; + int err; + + if (!ptp->nic) + return 0; + + req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); + if (!req) + return 0; + + req->op = PTP_OP_GET_CLOCK; + + err = otx2_sync_mbox_msg(&ptp->nic->mbox); + if (err) + return 0; + + rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0, + &req->hdr); + if (IS_ERR(rsp)) + return 0; + + return rsp->clk; +} + static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, @@ -46,32 +73,28 @@ static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh) return otx2_sync_mbox_msg(&ptp->nic->mbox); } -static u64 ptp_cc_read(const struct cyclecounter *cc) +static int ptp_extts_on(struct otx2_ptp *ptp, int on) { - struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter); struct ptp_req *req; - struct ptp_rsp *rsp; - int err; if (!ptp->nic) - return 0; + return -ENODEV; req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); if (!req) - return 0; + return -ENOMEM; - req->op = PTP_OP_GET_CLOCK; + req->op = PTP_OP_EXTTS_ON; + req->extts_on = on; - err = otx2_sync_mbox_msg(&ptp->nic->mbox); - if (err) - return 0; + return otx2_sync_mbox_msg(&ptp->nic->mbox); +} - rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0, - &req->hdr); - if (IS_ERR(rsp)) - return 0; +static u64 ptp_cc_read(const struct cyclecounter *cc) +{ + struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter); - return rsp->clk; + return otx2_ptp_get_clock(ptp); } static u64 ptp_tstmp_read(struct otx2_ptp *ptp) @@ -101,6 +124,15 @@ static u64 ptp_tstmp_read(struct otx2_ptp *ptp) return rsp->clk; } +static void otx2_get_ptpclock(struct otx2_ptp *ptp, u64 *tstamp) +{ + struct otx2_nic *pfvf = ptp->nic; + + mutex_lock(&pfvf->mbox.lock); + *tstamp = timecounter_read(&ptp->time_counter); + mutex_unlock(&pfvf->mbox.lock); +} + static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, @@ -119,14 +151,10 @@ static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info, { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); - struct otx2_nic *pfvf = 
ptp->nic; - u64 nsec; + u64 tstamp; - mutex_lock(&pfvf->mbox.lock); - nsec = timecounter_read(&ptp->time_counter); - mutex_unlock(&pfvf->mbox.lock); - - *ts = ns_to_timespec64(nsec); + otx2_get_ptpclock(ptp, &tstamp); + *ts = ns_to_timespec64(tstamp); return 0; } @@ -178,8 +206,6 @@ static void otx2_ptp_extts_check(struct work_struct *work) event.index = 0; event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp); ptp_clock_event(ptp->ptp_clock, &event); - ptp->last_extts = tstmp; - new_thresh = tstmp % 500000000; if (ptp->thresh != new_thresh) { mutex_lock(&ptp->nic->mbox.lock); @@ -187,10 +213,28 @@ static void otx2_ptp_extts_check(struct work_struct *work) ptp_set_thresh(ptp, new_thresh); mutex_unlock(&ptp->nic->mbox.lock); ptp->thresh = new_thresh; } + ptp->last_extts = tstmp; } schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200)); } +static void otx2_sync_tstamp(struct work_struct *work) +{ + struct otx2_ptp *ptp = container_of(work, struct otx2_ptp, + synctstamp_work.work); + struct otx2_nic *pfvf = ptp->nic; + u64 tstamp; + + mutex_lock(&pfvf->mbox.lock); + tstamp = otx2_ptp_get_clock(ptp); + mutex_unlock(&pfvf->mbox.lock); + + ptp->tstamp = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp); + ptp->base_ns = tstamp % NSEC_PER_SEC; + + schedule_delayed_work(&ptp->synctstamp_work, msecs_to_jiffies(250)); +} + static int otx2_ptp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq, int on) { @@ -207,10 +251,13 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info, rq->extts.index); if (pin < 0) return -EBUSY; - if (on) + if (on) { + ptp_extts_on(ptp, on); schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200)); - else + } else { + ptp_extts_on(ptp, on); cancel_delayed_work_sync(&ptp->extts_work); + } return 0; default: break; @@ -294,6 +341,16 @@ int otx2_ptp_init(struct otx2_nic *pfvf) goto error; } + if (is_dev_otx2(pfvf->pdev)) { + ptp_ptr->convert_rx_ptp_tstmp = &otx2_ptp_convert_rx_timestamp; + ptp_ptr->convert_tx_ptp_tstmp = &otx2_ptp_convert_tx_timestamp; + } else { + ptp_ptr->convert_rx_ptp_tstmp = &cn10k_ptp_convert_timestamp; + ptp_ptr->convert_tx_ptp_tstmp = &cn10k_ptp_convert_timestamp; + } + + INIT_DELAYED_WORK(&ptp_ptr->synctstamp_work, otx2_sync_tstamp); + pfvf->ptp = ptp_ptr; error: @@ -308,6 +365,8 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf) if (!ptp) return; + cancel_delayed_work(&pfvf->ptp->synctstamp_work); + ptp_clock_unregister(ptp->ptp_clock); kfree(ptp); pfvf->ptp = NULL; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h index 6ff284211d7b..7ff41927ceaf 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h @@ -8,6 +8,21 @@ #ifndef OTX2_PTP_H #define OTX2_PTP_H +static inline u64 otx2_ptp_convert_rx_timestamp(u64 timestamp) +{ + return be64_to_cpu(*(__be64 *)&timestamp); +} + +static inline u64 otx2_ptp_convert_tx_timestamp(u64 timestamp) +{ + return timestamp; +} + +static inline u64 cn10k_ptp_convert_timestamp(u64 timestamp) +{ + return ((timestamp >> 32) * NSEC_PER_SEC) + (timestamp & 0xFFFFFFFFUL); +} + int otx2_ptp_init(struct otx2_nic *pfvf); void otx2_ptp_destroy(struct otx2_nic *pfvf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h index 4bbd12ff26e6..fa37b9f312ca 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h @@ -236,8
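cn10k_ptp_convert_timestamp() above unpacks CN10K's split format: seconds in the upper 32 bits, nanoseconds within the second in the lower 32. The same arithmetic as a standalone worked example:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t cn10k_ts_to_ns(uint64_t raw)
{
	/* upper 32 bits: seconds, lower 32 bits: nanoseconds */
	return ((raw >> 32) * NSEC_PER_SEC) + (raw & 0xFFFFFFFFULL);
}

int main(void)
{
	uint64_t raw = (5ULL << 32) | 500;	/* 5 s + 500 ns */

	printf("%llu\n", (unsigned long long)cn10k_ts_to_ns(raw));
	/* prints 5000000500 */
	return 0;
}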
+236,15 @@ struct nix_sqe_sg_s { /* NIX send memory subdescriptor structure */ struct nix_sqe_mem_s { - u64 offset : 16; /* W0 */ - u64 rsvd_51_16 : 36; + u64 start_offset : 8; + u64 rsvd_11_8 : 4; + u64 rsvd_12 : 1; + u64 udp_csum_crt : 1; + u64 update64 : 1; + u64 rsvd_15_16 : 1; + u64 base_ns : 32; + u64 step_type : 1; + u64 rsvd_51_49 : 3; u64 per_lso_seg : 1; u64 wmem : 1; u64 dsz : 2; @@ -274,4 +281,61 @@ enum nix_sqint_e { BIT_ULL(NIX_SQINT_SEND_ERR) | \ BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) +enum nix_sqoperr_e { + NIX_SQOPERR_OOR = 0, + NIX_SQOPERR_CTX_FAULT = 1, + NIX_SQOPERR_CTX_POISON = 2, + NIX_SQOPERR_DISABLED = 3, + NIX_SQOPERR_SIZE_ERR = 4, + NIX_SQOPERR_OFLOW = 5, + NIX_SQOPERR_SQB_NULL = 6, + NIX_SQOPERR_SQB_FAULT = 7, + NIX_SQOPERR_SQE_SZ_ZERO = 8, + NIX_SQOPERR_MAX, +}; + +enum nix_mnqerr_e { + NIX_MNQERR_SQ_CTX_FAULT = 0, + NIX_MNQERR_SQ_CTX_POISON = 1, + NIX_MNQERR_SQB_FAULT = 2, + NIX_MNQERR_SQB_POISON = 3, + NIX_MNQERR_TOTAL_ERR = 4, + NIX_MNQERR_LSO_ERR = 5, + NIX_MNQERR_CQ_QUERY_ERR = 6, + NIX_MNQERR_MAX_SQE_SIZE_ERR = 7, + NIX_MNQERR_MAXLEN_ERR = 8, + NIX_MNQERR_SQE_SIZEM1_ZERO = 9, + NIX_MNQERR_MAX, +}; + +enum nix_snd_status_e { + NIX_SND_STATUS_GOOD = 0x0, + NIX_SND_STATUS_SQ_CTX_FAULT = 0x1, + NIX_SND_STATUS_SQ_CTX_POISON = 0x2, + NIX_SND_STATUS_SQB_FAULT = 0x3, + NIX_SND_STATUS_SQB_POISON = 0x4, + NIX_SND_STATUS_HDR_ERR = 0x5, + NIX_SND_STATUS_EXT_ERR = 0x6, + NIX_SND_STATUS_JUMP_FAULT = 0x7, + NIX_SND_STATUS_JUMP_POISON = 0x8, + NIX_SND_STATUS_CRC_ERR = 0x9, + NIX_SND_STATUS_IMM_ERR = 0x10, + NIX_SND_STATUS_SG_ERR = 0x11, + NIX_SND_STATUS_MEM_ERR = 0x12, + NIX_SND_STATUS_INVALID_SUBDC = 0x13, + NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14, + NIX_SND_STATUS_DATA_FAULT = 0x15, + NIX_SND_STATUS_DATA_POISON = 0x16, + NIX_SND_STATUS_NPC_DROP_ACTION = 0x17, + NIX_SND_STATUS_LOCK_VIOL = 0x18, + NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19, + NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20, + NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21, + NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22, + NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23, + NIX_SND_STATUS_SEND_MEM_FAULT = 0x24, + NIX_SND_STATUS_SEND_STATS_ERR = 0x25, + NIX_SND_STATUS_MAX, +}; + #endif /* OTX2_STRUCT_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index 626961a41089..e64318c110fd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -28,6 +28,9 @@ #define MAX_RATE_EXPONENT 0x0FULL #define MAX_RATE_MANTISSA 0xFFULL +#define CN10K_MAX_BURST_MANTISSA 0x7FFFULL +#define CN10K_MAX_BURST_SIZE 8453888ULL + /* Bitfields in NIX_TLX_PIR register */ #define TLX_RATE_MANTISSA GENMASK_ULL(8, 1) #define TLX_RATE_EXPONENT GENMASK_ULL(12, 9) @@ -35,6 +38,9 @@ #define TLX_BURST_MANTISSA GENMASK_ULL(36, 29) #define TLX_BURST_EXPONENT GENMASK_ULL(40, 37) +#define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29) +#define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44) + struct otx2_tc_flow_stats { u64 bytes; u64 pkts; @@ -58,7 +64,7 @@ int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic) { struct otx2_tc_info *tc = &nic->tc_info; - if (!nic->flow_cfg->max_flows || is_otx2_vf(nic->pcifunc)) + if (!nic->flow_cfg->max_flows) return 0; /* Max flows changed, free the existing bitmap */ @@ -77,33 +83,42 @@ int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic) } EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap); -static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp, - u32 *burst_mantissa) +static void otx2_get_egress_burst_cfg(struct 
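The nix_snd_status_e codes above are what a send CQE reports on completion. Note the values jump from 0x9 straight to 0x10, so a switch rather than an array indexed by status is the safer way to decode them; a small illustrative mapping (the strings are made up here, not from the driver):

static const char *nix_snd_status_str(int status)
{
	switch (status) {
	case NIX_SND_STATUS_GOOD:		return "good";
	case NIX_SND_STATUS_SQ_CTX_FAULT:	return "SQ context fault";
	case NIX_SND_STATUS_JUMP_FAULT:		return "jump fault";
	case NIX_SND_STATUS_CRC_ERR:		return "CRC error";
	case NIX_SND_STATUS_NPC_DROP_ACTION:	return "NPC drop action";
	default:				return "unknown";
	}
}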
otx2_nic *nic, u32 burst, + u32 *burst_exp, u32 *burst_mantissa) { + int max_burst, max_mantissa; unsigned int tmp; + if (is_dev_otx2(nic->pdev)) { + max_burst = MAX_BURST_SIZE; + max_mantissa = MAX_BURST_MANTISSA; + } else { + max_burst = CN10K_MAX_BURST_SIZE; + max_mantissa = CN10K_MAX_BURST_MANTISSA; + } + /* Burst is calculated as * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256 * Max supported burst size is 130,816 bytes. */ - burst = min_t(u32, burst, MAX_BURST_SIZE); + burst = min_t(u32, burst, max_burst); if (burst) { *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0; tmp = burst - rounddown_pow_of_two(burst); - if (burst < MAX_BURST_MANTISSA) + if (burst < max_mantissa) *burst_mantissa = tmp * 2; else *burst_mantissa = tmp / (1ULL << (*burst_exp - 7)); } else { *burst_exp = MAX_BURST_EXPONENT; - *burst_mantissa = MAX_BURST_MANTISSA; + *burst_mantissa = max_mantissa; } } -static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp, +static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp, u32 *mantissa, u32 *div_exp) { - unsigned int tmp; + u64 tmp; /* Rate calculation by hardware * @@ -132,21 +147,44 @@ static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp, } } -static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate) +static u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic, + u64 maxrate, u32 burst) { - struct otx2_hw *hw = &nic->hw; - struct nix_txschq_config *req; u32 burst_exp, burst_mantissa; u32 exp, mantissa, div_exp; + u64 regval = 0; + + /* Get exponent and mantissa values from the desired rate */ + otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa); + otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp); + + if (is_dev_otx2(nic->pdev)) { + regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) | + FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) | + FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | + FIELD_PREP(TLX_RATE_EXPONENT, exp) | + FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); + } else { + regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) | + FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) | + FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | + FIELD_PREP(TLX_RATE_EXPONENT, exp) | + FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); + } + + return regval; +} + +static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, + u32 burst, u64 maxrate) +{ + struct otx2_hw *hw = &nic->hw; + struct nix_txschq_config *req; int txschq, err; /* All SQs share the same TL4, so pick the first scheduler */ txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0]; - /* Get exponent and mantissa values from the desired rate */ - otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa); - otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp); - mutex_lock(&nic->mbox.lock); req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox); if (!req) { @@ -157,11 +195,7 @@ static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 ma req->lvl = NIX_TXSCH_LVL_TL4; req->num_regs = 1; req->reg[0] = NIX_AF_TL4X_PIR(txschq); - req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) | - FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) | - FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | - FIELD_PREP(TLX_RATE_EXPONENT, exp) | - FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); + req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst); err = otx2_sync_mbox_msg(&nic->mbox); mutex_unlock(&nic->mbox.lock); @@ -190,13 +224,47 @@ static int 
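The burst encoding handled in otx2_get_egress_burst_cfg() above follows the formula quoted in its comment, burst = ((256 + mantissa) << (1 + exponent)) / 256. Running the decode direction standalone confirms the two hardware limits:

#include <stdio.h>

static unsigned long long nix_burst_bytes(unsigned int exp, unsigned int mantissa)
{
	return ((256ULL + mantissa) << (1 + exp)) / 256;
}

int main(void)
{
	/* OTX2: 8-bit mantissa (0xFF), exponent 0xF -> 130,816 bytes,
	 * the "Max supported burst size" in the comment above.
	 */
	printf("%llu\n", nix_burst_bytes(0xF, 0xFF));

	/* CN10K: the 15-bit mantissa (0x7FFF) raises the ceiling to
	 * 8,453,888 bytes (CN10K_MAX_BURST_SIZE).
	 */
	printf("%llu\n", nix_burst_bytes(0xF, 0x7FFF));
	return 0;
}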
otx2_tc_validate_flow(struct otx2_nic *nic, return 0; } +static int otx2_policer_validate(const struct flow_action *action, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) +{ + if (act->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when exceed action is not drop"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && + act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is not pipe or ok"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(action, act)) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is ok, but action is not last"); + return -EOPNOTSUPP; + } + + if (act->police.peakrate_bytes_ps || + act->police.avrate || act->police.overhead) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when peakrate/avrate/overhead is configured"); + return -EOPNOTSUPP; + } + + return 0; +} + static int otx2_tc_egress_matchall_install(struct otx2_nic *nic, struct tc_cls_matchall_offload *cls) { struct netlink_ext_ack *extack = cls->common.extack; struct flow_action *actions = &cls->rule->action; struct flow_action_entry *entry; - u32 rate; + u64 rate; int err; err = otx2_tc_validate_flow(nic, actions, extack); @@ -212,13 +280,17 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic, entry = &cls->rule->action.entries[0]; switch (entry->id) { case FLOW_ACTION_POLICE: + err = otx2_policer_validate(&cls->rule->action, entry, extack); + if (err) + return err; + if (entry->police.rate_pkt_ps) { NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); return -EOPNOTSUPP; } /* Convert bytes per second to Mbps */ rate = entry->police.rate_bytes_ps * 8; - rate = max_t(u32, rate / 1000000, 1); + rate = max_t(u64, rate / 1000000, 1); err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate); if (err) return err; @@ -315,6 +387,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, u8 nr_police = 0; bool pps = false; u64 rate; + int err; int i; if (!flow_action_has_entries(flow_action)) { @@ -355,6 +428,10 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, return -EOPNOTSUPP; } + err = otx2_policer_validate(flow_action, act, extack); + if (err) + return err; + if (act->police.rate_bytes_ps > 0) { rate = act->police.rate_bytes_ps * 8; burst = act->police.burst; @@ -571,21 +648,27 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, flow_spec->dport = match.key->dst; flow_mask->dport = match.mask->dst; - if (ip_proto == IPPROTO_UDP) - req->features |= BIT_ULL(NPC_DPORT_UDP); - else if (ip_proto == IPPROTO_TCP) - req->features |= BIT_ULL(NPC_DPORT_TCP); - else if (ip_proto == IPPROTO_SCTP) - req->features |= BIT_ULL(NPC_DPORT_SCTP); + + if (flow_mask->dport) { + if (ip_proto == IPPROTO_UDP) + req->features |= BIT_ULL(NPC_DPORT_UDP); + else if (ip_proto == IPPROTO_TCP) + req->features |= BIT_ULL(NPC_DPORT_TCP); + else if (ip_proto == IPPROTO_SCTP) + req->features |= BIT_ULL(NPC_DPORT_SCTP); + } flow_spec->sport = match.key->src; flow_mask->sport = match.mask->src; - if (ip_proto == IPPROTO_UDP) - req->features |= BIT_ULL(NPC_SPORT_UDP); - else if (ip_proto == IPPROTO_TCP) - req->features |= BIT_ULL(NPC_SPORT_TCP); - else if (ip_proto == IPPROTO_SCTP) - req->features |= BIT_ULL(NPC_SPORT_SCTP); + + if (flow_mask->sport) { + if (ip_proto == IPPROTO_UDP) 
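Widening rate to u64 in the matchall path above is not cosmetic: the bytes-per-second to Mbps conversion multiplies by 8 first, which overflows 32 bits for policer rates above roughly 4.2 Gbit/s. A standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* a 5 Gbit/s tc police rate arrives as bytes per second */
	uint64_t rate_bytes_ps = 625000000ULL;

	uint32_t bad  = (uint32_t)(rate_bytes_ps * 8); /* wraps mod 2^32 */
	uint64_t good = rate_bytes_ps * 8;

	printf("u32: %u Mbps\n", bad / 1000000);	/* 705 Mbps */
	printf("u64: %llu Mbps\n",
	       (unsigned long long)(good / 1000000));	/* 5000 Mbps */
	return 0;
}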
+ req->features |= BIT_ULL(NPC_SPORT_UDP); + else if (ip_proto == IPPROTO_TCP) + req->features |= BIT_ULL(NPC_SPORT_TCP); + else if (ip_proto == IPPROTO_SCTP) + req->features |= BIT_ULL(NPC_SPORT_SCTP); + } } return otx2_tc_parse_actions(nic, &rule->action, req, f, node); @@ -1023,6 +1106,7 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, return -EOPNOTSUPP; } } +EXPORT_SYMBOL(otx2_setup_tc); static const struct rhashtable_params tc_flow_ht_params = { .head_offset = offsetof(struct otx2_tc_flow, node), @@ -1052,6 +1136,7 @@ int otx2_init_tc(struct otx2_nic *nic) tc->flow_ht_params = tc_flow_ht_params; return rhashtable_init(&tc->flow_table, &tc->flow_ht_params); } +EXPORT_SYMBOL(otx2_init_tc); void otx2_shutdown_tc(struct otx2_nic *nic) { @@ -1060,3 +1145,4 @@ void otx2_shutdown_tc(struct otx2_nic *nic) kfree(tc->tc_entries_bitmap); rhashtable_destroy(&tc->flow_table); } +EXPORT_SYMBOL(otx2_shutdown_tc); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 0cc6353254bf..ef10aef3cda0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -19,6 +19,12 @@ #include "cn10k.h" #define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx))) +#define PTP_PORT 0x13F +/* PTPv2 header Original Timestamp starts at byte offset 34 and + * contains 6 byte seconds field and 4 byte nano seconds field. + */ +#define PTP_SYNC_SEC_OFFSET 34 + static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, struct bpf_prog *prog, struct nix_cqe_rx_s *cqe, @@ -148,6 +154,7 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf, if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id]; if (timestamp != 1) { + timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp); err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns); if (!err) { memset(&ts, 0, sizeof(ts)); @@ -167,14 +174,15 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf, static void otx2_set_rxtstamp(struct otx2_nic *pfvf, struct sk_buff *skb, void *data) { - u64 tsns; + u64 timestamp, tsns; int err; if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)) return; + timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data); /* The first 8 bytes is the timestamp */ - err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns); + err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns); if (err) return; @@ -433,6 +441,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int budget) { int tx_pkts = 0, tx_bytes = 0, qidx; + struct otx2_snd_queue *sq; struct nix_cqe_tx_s *cqe; int processed_cqe = 0; @@ -443,6 +452,9 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf, return 0; process_cqe: + qidx = cq->cq_idx - pfvf->hw.rx_queues; + sq = &pfvf->qset.sq[qidx]; + while (likely(processed_cqe < budget) && cq->pend_cqe) { cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq); if (unlikely(!cqe)) { @@ -450,18 +462,20 @@ process_cqe: return 0; break; } + if (cq->cq_type == CQ_XDP) { - qidx = cq->cq_idx - pfvf->hw.rx_queues; - otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx], - cqe); + otx2_xdp_snd_pkt_handler(pfvf, sq, cqe); } else { - otx2_snd_pkt_handler(pfvf, cq, - &pfvf->qset.sq[cq->cint_idx], - cqe, budget, &tx_pkts, &tx_bytes); + otx2_snd_pkt_handler(pfvf, cq, sq, cqe, budget, + &tx_pkts, &tx_bytes); } + cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID; processed_cqe++; cq->pend_cqe--; + + 
sq->cons_head++; + sq->cons_head &= (sq->sqe_cnt - 1); } /* Free CQEs to HW */ @@ -482,6 +496,18 @@ process_cqe: return 0; } +static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll) +{ + struct dim_sample dim_sample; + u64 rx_frames, rx_bytes; + + rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) + + OTX2_GET_RX_STATS(RX_UCAST); + rx_bytes = OTX2_GET_RX_STATS(RX_OCTS); + dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample); + net_dim(&cq_poll->dim, dim_sample); +} + int otx2_napi_handler(struct napi_struct *napi, int budget) { struct otx2_cq_queue *rx_cq = NULL; @@ -519,6 +545,17 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) if (pfvf->flags & OTX2_FLAG_INTF_DOWN) return workdone; + /* Check for adaptive interrupt coalesce */ + if (workdone != 0 && + ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) == + OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) { + /* Adjust irq coalese using net_dim */ + otx2_adjust_adaptive_coalese(pfvf, cq_poll); + /* Update irq coalescing */ + for (i = 0; i < pfvf->hw.cint_cnt; i++) + otx2_config_irq_coalescing(pfvf, i); + } + /* Re-enable interrupts */ otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), BIT_ULL(0)); @@ -599,7 +636,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, ext->subdc = NIX_SUBDC_EXT; if (skb_shinfo(skb)->gso_size) { ext->lso = 1; - ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb); + ext->lso_sb = skb_tcp_all_headers(skb); ext->lso_mps = skb_shinfo(skb)->gso_size; /* Only TSOv4 and TSOv6 GSO offloads are supported */ @@ -661,7 +698,8 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, } static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset, - int alg, u64 iova) + int alg, u64 iova, int ptp_offset, + u64 base_ns, int udp_csum) { struct nix_sqe_mem_s *mem; @@ -671,6 +709,13 @@ static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset, mem->wmem = 1; /* wait for the memory operation */ mem->addr = iova; + if (ptp_offset) { + mem->start_offset = ptp_offset; + mem->udp_csum_crt = udp_csum; + mem->base_ns = base_ns; + mem->step_type = 1; + } + *offset += sizeof(*mem); } @@ -906,7 +951,7 @@ static bool is_hw_tso_supported(struct otx2_nic *pfvf, * be correctly modified, hence don't offload such TSO segments. 
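otx2_adjust_adaptive_coalese() above feeds net_dim, which decides asynchronously whether to move to a different moderation profile; applying the result happens in a work item the driver initializes elsewhere. A sketch of that generic consumer pattern, with a hypothetical toy_write_cq_moderation() register writer:

#include <linux/dim.h>

/* Assumed set up elsewhere with INIT_WORK(&dim->work, toy_dim_work),
 * as the driver does for each cq_poll->dim.
 */
static void toy_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder cur = net_dim_get_rx_moderation(dim->mode,
							    dim->profile_ix);

	toy_write_cq_moderation(cur.usec, cur.pkts);	/* hypothetical */
	dim->state = DIM_START_MEASURE;
}

/* NAPI side, mirroring the calls above: accumulate event/packet/byte
 * counters, then let net_dim() decide if the profile should change.
 */
static void toy_napi_sample(struct dim *dim, u16 events, u64 pkts, u64 bytes)
{
	struct dim_sample sample;

	dim_update_sample(events, pkts, bytes, &sample);
	net_dim(dim, sample);	/* may schedule toy_dim_work() */
}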
*/ - payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)); + payload_len = skb->len - skb_tcp_all_headers(skb); last_seg_size = payload_len % skb_shinfo(skb)->gso_size; if (last_seg_size && last_seg_size < 16) return false; @@ -927,16 +972,102 @@ static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb) return skb_shinfo(skb)->gso_segs; } +static bool otx2_validate_network_transport(struct sk_buff *skb) +{ + if ((ip_hdr(skb)->protocol == IPPROTO_UDP) || + (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) { + struct udphdr *udph = udp_hdr(skb); + + if (udph->source == htons(PTP_PORT) && + udph->dest == htons(PTP_PORT)) + return true; + } + + return false; +} + +static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, int *udp_csum) +{ + struct ethhdr *eth = (struct ethhdr *)(skb->data); + u16 nix_offload_hlen = 0, inner_vhlen = 0; + u8 *data = skb->data, *msgtype; + __be16 proto = eth->h_proto; + int network_depth = 0; + + /* NIX is programmed to offload outer VLAN header + * in case of single vlan protocol field holds Network header ETH_IP/V6 + * in case of stacked vlan protocol field holds Inner vlan (8100) + */ + if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX && + skb->dev->features & NETIF_F_HW_VLAN_STAG_TX) { + if (skb->vlan_proto == htons(ETH_P_8021AD)) { + /* Get vlan protocol */ + proto = __vlan_get_protocol(skb, eth->h_proto, NULL); + /* SKB APIs like skb_transport_offset does not include + * offloaded vlan header length. Need to explicitly add + * the length + */ + nix_offload_hlen = VLAN_HLEN; + inner_vhlen = VLAN_HLEN; + } else if (skb->vlan_proto == htons(ETH_P_8021Q)) { + nix_offload_hlen = VLAN_HLEN; + } + } else if (eth_type_vlan(eth->h_proto)) { + proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth); + } + + switch (ntohs(proto)) { + case ETH_P_1588: + if (network_depth) + *offset = network_depth; + else + *offset = ETH_HLEN + nix_offload_hlen + + inner_vhlen; + break; + case ETH_P_IP: + case ETH_P_IPV6: + if (!otx2_validate_network_transport(skb)) + return false; + + *udp_csum = 1; + *offset = nix_offload_hlen + skb_transport_offset(skb) + + sizeof(struct udphdr); + } + + msgtype = data + *offset; + + /* Check PTP messageId is SYNC or not */ + return (*msgtype & 0xf) == 0; +} + static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb, struct otx2_snd_queue *sq, int *offset) { + struct ptpv2_tstamp *origin_tstamp; + int ptp_offset = 0, udp_csum = 0; + struct timespec64 ts; u64 iova; - if (!skb_shinfo(skb)->gso_size && - skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + if (unlikely(!skb_shinfo(skb)->gso_size && + (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) { + if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)) { + if (otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum)) { + origin_tstamp = (struct ptpv2_tstamp *) + ((u8 *)skb->data + ptp_offset + + PTP_SYNC_SEC_OFFSET); + ts = ns_to_timespec64(pfvf->ptp->tstamp); + origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff); + origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff); + origin_tstamp->nanoseconds = htonl(ts.tv_nsec); + /* Point to correction field in PTP packet */ + ptp_offset += 8; + } + } else { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + } iova = sq->timestamps->iova + (sq->head * sizeof(u64)); - otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova); + otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova, + ptp_offset, pfvf->ptp->base_ns, udp_csum); } else { 
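otx2_ptp_is_sync() and the timestamp stuffing above rely on two PTPv2 on-wire facts: messageType is the low nibble of the first header byte (Sync is 0), and the originTimestamp at byte 34 is a big-endian 48-bit seconds field followed by 32-bit nanoseconds. A sketch of the layout implied by the stores into struct ptpv2_tstamp (whose real definition lives elsewhere in the driver):

#include <linux/types.h>

struct toy_ptpv2_tstamp {
	__be16 seconds_msb;	/* bits 47:32 of the seconds field */
	__be32 seconds_lsb;	/* bits 31:0 of the seconds field  */
	__be32 nanoseconds;
} __packed;			/* 10 bytes at offset PTP_SYNC_SEC_OFFSET */

/* Same check as the end of otx2_ptp_is_sync(): Sync is messageType 0,
 * carried in the low nibble of the first PTP header byte.
 */
static inline bool toy_ptp_is_sync_msg(const u8 *ptp_hdr)
{
	return (ptp_hdr[0] & 0xf) == 0;
}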
skb_tx_timestamp(skb); } @@ -947,17 +1078,17 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, { struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx); struct otx2_nic *pfvf = netdev_priv(netdev); - int offset, num_segs, free_sqe; + int offset, num_segs, free_desc; struct nix_sqe_hdr_s *sqe_hdr; - /* Check if there is room for new SQE. - * 'Num of SQBs freed to SQ's pool - SQ's Aura count' - * will give free SQE count. + /* Check if there is enough room between producer + * and consumer index. */ - free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb; + free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1); + if (free_desc < sq->sqe_thresh) + return false; - if (free_sqe < sq->sqe_thresh || - free_sqe < otx2_get_sqe_count(pfvf, skb)) + if (free_desc < otx2_get_sqe_count(pfvf, skb)) return false; num_segs = skb_shinfo(skb)->nr_frags + 1; @@ -1198,7 +1329,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, put_page(page); break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act); break; case XDP_ABORTED: trace_xdp_exception(pfvf->netdev, prog, act); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index f1a04cf9210c..93cac2c2664c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -21,7 +21,7 @@ #define OTX2_HEAD_ROOM OTX2_ALIGN #define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN) -#define OTX2_MIN_MTU 64 +#define OTX2_MIN_MTU 60 #define OTX2_MAX_GSO_SEGS 255 #define OTX2_MAX_FRAGS_IN_SQE 9 @@ -79,6 +79,7 @@ struct sg_list { struct otx2_snd_queue { u8 aura_id; u16 head; + u16 cons_head; u16 sqe_size; u32 sqe_cnt; u16 num_sqbs; @@ -109,6 +110,7 @@ struct otx2_cq_poll { #define CINT_INVALID_CQ 255 u8 cint_idx; u8 cq_ids[CQS_PER_CINT]; + struct dim dim; struct napi_struct napi; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 78944ad3492f..86653bb8e403 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -472,23 +472,7 @@ static void otx2vf_reset_task(struct work_struct *work) static int otx2vf_set_features(struct net_device *netdev, netdev_features_t features) { - netdev_features_t changed = features ^ netdev->features; - bool ntuple_enabled = !!(features & NETIF_F_NTUPLE); - struct otx2_nic *vf = netdev_priv(netdev); - - if (changed & NETIF_F_NTUPLE) { - if (!ntuple_enabled) { - otx2_mcam_flow_del(vf); - return 0; - } - - if (!otx2_get_maxflows(vf->flow_cfg)) { - netdev_err(netdev, - "Can't enable NTUPLE, MCAM entries not allocated\n"); - return -EINVAL; - } - } - return 0; + return otx2_handle_ntuple_tc_features(netdev, features); } static const struct net_device_ops otx2vf_netdev_ops = { @@ -502,6 +486,7 @@ static const struct net_device_ops otx2vf_netdev_ops = { .ndo_get_stats64 = otx2_get_stats64, .ndo_tx_timeout = otx2_tx_timeout, .ndo_eth_ioctl = otx2_ioctl, + .ndo_setup_tc = otx2_setup_tc, }; static int otx2_wq_init(struct otx2_nic *vf) @@ -586,6 +571,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) hw->tx_queues = qcount; hw->max_queues = qcount; hw->tot_tx_queues = qcount; + hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN; + /* Use CQE of 128 byte descriptor size by default */ + hw->xqe_size = 128; hw->irq_name = 
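The free_desc computation above is standard power-of-two ring arithmetic: with producer head and consumer cons_head, one slot is kept as a gap so full and empty states stay distinguishable. Worked standalone:

#include <stdio.h>

/* sqe_cnt must be a power of two for the mask to act as a modulo */
static unsigned int sq_free_desc(unsigned int cons_head, unsigned int head,
				 unsigned int sqe_cnt)
{
	return (cons_head - head - 1 + sqe_cnt) & (sqe_cnt - 1);
}

int main(void)
{
	/* 16-entry SQ, producer at 10, consumer at 4: 6 descriptors are
	 * in flight, one slot stays reserved, so 16 - 6 - 1 = 9 free.
	 */
	printf("%u\n", sq_free_desc(4, 10, 16));	/* prints 9 */
	return 0;
}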
devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE, GFP_KERNEL); @@ -662,8 +650,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) netdev->hw_features |= NETIF_F_NTUPLE; netdev->hw_features |= NETIF_F_RXALL; + netdev->hw_features |= NETIF_F_HW_TC; - netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; + netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS); netdev->watchdog_timeo = OTX2_TX_TIMEOUT; netdev->netdev_ops = &otx2vf_netdev_ops; @@ -684,7 +673,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); - goto err_detach_rsrc; + goto err_ptp_destroy; } err = otx2_wq_init(vf); @@ -697,18 +686,28 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_unreg_netdev; - err = otx2_register_dl(vf); + err = otx2_init_tc(vf); if (err) goto err_unreg_netdev; - /* Enable pause frames by default */ - vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED; - vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED; + err = otx2_register_dl(vf); + if (err) + goto err_shutdown_tc; + +#ifdef CONFIG_DCB + err = otx2_dcbnl_set_ops(netdev); + if (err) + goto err_shutdown_tc; +#endif return 0; +err_shutdown_tc: + otx2_shutdown_tc(vf); err_unreg_netdev: unregister_netdev(netdev); +err_ptp_destroy: + otx2_ptp_destroy(vf); err_detach_rsrc: if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) qmem_free(vf->dev, vf->dync_lmt); @@ -737,11 +736,28 @@ static void otx2vf_remove(struct pci_dev *pdev) vf = netdev_priv(netdev); + /* Disable 802.3x pause frames */ + if (vf->flags & OTX2_FLAG_RX_PAUSE_ENABLED || + (vf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) { + vf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED; + vf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED; + otx2_config_pause_frm(vf); + } + +#ifdef CONFIG_DCB + /* Disable PFC config */ + if (vf->pfc_en) { + vf->pfc_en = 0; + otx2_config_priority_flow_ctrl(vf); + } +#endif + cancel_work_sync(&vf->reset_task); otx2_unregister_dl(vf); unregister_netdev(netdev); if (vf->otx2_wq) destroy_workqueue(vf->otx2_wq); + otx2_ptp_destroy(vf); otx2vf_disable_mbox_intr(vf); otx2_detach_resources(&vf->mbox); if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) |