Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_main.c')
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1167
1 file changed, 813 insertions(+), 354 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 65571593ec5c..e3a4f01bcceb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -31,12 +31,12 @@
  */
 
 #include <net/tc_act/tc_gact.h>
-#include <net/pkt_cls.h>
 #include <linux/mlx5/fs.h>
 #include <net/vxlan.h>
 #include <net/geneve.h>
 #include <linux/bpf.h>
 #include <linux/if_bridge.h>
+#include <linux/filter.h>
 #include <net/page_pool.h>
 #include <net/xdp_sock_drv.h>
 #include "eswitch.h"
@@ -45,10 +45,9 @@
 #include "en_tc.h"
 #include "en_rep.h"
 #include "en_accel/ipsec.h"
+#include "en_accel/macsec.h"
 #include "en_accel/en_accel.h"
-#include "en_accel/tls.h"
-#include "accel/ipsec.h"
-#include "accel/tls.h"
+#include "en_accel/ktls.h"
 #include "lib/vxlan.h"
 #include "lib/clock.h"
 #include "en/port.h"
@@ -65,25 +64,29 @@
 #include "en/devlink.h"
 #include "lib/mlx5.h"
 #include "en/ptp.h"
+#include "en/htb.h"
 #include "qos.h"
 #include "en/trap.h"
-#include "fpga/ipsec.h"
 
-bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
+					    enum mlx5e_mpwrq_umr_mode umr_mode)
 {
-	bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
-		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
-		MLX5_CAP_ETH(mdev, reg_umr_sq);
-	u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
-	bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;
+	u16 umr_wqebbs, max_wqebbs;
+	bool striding_rq_umr;
 
+	striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
+			  MLX5_CAP_ETH(mdev, reg_umr_sq);
 	if (!striding_rq_umr)
 		return false;
-	if (!inline_umr) {
-		mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
-			       (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
+
+	umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);
+	max_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
+	/* Sanity check; should never happen, because mlx5e_mpwrq_umr_wqebbs is
+	 * calculated from mlx5e_get_max_sq_aligned_wqebbs.
+	 */
+	if (WARN_ON(umr_wqebbs > max_wqebbs))
 		return false;
-	}
+
 	return true;
 }
@@ -200,21 +203,35 @@ static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
 	mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb);
 }
 
+static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+	u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
+
+	WARN_ON_ONCE(entries * umr_entry_size % MLX5_OCTWORD);
+
+	return entries * umr_entry_size / MLX5_OCTWORD;
+}
+
 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
				       struct mlx5e_icosq *sq,
				       struct mlx5e_umr_wqe *wqe)
 {
 	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
 	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
-	u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);
+	u16 octowords;
+	u8 ds_cnt;
+
+	ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift,
						     rq->mpwqe.umr_mode),
			      MLX5_SEND_WQE_DS);
 
 	cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
-	cseg->umr_mkey  = rq->mkey_be;
+	cseg->umr_mkey  = rq->mpwqe.umr_mkey_be;
 
 	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
-	ucseg->xlt_octowords =
-		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
+	octowords = mlx5e_mpwrq_umr_octowords(rq->mpwqe.pages_per_wqe, rq->mpwqe.umr_mode);
+	ucseg->xlt_octowords = cpu_to_be16(octowords);
 	ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
 }
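mlx5e_mpwrq_umr_octowords() in the hunk above converts translation entries into 16-byte octowords. A worked example of the arithmetic, where the entry sizes are assumptions for illustration (8-byte MTT entries in aligned mode, 16-byte KSM/KLM entries otherwise), not something this hunk defines:

    u32 entries = 64;                /* e.g. pages_per_wqe */
    u8 umr_entry_size = 8;           /* assumed: MTT entry, aligned mode */

    /* 64 * 8 = 512 bytes = 32 octowords; the WARN_ON_ONCE in the hunk
     * asserts the same invariant: the product divides evenly by 16.
     */
    u16 octowords = entries * umr_entry_size / 16;   /* MLX5_OCTWORD */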
@@ -260,10 +277,12 @@ static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
 static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
 {
 	int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+	size_t alloc_size;
+
+	alloc_size = array_size(wq_sz, struct_size(rq->mpwqe.info, alloc_units,
						   rq->mpwqe.pages_per_wqe));
 
-	rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
-						  sizeof(*rq->mpwqe.info)),
-				       GFP_KERNEL, node);
+	rq->mpwqe.info = kvzalloc_node(alloc_size, GFP_KERNEL, node);
 	if (!rq->mpwqe.info)
 		return -ENOMEM;
@@ -272,18 +291,52 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
 	return 0;
 }
 
-static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
-				     u64 npages, u8 page_shift, u32 *umr_mkey,
-				     dma_addr_t filler_addr)
+
+static u8 mlx5e_mpwrq_access_mode(enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+	switch (umr_mode) {
+	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
+		return MLX5_MKC_ACCESS_MODE_MTT;
+	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
+		return MLX5_MKC_ACCESS_MODE_KSM;
+	case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
+		return MLX5_MKC_ACCESS_MODE_KLMS;
+	case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
+		return MLX5_MKC_ACCESS_MODE_KSM;
+	}
+	WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode);
+	return 0;
+}
+
+static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
+				 u32 npages, u8 page_shift, u32 *umr_mkey,
+				 dma_addr_t filler_addr,
+				 enum mlx5e_mpwrq_umr_mode umr_mode,
+				 u32 xsk_chunk_size)
 {
 	struct mlx5_mtt *mtt;
+	struct mlx5_ksm *ksm;
+	struct mlx5_klm *klm;
+	u32 octwords;
 	int inlen;
 	void *mkc;
 	u32 *in;
 	int err;
 	int i;
 
-	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*mtt) * npages;
+	if ((umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED ||
+	     umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE) &&
+	    !MLX5_CAP_GEN(mdev, fixed_buffer_size)) {
+		mlx5_core_warn(mdev, "Unaligned AF_XDP requires fixed_buffer_size capability\n");
+		return -EINVAL;
+	}
+
+	octwords = mlx5e_mpwrq_umr_octowords(npages, umr_mode);
+
+	inlen = MLX5_FLEXIBLE_INLEN(mdev, MLX5_ST_SZ_BYTES(create_mkey_in),
+				    MLX5_OCTWORD, octwords);
+	if (inlen < 0)
+		return inlen;
 
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
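The allocation in mlx5e_rq_alloc_mpwqe_info above sizes a two-level layout: wq_sz copies of a struct that ends in a pages_per_wqe-sized flexible array. struct_size() and array_size() (from <linux/overflow.h>) saturate at SIZE_MAX on overflow, so a miscomputed count makes kvzalloc_node() fail instead of under-allocating. A minimal sketch with hypothetical demo types:

    struct demo_unit { void *page; };
    struct demo_info {
            int consumed;
            struct demo_unit alloc_units[];   /* flexible array member */
    };
    struct demo_info *info;

    size_t one = struct_size(info, alloc_units, pages_per_wqe); /* one entry */
    size_t all = array_size(wq_sz, one);                        /* whole table */

    info = kvzalloc_node(all, GFP_KERNEL, node);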
@@ -295,16 +348,17 @@ static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
 	MLX5_SET(mkc, mkc, umr_en, 1);
 	MLX5_SET(mkc, mkc, lw, 1);
 	MLX5_SET(mkc, mkc, lr, 1);
-	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+	MLX5_SET(mkc, mkc, access_mode_1_0, mlx5e_mpwrq_access_mode(umr_mode));
 	mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
 	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
 	MLX5_SET64(mkc, mkc, len, npages << page_shift);
-	MLX5_SET(mkc, mkc, translations_octword_size,
-		 MLX5_MTT_OCTW(npages));
-	MLX5_SET(mkc, mkc, log_page_size, page_shift);
-	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
-		 MLX5_MTT_OCTW(npages));
+	MLX5_SET(mkc, mkc, translations_octword_size, octwords);
+	if (umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)
+		MLX5_SET(mkc, mkc, log_page_size, page_shift - 2);
+	else if (umr_mode != MLX5E_MPWRQ_UMR_MODE_OVERSIZED)
+		MLX5_SET(mkc, mkc, log_page_size, page_shift);
+	MLX5_SET(create_mkey_in, in, translations_octword_actual_size, octwords);
 
 	/* Initialize the mkey with all MTTs pointing to a default
 	 * page (filler_addr). When the channels are activated, UMR
@@ -312,9 +366,47 @@ static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
 	 * the RQ's pool, while the gaps (wqe_overflow) remain mapped
 	 * to the default page.
 	 */
-	mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
-	for (i = 0 ; i < npages ; i++)
-		mtt[i].ptag = cpu_to_be64(filler_addr);
+	switch (umr_mode) {
+	case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
+		klm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+		for (i = 0; i < npages; i++) {
+			klm[i << 1] = (struct mlx5_klm) {
+				.va = cpu_to_be64(filler_addr),
+				.bcount = cpu_to_be32(xsk_chunk_size),
+				.key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
+			};
+			klm[(i << 1) + 1] = (struct mlx5_klm) {
+				.va = cpu_to_be64(filler_addr),
+				.bcount = cpu_to_be32((1 << page_shift) - xsk_chunk_size),
+				.key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
+			};
+		}
+		break;
+	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
+		ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+		for (i = 0; i < npages; i++)
+			ksm[i] = (struct mlx5_ksm) {
+				.key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
+				.va = cpu_to_be64(filler_addr),
+			};
+		break;
+	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
+		mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+		for (i = 0; i < npages; i++)
+			mtt[i] = (struct mlx5_mtt) {
+				.ptag = cpu_to_be64(filler_addr),
+			};
+		break;
+	case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
+		ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+		for (i = 0; i < npages * 4; i++) {
+			ksm[i] = (struct mlx5_ksm) {
+				.key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
+				.va = cpu_to_be64(filler_addr),
+			};
+		}
+		break;
+	}
 
 	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
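In the OVERSIZED branch above, every page is described by a pair of KLM entries: one for the usable XSK chunk and one for the tail gap that stays mapped to the filler page. A worked example with illustrative numbers only (this hunk does not fix the chunk size):

    u32 page_size      = 1 << 12;                    /* page_shift = 12 -> 4096 B */
    u32 xsk_chunk_size = 3072;                       /* assumed 3 KiB chunk */
    u32 gap            = page_size - xsk_chunk_size; /* 1024 B of padding */

    /* klm[2 * i]     covers 3072 bytes of real buffer,
     * klm[2 * i + 1] covers the remaining 1024 bytes, pointed at the
     * default (filler) page so hardware never writes past the chunk.
     */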
@@ -357,10 +449,27 @@ static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
 static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
 {
-	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
+	u32 xsk_chunk_size = rq->xsk_pool ? rq->xsk_pool->chunk_size : 0;
+	u32 wq_size = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+	u32 num_entries, max_num_entries;
+	u32 umr_mkey;
+	int err;
+
+	max_num_entries = mlx5e_mpwrq_max_num_entries(mdev, rq->mpwqe.umr_mode);
 
-	return mlx5e_create_umr_mtt_mkey(mdev, num_mtts, PAGE_SHIFT,
-					 &rq->umr_mkey, rq->wqe_overflow.addr);
+	/* Shouldn't overflow, the result is at most MLX5E_MAX_RQ_NUM_MTTS. */
+	if (WARN_ON_ONCE(check_mul_overflow(wq_size, (u32)rq->mpwqe.mtts_per_wqe,
					    &num_entries) ||
			 num_entries > max_num_entries))
+		mlx5_core_err(mdev, "%s: multiplication overflow: %u * %u > %u\n",
			      __func__, wq_size, rq->mpwqe.mtts_per_wqe,
			      max_num_entries);
+
+	err = mlx5e_create_umr_mkey(mdev, num_entries, rq->mpwqe.page_shift,
				    &umr_mkey, rq->wqe_overflow.addr,
				    rq->mpwqe.umr_mode, xsk_chunk_size);
+	rq->mpwqe.umr_mkey_be = cpu_to_be32(umr_mkey);
+	return err;
 }
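check_mul_overflow() (from <linux/overflow.h>) performs the multiplication and returns true if it wrapped, storing the truncated product either way, which is why the hunk above can still pass num_entries onward after the warning. A minimal, self-contained sketch of the pattern (demo_scale is a hypothetical name):

    static int demo_scale(u32 a, u32 b, u32 *out)
    {
            if (check_mul_overflow(a, b, out))
                    return -EOVERFLOW;   /* a * b does not fit in u32 */
            return 0;                    /* *out holds the exact product */
    }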
@@ -377,18 +486,20 @@ static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
 				    &rq->mpwqe.shampo->mkey);
 }
 
-static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix)
-{
-	return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT;
-}
-
 static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
 {
 	struct mlx5e_wqe_frag_info next_frag = {};
 	struct mlx5e_wqe_frag_info *prev = NULL;
 	int i;
 
-	next_frag.di = &rq->wqe.di[0];
+	if (rq->xsk_pool) {
+		/* Assumptions used by XSK batched allocator. */
+		WARN_ON(rq->wqe.info.num_frags != 1);
+		WARN_ON(rq->wqe.info.log_num_frags != 0);
+		WARN_ON(rq->wqe.info.arr[0].frag_stride != PAGE_SIZE);
+	}
+
+	next_frag.au = &rq->wqe.alloc_units[0];
 
 	for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
 		struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
@@ -398,7 +509,7 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
 
 		for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
 			if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
-				next_frag.di++;
+				next_frag.au++;
 				next_frag.offset = 0;
 				if (prev)
 					prev->last_in_page = true;
@@ -415,12 +526,13 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
 		prev->last_in_page = true;
 }
 
-int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
+static int mlx5e_init_au_list(struct mlx5e_rq *rq, int wq_sz, int node)
 {
 	int len = wq_sz << rq->wqe.info.log_num_frags;
 
-	rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), GFP_KERNEL, node);
-	if (!rq->wqe.di)
+	rq->wqe.alloc_units = kvzalloc_node(array_size(len, sizeof(*rq->wqe.alloc_units)),
					    GFP_KERNEL, node);
+	if (!rq->wqe.alloc_units)
 		return -ENOMEM;
 
 	mlx5e_init_frags_partition(rq);
@@ -428,9 +540,9 @@ static int mlx5e_init_au_list(struct mlx5e_rq *rq, int wq_sz, int node)
 	return 0;
 }
 
-void mlx5e_free_di_list(struct mlx5e_rq *rq)
+static void mlx5e_free_au_list(struct mlx5e_rq *rq)
 {
-	kvfree(rq->wqe.di);
+	kvfree(rq->wqe.alloc_units);
 }
 
 static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
@@ -476,16 +588,17 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
 	rq->clock        = &mdev->clock;
 	rq->icosq        = &c->icosq;
 	rq->ix           = c->ix;
+	rq->channel      = c;
 	rq->mdev         = mdev;
 	rq->hw_mtu       = MLX5E_SW2HW_MTU(params, params->sw_mtu);
 	rq->xdpsq        = &c->rq_xdpsq;
-	rq->stats        = &c->priv->channel_stats[c->ix].rq;
+	rq->stats        = &c->priv->channel_stats[c->ix]->rq;
 	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
 	err = mlx5e_rq_set_handlers(rq, params, NULL);
 	if (err)
 		return err;
 
-	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
+	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id);
 }
 
 static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
@@ -572,6 +685,8 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
 	pool_size = 1 << params->log_rq_mtu_frames;
 
+	rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
+
 	switch (rq->wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
@@ -587,19 +702,31 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 
 		wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
 
-		pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
-			mlx5e_mpwqe_get_log_rq_size(params, xsk);
+		rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+		rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+		rq->mpwqe.pages_per_wqe =
+			mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift,
						  rq->mpwqe.umr_mode);
+		rq->mpwqe.umr_wqebbs =
+			mlx5e_mpwrq_umr_wqebbs(mdev, rq->mpwqe.page_shift,
					       rq->mpwqe.umr_mode);
+		rq->mpwqe.mtts_per_wqe =
+			mlx5e_mpwrq_mtts_per_wqe(mdev, rq->mpwqe.page_shift,
						 rq->mpwqe.umr_mode);
+
+		pool_size = rq->mpwqe.pages_per_wqe <<
+			mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
 
 		rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
 		rq->mpwqe.num_strides =
 			BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
+		rq->mpwqe.min_wqe_bulk = mlx5e_mpwqe_get_min_wqe_bulk(wq_sz);
 
 		rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
 
 		err = mlx5e_create_rq_umr_mkey(mdev, rq);
 		if (err)
 			goto err_rq_drop_page;
-		rq->mkey_be = cpu_to_be32(rq->umr_mkey);
 
 		err = mlx5e_rq_alloc_mpwqe_info(rq, node);
 		if (err)
@@ -607,7 +734,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 
 		err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node);
 		if (err)
-			goto err_free_by_rq_type;
+			goto err_free_mpwqe_info;
 
 		break;
 	default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -632,11 +759,9 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 			goto err_rq_wq_destroy;
 		}
 
-		err = mlx5e_init_di_list(rq, wq_sz, node);
+		err = mlx5e_init_au_list(rq, wq_sz, node);
 		if (err)
 			goto err_rq_frags;
-
-		rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
 	}
 
 	if (xsk) {
@@ -661,14 +786,14 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 		if (IS_ERR(rq->page_pool)) {
 			err = PTR_ERR(rq->page_pool);
 			rq->page_pool = NULL;
-			goto err_free_shampo;
+			goto err_free_by_rq_type;
 		}
 		if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
 			err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
							 MEM_TYPE_PAGE_POOL, rq->page_pool);
 	}
 	if (err)
-		goto err_free_shampo;
+		goto err_destroy_page_pool;
 
 	for (i = 0; i < wq_sz; i++) {
 		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
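mlx5e_init_frags_partition() (earlier in this diff) packs WQE fragments into pages and advances to the next allocation unit whenever a fragment would cross a page boundary. A standalone sketch of that walk, assuming a 1536-byte stride on 4 KiB pages (both numbers are illustrative):

    unsigned int offset = 0, au = 0;
    const unsigned int stride = 1536, page = 4096;
    int f;

    for (f = 0; f < 6; f++) {
            if (offset + stride > page) {   /* would cross the page end */
                    au++;                   /* advance to the next unit */
                    offset = 0;
            }
            /* fragment f lands at (au, offset): two frags fit per page,
             * and the last one in each page gets last_in_page = true
             */
            offset += stride;
    }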
@@ -676,13 +801,14 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 			struct mlx5e_rx_wqe_ll *wqe =
 				mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
 			u32 byte_count =
 				rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
-			u64 dma_offset = mlx5e_get_mpwqe_offset(i);
+			u64 dma_offset = mul_u32_u32(i, rq->mpwqe.mtts_per_wqe) <<
+				rq->mpwqe.page_shift;
 			u16 headroom = test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) ?
				       0 : rq->buff.headroom;
 
 			wqe->data[0].addr = cpu_to_be64(dma_offset + headroom);
 			wqe->data[0].byte_count = cpu_to_be32(byte_count);
-			wqe->data[0].lkey = rq->mkey_be;
+			wqe->data[0].lkey = rq->mpwqe.umr_mkey_be;
 		} else {
 			struct mlx5e_rx_wqe_cyc *wqe =
 				mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
@@ -720,19 +846,21 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 
 	return 0;
 
-err_free_shampo:
-	mlx5e_rq_free_shampo(rq);
+err_destroy_page_pool:
+	page_pool_destroy(rq->page_pool);
 err_free_by_rq_type:
 	switch (rq->wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		mlx5e_rq_free_shampo(rq);
+err_free_mpwqe_info:
 		kvfree(rq->mpwqe.info);
 err_rq_mkey:
-		mlx5_core_destroy_mkey(mdev, rq->umr_mkey);
+		mlx5_core_destroy_mkey(mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
 err_rq_drop_page:
 		mlx5e_free_mpwqe_rq_drop_page(rq);
 		break;
 	default: /* MLX5_WQ_TYPE_CYCLIC */
-		mlx5e_free_di_list(rq);
+		mlx5e_free_au_list(rq);
 err_rq_frags:
 		kvfree(rq->wqe.frags);
 	}
@@ -760,24 +888,22 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 	switch (rq->wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		kvfree(rq->mpwqe.info);
-		mlx5_core_destroy_mkey(rq->mdev, rq->umr_mkey);
+		mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
 		mlx5e_free_mpwqe_rq_drop_page(rq);
 		mlx5e_rq_free_shampo(rq);
 		break;
 	default: /* MLX5_WQ_TYPE_CYCLIC */
 		kvfree(rq->wqe.frags);
-		mlx5e_free_di_list(rq);
+		mlx5e_free_au_list(rq);
 	}
 
 	for (i = rq->page_cache.head; i != rq->page_cache.tail;
 	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
-		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
-
 		/* With AF_XDP, page_cache is not used, so this loop is not
 		 * entered, and it's safe to call mlx5e_page_release_dynamic
 		 * directly.
 		 */
-		mlx5e_page_release_dynamic(rq, dma_info, false);
+		mlx5e_page_release_dynamic(rq, rq->page_cache.page_cache[i], false);
 	}
 
 	xdp_rxq_info_unreg(&rq->xdp_rxq);
@@ -832,7 +958,7 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
 	return err;
 }
 
-int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
+static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
 {
 	struct mlx5_core_dev *mdev = rq->mdev;
@@ -861,6 +987,32 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
 	return err;
 }
 
+static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
+{
+	struct net_device *dev = rq->netdev;
+	int err;
+
+	err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
+	if (err) {
+		netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
+		return err;
+	}
+	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+	if (err) {
+		netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
+		return err;
+	}
+
+	return 0;
+}
+
+int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
+{
+	mlx5e_free_rx_descs(rq);
+
+	return mlx5e_rq_to_ready(rq, curr_state);
+}
+
 static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
 {
 	struct mlx5_core_dev *mdev = rq->mdev;
@@ -1033,9 +1185,6 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
 	if (err)
 		goto err_destroy_rq;
 
-	if (mlx5e_is_tls_on(rq->priv) && !mlx5e_accel_is_ktls_device(mdev))
-		__set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */
-
 	if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
 		__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
@@ -1069,13 +1218,6 @@ err_free_rq:
 void mlx5e_activate_rq(struct mlx5e_rq *rq)
 {
 	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
-	if (rq->icosq) {
-		mlx5e_trigger_irq(rq->icosq);
-	} else {
-		local_bh_disable();
-		napi_schedule(rq->cq.napi);
-		local_bh_enable();
-	}
 }
 
 void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -1087,8 +1229,6 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
 void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
 	cancel_work_sync(&rq->dim.work);
-	if (rq->icosq)
-		cancel_work_sync(&rq->icosq->recover_work);
 	cancel_work_sync(&rq->recover_work);
 	mlx5e_destroy_rq(rq);
 	mlx5e_free_rx_descs(rq);
@@ -1161,10 +1301,13 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
 	sq->xsk_pool  = xsk_pool;
 
 	sq->stats = sq->xsk_pool ?
-		&c->priv->channel_stats[c->ix].xsksq :
+		&c->priv->channel_stats[c->ix]->xsksq :
 		is_redirect ?
-			&c->priv->channel_stats[c->ix].xdpsq :
-			&c->priv->channel_stats[c->ix].rq_xdpsq;
+			&c->priv->channel_stats[c->ix]->xdpsq :
+			&c->priv->channel_stats[c->ix]->rq_xdpsq;
+	sq->stop_room = param->is_mpw ? mlx5e_stop_room_for_mpwqe(mdev) :
					mlx5e_stop_room_for_max_wqe(mdev);
+	sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
 
 	param->wq.db_numa_node = cpu_to_node(c->cpu);
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1216,9 +1359,20 @@ static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
 	mlx5e_reporter_icosq_cqe_err(sq);
 }
 
+static void mlx5e_async_icosq_err_cqe_work(struct work_struct *recover_work)
+{
+	struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
					      recover_work);
+
+	/* Not implemented yet. */
+
+	netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n");
+}
+
 static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
			     struct mlx5e_sq_param *param,
-			     struct mlx5e_icosq *sq)
+			     struct mlx5e_icosq *sq,
+			     work_func_t recover_work_func)
 {
 	void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
 	struct mlx5_core_dev *mdev = c->mdev;
@@ -1239,7 +1393,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 	if (err)
 		goto err_sq_wq_destroy;
 
-	INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);
+	INIT_WORK(&sq->recover_work, recover_work_func);
 
 	return 0;
@@ -1303,7 +1457,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
 	int err;
 
 	sq->pdev      = c->pdev;
-	sq->tstamp    = c->tstamp;
 	sq->clock     = &mdev->clock;
 	sq->mkey_be   = c->mkey_be;
 	sq->netdev    = c->netdev;
@@ -1314,10 +1467,11 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
 	sq->uar_map   = mdev->mlx5e_res.hw_objs.bfreg.map;
 	sq->min_inline_mode = params->tx_min_inline_mode;
 	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+	sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
 	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
 	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
 		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
-	if (MLX5_IPSEC_DEV(c->priv->mdev))
+	if (mlx5_ipsec_device_caps(c->priv->mdev))
 		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
 	if (param->is_mpw)
 		set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
@@ -1575,13 +1729,14 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
 	mlx5e_reporter_tx_err_cqe(sq);
 }
 
-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
-		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
+static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
+			    struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
+			    work_func_t recover_work_func)
 {
 	struct mlx5e_create_sq_param csp = {};
 	int err;
 
-	err = mlx5e_alloc_icosq(c, param, sq);
+	err = mlx5e_alloc_icosq(c, param, sq, recover_work_func);
 	if (err)
 		return err;
@@ -1620,7 +1775,7 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
 	synchronize_net(); /* Sync with NAPI. */
 }
 
-void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
 {
 	struct mlx5e_channel *c = sq->channel;
@@ -1648,14 +1803,22 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
 	csp.wq_ctrl         = &sq->wq_ctrl;
 	csp.min_inline_mode = sq->min_inline_mode;
 	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+
+	/* Don't enable multi buffer on XDP_REDIRECT SQ, as it's not yet
+	 * supported by upstream, and there is no defined trigger to allow
+	 * transmitting redirected multi-buffer frames.
+	 */
+	if (param->is_xdp_mb && !is_redirect)
+		set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
+
 	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
 	if (err)
 		goto err_free_xdpsq;
 
 	mlx5e_set_xmit_fp(sq, param->is_mpw);
 
-	if (!param->is_mpw) {
-		unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
+	if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
+		unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
 		unsigned int inline_hdr_sz = 0;
 		int i;
@@ -1898,8 +2061,7 @@ static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
 {
 	int tc;
 
-	if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL ||
-	    !params->mqprio.channel.rl) {
+	if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL) {
 		*hw_id = 0;
 		return 0;
 	}
@@ -1908,7 +2070,14 @@ static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
 	if (tc < 0)
 		return tc;
 
-	return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id);
+	if (tc >= params->mqprio.num_tc) {
+		WARN(1, "Unexpected TCs configuration. tc %d is out of range of %u",
+		     tc, params->mqprio.num_tc);
+		return -EINVAL;
+	}
+
+	*hw_id = params->mqprio.channel.hw_id[tc];
+	return 0;
 }
 
 static int mlx5e_open_sqs(struct mlx5e_channel *c,
@@ -1928,7 +2097,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
 		err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
				       params, &cparam->txq_sq, &c->sq[tc], tc,
				       qos_queue_group_id,
-				       &c->priv->channel_stats[c->ix].sq[tc]);
+				       &c->priv->channel_stats[c->ix]->sq[tc]);
 		if (err)
 			goto err_close_sqs;
 	}
@@ -2084,11 +2253,15 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
 
 	spin_lock_init(&c->async_icosq_lock);
 
-	err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq);
+	err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
			       mlx5e_async_icosq_err_cqe_work);
 	if (err)
 		goto err_close_xdpsq_cq;
 
-	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
+	mutex_init(&c->icosq_recovery_lock);
+
+	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq,
			       mlx5e_icosq_err_cqe_work);
 	if (err)
 		goto err_close_async_icosq;
@@ -2156,9 +2329,12 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
 	mlx5e_close_xdpsq(&c->xdpsq);
 	if (c->xdp)
 		mlx5e_close_xdpsq(&c->rq_xdpsq);
+	/* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */
+	cancel_work_sync(&c->icosq.recover_work);
 	mlx5e_close_rq(&c->rq);
 	mlx5e_close_sqs(c);
 	mlx5e_close_icosq(&c->icosq);
+	mutex_destroy(&c->icosq_recovery_lock);
 	mlx5e_close_icosq(&c->async_icosq);
 	if (c->xdp)
 		mlx5e_close_cq(&c->rq_xdpsq.cq);
@@ -2176,6 +2352,44 @@ static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
 	return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
 }
 
+static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
+{
+	if (ix > priv->stats_nch) {
+		netdev_warn(priv->netdev, "Unexpected channel stats index %d > %d\n", ix,
			    priv->stats_nch);
+		return -EINVAL;
+	}
+
+	if (priv->channel_stats[ix])
+		return 0;
+
+	/* Asymmetric dynamic memory allocation.
+	 * Freed in mlx5e_priv_arrays_free, not on channel closure.
+	 */
+	mlx5e_dbg(DRV, priv, "Creating channel stats %d\n", ix);
+	priv->channel_stats[ix] = kvzalloc_node(sizeof(**priv->channel_stats),
						GFP_KERNEL, cpu_to_node(cpu));
+	if (!priv->channel_stats[ix])
+		return -ENOMEM;
+	priv->stats_nch++;
+
+	return 0;
+}
+
+void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c)
+{
+	spin_lock_bh(&c->async_icosq_lock);
+	mlx5e_trigger_irq(&c->async_icosq);
+	spin_unlock_bh(&c->async_icosq_lock);
+}
+
+void mlx5e_trigger_napi_sched(struct napi_struct *napi)
+{
+	local_bh_disable();
+	napi_schedule(napi);
+	local_bh_enable();
+}
+
 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_params *params,
			      struct mlx5e_channel_param *cparam,
@@ -2193,6 +2407,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	if (err)
 		return err;
 
+	err = mlx5e_channel_stats_alloc(priv, ix, cpu);
+	if (err)
+		return err;
+
 	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
 	if (!c)
 		return -ENOMEM;
@@ -2207,11 +2425,11 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
 	c->num_tc   = mlx5e_get_dcb_num_tc(params);
 	c->xdp      = !!params->xdp_prog;
-	c->stats    = &priv->channel_stats[ix].ch;
+	c->stats    = &priv->channel_stats[ix]->ch;
 	c->aff_mask = irq_get_effective_affinity_mask(irq);
 	c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
 
-	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
+	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll);
 
 	err = mlx5e_open_queues(c, params, cparam);
 	if (unlikely(err))
@@ -2249,10 +2467,13 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
 		mlx5e_activate_txqsq(&c->sq[tc]);
 	mlx5e_activate_icosq(&c->icosq);
 	mlx5e_activate_icosq(&c->async_icosq);
-	mlx5e_activate_rq(&c->rq);
 
 	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
 		mlx5e_activate_xsk(c);
+	else
+		mlx5e_activate_rq(&c->rq);
+
+	mlx5e_trigger_napi_icosq(c);
 }
 
 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -2261,8 +2482,9 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
 
 	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
 		mlx5e_deactivate_xsk(c);
+	else
+		mlx5e_deactivate_rq(&c->rq);
 
-	mlx5e_deactivate_rq(&c->rq);
 	mlx5e_deactivate_icosq(&c->async_icosq);
 	mlx5e_deactivate_icosq(&c->icosq);
 	for (tc = 0; tc < c->num_tc; tc++)
@@ -2318,9 +2540,11 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
 			goto err_close_channels;
 	}
 
-	err = mlx5e_qos_open_queues(priv, chs);
-	if (err)
-		goto err_close_ptp;
+	if (priv->htb) {
+		err = mlx5e_qos_open_queues(priv, chs);
+		if (err)
+			goto err_close_ptp;
+	}
 
 	mlx5e_health_channels_update(priv);
 	kvfree(cparam);
@@ -2352,8 +2576,6 @@ static void mlx5e_activate_channels(struct mlx5e_channels *chs)
 		mlx5e_ptp_activate_channel(chs->ptp);
 }
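mlx5e_trigger_napi_sched() above wraps napi_schedule() in a local_bh_disable()/local_bh_enable() pair. A sketch of why, with a hypothetical channel pointer:

    /* napi_schedule() only raises NET_RX_SOFTIRQ. From process context
     * nothing would run it until the next interrupt; re-enabling bottom
     * halves makes local_bh_enable() execute the pending softirq right
     * away, so the poll loop starts immediately.
     */
    local_bh_disable();
    napi_schedule(&c->napi);
    local_bh_enable();          /* pending NET_RX_SOFTIRQ runs here */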
@@ -2361,8 +2583,12 @@ static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
 
 	for (i = 0; i < chs->num; i++) {
 		int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT;
+		struct mlx5e_channel *c = chs->c[i];
+
+		if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
+			continue;
 
-		err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout);
+		err |= mlx5e_wait_for_min_rx_wqes(&c->rq, timeout);
 
 		/* Don't wait on the XSK RQ, because the newer xdpsock sample
 		 * doesn't provide any Fill Ring entries at the setup stage.
@@ -2502,9 +2728,11 @@ static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
 
 int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
 {
-	int qos_queues, nch, ntc, num_txqs, err;
+	int nch, ntc, num_txqs, err;
+	int qos_queues = 0;
 
-	qos_queues = mlx5e_qos_cur_leaf_nodes(priv);
+	if (priv->htb)
+		qos_queues = mlx5e_htb_cur_leaf_nodes(priv->htb);
 
 	nch = priv->channels.params.num_channels;
 	ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
@@ -2525,7 +2753,7 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
 	struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
 	struct net_device *netdev = priv->netdev;
 	int old_num_txqs, old_ntc;
-	int num_rxqs, nch, ntc;
+	int nch, ntc;
 	int err;
 	int i;
@@ -2536,7 +2764,6 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
 
 	nch = priv->channels.params.num_channels;
 	ntc = priv->channels.params.mqprio.num_tc;
-	num_rxqs = nch * priv->profile->rq_groups;
 	tc_to_txq = priv->channels.params.mqprio.tc_to_txq;
 
 	err = mlx5e_netdev_set_tcs(netdev, nch, ntc, tc_to_txq);
@@ -2545,18 +2772,11 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
 	err = mlx5e_update_tx_netdev_queues(priv);
 	if (err)
 		goto err_tcs;
-	err = netif_set_real_num_rx_queues(netdev, num_rxqs);
+	err = netif_set_real_num_rx_queues(netdev, nch);
 	if (err) {
 		netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
 		goto err_txqs;
 	}
-	if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) {
-		if (priv->mqprio_rl) {
-			mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
-			mlx5e_mqprio_rl_free(priv->mqprio_rl);
-		}
-		priv->mqprio_rl = priv->channels.params.mqprio.channel.rl;
-	}
 
 	return 0;
@@ -2598,7 +2818,7 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
 	}
 }
 
-int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
+static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
 {
 	u16 count = priv->channels.params.num_channels;
 	int err;
@@ -2631,43 +2851,46 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
 			struct mlx5e_txqsq *sq = &c->sq[tc];
 
 			priv->txq2sq[sq->txq_ix] = sq;
-			priv->channel_tc2realtxq[i][tc] = i + tc * ch;
 		}
 	}
 
 	if (!priv->channels.ptp)
-		return;
+		goto out;
 
 	if (!test_bit(MLX5E_PTP_STATE_TX, priv->channels.ptp->state))
-		return;
+		goto out;
 
 	for (tc = 0; tc < num_tc; tc++) {
 		struct mlx5e_ptp *c = priv->channels.ptp;
 		struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
 
 		priv->txq2sq[sq->txq_ix] = sq;
-		priv->port_ptp_tc2realtxq[tc] = priv->num_tc_x_num_ch + tc;
 	}
-}
 
-static void mlx5e_update_num_tc_x_num_ch(struct mlx5e_priv *priv)
-{
-	/* Sync with mlx5e_select_queue. */
-	WRITE_ONCE(priv->num_tc_x_num_ch,
-		   mlx5e_get_dcb_num_tc(&priv->channels.params) * priv->channels.num);
+out:
+	/* Make the change to txq2sq visible before the queue is started.
+	 * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
+	 * which pairs with this barrier.
+	 */
+	smp_wmb();
 }
 
 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
 {
-	mlx5e_update_num_tc_x_num_ch(priv);
 	mlx5e_build_txq_maps(priv);
 	mlx5e_activate_channels(&priv->channels);
-	mlx5e_qos_activate_queues(priv);
+	if (priv->htb)
+		mlx5e_qos_activate_queues(priv);
 	mlx5e_xdp_tx_enable(priv);
+
+	/* dev_watchdog() wants all TX queues to be started when the carrier is
+	 * OK, including the ones in range real_num_tx_queues..num_tx_queues-1.
+	 * Make it happy to avoid TX timeout false alarms.
+	 */
 	netif_tx_start_all_queues(priv->netdev);
 
 	if (mlx5e_is_vport_rep(priv))
-		mlx5e_add_sqs_fwd_rules(priv);
+		mlx5e_rep_activate_channels(priv);
 
 	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
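The smp_wmb() added to mlx5e_build_txq_maps is the publish half of a barrier pairing: the txq2sq update must be visible before any queue is started, and the spinlock acquired in mlx5e_xmit supplies the matching ordering on the consumer side. A simplified sketch, with hypothetical names:

    /* Publisher: update the map, publish, then start the queue. */
    txq2sq[txq_ix] = sq;
    smp_wmb();                      /* order the map write before ... */
    netif_tx_start_queue(txq);      /* ... the state that readers test */

    /* Consumer (transmit path): taking the TX queue spinlock is an
     * ACQUIRE, so xmit on a started queue observes the new txq2sq.
     */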
@@ -2681,13 +2904,15 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 	mlx5e_rx_res_channels_deactivate(priv->rx_res);
 
 	if (mlx5e_is_vport_rep(priv))
-		mlx5e_remove_sqs_fwd_rules(priv);
+		mlx5e_rep_deactivate_channels(priv);
 
-	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
-	 * polling for inactive tx queues.
+	/* The results of ndo_select_queue are unreliable, while netdev config
+	 * is being changed (real_num_tx_queues, num_tc). Stop all queues to
+	 * prevent ndo_start_xmit from being called, so that it can assume that
+	 * the selected queue is always valid.
 	 */
-	netif_tx_stop_all_queues(priv->netdev);
 	netif_tx_disable(priv->netdev);
+
 	mlx5e_xdp_tx_disable(priv);
 	mlx5e_deactivate_channels(&priv->channels);
 }
@@ -2747,6 +2972,7 @@ static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
 	mlx5e_close_channels(&old_chs);
 	priv->profile->update_rx(priv);
 
+	mlx5e_selq_apply(&priv->selq);
 out:
 	mlx5e_activate_priv_channels(priv);
@@ -2770,13 +2996,24 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
 		return mlx5e_switch_priv_params(priv, params, preactivate, context);
 
 	new_chs.params = *params;
+
+	mlx5e_selq_prepare_params(&priv->selq, &new_chs.params);
+
 	err = mlx5e_open_channels(priv, &new_chs);
 	if (err)
-		return err;
+		goto err_cancel_selq;
+
 	err = mlx5e_switch_priv_channels(priv, &new_chs, preactivate, context);
 	if (err)
-		mlx5e_close_channels(&new_chs);
+		goto err_close;
+
+	return 0;
+
+err_close:
+	mlx5e_close_channels(&new_chs);
+
+err_cancel_selq:
+	mlx5e_selq_cancel(&priv->selq);
 	return err;
 }
@@ -2816,6 +3053,8 @@ int mlx5e_open_locked(struct net_device *netdev)
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	int err;
 
+	mlx5e_selq_prepare_params(&priv->selq, &priv->channels.params);
+
 	set_bit(MLX5E_STATE_OPENED, &priv->state);
 
 	err = mlx5e_open_channels(priv, &priv->channels);
@@ -2823,6 +3062,7 @@ int mlx5e_open_locked(struct net_device *netdev)
 		goto err_clear_state_opened_flag;
 
 	priv->profile->update_rx(priv);
+	mlx5e_selq_apply(&priv->selq);
 	mlx5e_activate_priv_channels(priv);
 	mlx5e_apply_traps(priv, true);
 	if (priv->profile->update_carrier)
@@ -2833,6 +3073,7 @@ int mlx5e_open_locked(struct net_device *netdev)
 
 err_clear_state_opened_flag:
 	clear_bit(MLX5E_STATE_OPENED, &priv->state);
+	mlx5e_selq_cancel(&priv->selq);
 	return err;
 }
@@ -3050,6 +3291,12 @@ err_close_tises:
 
 static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
 {
+	if (priv->mqprio_rl) {
+		mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+		mlx5e_mqprio_rl_free(priv->mqprio_rl);
+		priv->mqprio_rl = NULL;
+	}
+
 	mlx5e_accel_cleanup_tx(priv);
 	mlx5e_destroy_tises(priv);
 }
@@ -3118,19 +3365,38 @@ static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
 {
 	params->mqprio.mode = TC_MQPRIO_MODE_DCB;
 	params->mqprio.num_tc = num_tc;
-	params->mqprio.channel.rl = NULL;
 	mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
					     params->num_channels);
 }
 
+static void mlx5e_mqprio_rl_update_params(struct mlx5e_params *params,
					  struct mlx5e_mqprio_rl *rl)
+{
+	int tc;
+
+	for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+		u32 hw_id = 0;
+
+		if (rl)
+			mlx5e_mqprio_rl_get_node_hw_id(rl, tc, &hw_id);
+		params->mqprio.channel.hw_id[tc] = hw_id;
+	}
+}
+
 static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
-					    struct tc_mqprio_qopt *qopt,
+					    struct tc_mqprio_qopt_offload *mqprio,
					    struct mlx5e_mqprio_rl *rl)
 {
+	int tc;
+
 	params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
-	params->mqprio.num_tc = qopt->num_tc;
-	params->mqprio.channel.rl = rl;
-	mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
+	params->mqprio.num_tc = mqprio->qopt.num_tc;
+
+	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+		params->mqprio.channel.max_rate[tc] = mqprio->max_rate[tc];
+
+	mlx5e_mqprio_rl_update_params(params, rl);
+
+	mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, &mqprio->qopt);
 }
 
 static void mlx5e_params_mqprio_reset(struct mlx5e_params *params)
@@ -3156,6 +3422,12 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
 
 	err = mlx5e_safe_switch_params(priv, &new_params,
				       mlx5e_num_channels_changed_ctx, NULL, true);
+	if (!err && priv->mqprio_rl) {
+		mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+		mlx5e_mqprio_rl_free(priv->mqprio_rl);
+		priv->mqprio_rl = NULL;
+	}
+
 	priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
				    mlx5e_get_dcb_num_tc(&priv->channels.params));
 	return err;
@@ -3214,16 +3486,38 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
 	return 0;
 }
 
-static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio)
+static bool mlx5e_mqprio_rate_limit(u8 num_tc, u64 max_rate[])
 {
 	int tc;
 
-	for (tc = 0; tc < mqprio->qopt.num_tc; tc++)
-		if (mqprio->max_rate[tc])
+	for (tc = 0; tc < num_tc; tc++)
+		if (max_rate[tc])
 			return true;
 	return false;
 }
 
+static struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_create(struct mlx5_core_dev *mdev,
						      u8 num_tc, u64 max_rate[])
+{
+	struct mlx5e_mqprio_rl *rl;
+	int err;
+
+	if (!mlx5e_mqprio_rate_limit(num_tc, max_rate))
+		return NULL;
+
+	rl = mlx5e_mqprio_rl_alloc();
+	if (!rl)
+		return ERR_PTR(-ENOMEM);
+
+	err = mlx5e_mqprio_rl_init(rl, mdev, num_tc, max_rate);
+	if (err) {
+		mlx5e_mqprio_rl_free(rl);
+		return ERR_PTR(err);
+	}
+
+	return rl;
+}
+
 static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
					 struct tc_mqprio_qopt_offload *mqprio)
 {
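mlx5e_mqprio_rl_create() above deliberately returns a tri-state pointer: NULL when no TC carries a max_rate (nothing to do), ERR_PTR() on failure, or a live object. A caller-side sketch using the same helper:

    struct mlx5e_mqprio_rl *rl;

    rl = mlx5e_mqprio_rl_create(mdev, num_tc, max_rate);
    if (IS_ERR(rl))
            return PTR_ERR(rl);   /* -ENOMEM or an init error */
    /* rl == NULL is a valid outcome here: no rate limiting was
     * requested, and there is nothing to clean up for this case.
     */

Keeping "nothing to do" distinct from "failed" lets the error path free the rate limiter only when one actually exists.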
@@ -3237,32 +3531,32 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
 	if (err)
 		return err;
 
-	rl = NULL;
-	if (mlx5e_mqprio_rate_limit(mqprio)) {
-		rl = mlx5e_mqprio_rl_alloc();
-		if (!rl)
-			return -ENOMEM;
-		err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc,
-					   mqprio->max_rate);
-		if (err) {
-			mlx5e_mqprio_rl_free(rl);
-			return err;
-		}
-	}
+	rl = mlx5e_mqprio_rl_create(priv->mdev, mqprio->qopt.num_tc, mqprio->max_rate);
+	if (IS_ERR(rl))
+		return PTR_ERR(rl);
 
 	new_params = priv->channels.params;
-	mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl);
+	mlx5e_params_mqprio_channel_set(&new_params, mqprio, rl);
 
 	nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
 	preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
		mlx5e_update_netdev_queues_ctx;
 	err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
-	if (err && rl) {
-		mlx5e_mqprio_rl_cleanup(rl);
-		mlx5e_mqprio_rl_free(rl);
+	if (err) {
+		if (rl) {
+			mlx5e_mqprio_rl_cleanup(rl);
+			mlx5e_mqprio_rl_free(rl);
+		}
+		return err;
 	}
 
-	return err;
+	if (priv->mqprio_rl) {
+		mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+		mlx5e_mqprio_rl_free(priv->mqprio_rl);
+	}
+	priv->mqprio_rl = rl;
+
+	return 0;
 }
 
 static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
@@ -3271,7 +3565,7 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
 	/* MQPRIO is another toplevel qdisc that can't be attached
 	 * simultaneously with the offloaded HTB.
 	 */
-	if (WARN_ON(priv->htb.maj_id))
+	if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
 		return -EINVAL;
 
 	switch (mqprio->mode) {
@@ -3284,47 +3578,6 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
 	}
 }
 
-static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb)
-{
-	int res;
-
-	switch (htb->command) {
-	case TC_HTB_CREATE:
-		return mlx5e_htb_root_add(priv, htb->parent_classid, htb->classid,
-					  htb->extack);
-	case TC_HTB_DESTROY:
-		return mlx5e_htb_root_del(priv);
-	case TC_HTB_LEAF_ALLOC_QUEUE:
-		res = mlx5e_htb_leaf_alloc_queue(priv, htb->classid, htb->parent_classid,
-						 htb->rate, htb->ceil, htb->extack);
-		if (res < 0)
-			return res;
-		htb->qid = res;
-		return 0;
-	case TC_HTB_LEAF_TO_INNER:
-		return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid,
-					       htb->rate, htb->ceil, htb->extack);
-	case TC_HTB_LEAF_DEL:
-		return mlx5e_htb_leaf_del(priv, &htb->classid, htb->extack);
-	case TC_HTB_LEAF_DEL_LAST:
-	case TC_HTB_LEAF_DEL_LAST_FORCE:
-		return mlx5e_htb_leaf_del_last(priv, htb->classid,
-					       htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
-					       htb->extack);
-	case TC_HTB_NODE_MODIFY:
-		return mlx5e_htb_node_modify(priv, htb->classid, htb->rate, htb->ceil,
-					     htb->extack);
-	case TC_HTB_LEAF_QUERY_QUEUE:
-		res = mlx5e_get_txq_by_classid(priv, htb->classid);
-		if (res < 0)
-			return res;
-		htb->qid = res;
-		return 0;
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
 static LIST_HEAD(mlx5e_block_cb_list);
 
 static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -3358,7 +3611,7 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
 		return err;
 	case TC_SETUP_QDISC_HTB:
 		mutex_lock(&priv->state_lock);
-		err = mlx5e_setup_tc_htb(priv, type_data);
+		err = mlx5e_htb_setup_tc(priv, type_data);
 		mutex_unlock(&priv->state_lock);
 		return err;
 	default:
@@ -3371,7 +3624,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
 	int i;
 
 	for (i = 0; i < priv->stats_nch; i++) {
-		struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
+		struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i];
 		struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
 		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
 		int j;
@@ -3446,7 +3699,8 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	stats->rx_length_errors =
 		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
 		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
-		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
+		PPORT_802_3_GET(pstats, a_frame_too_long_errors) +
+		VNIC_ENV_GET(&priv->stats.vnic, eth_wqe_too_small);
 	stats->rx_crc_errors =
 		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
 	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
@@ -3509,20 +3763,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
 
 	mutex_lock(&priv->state_lock);
 
-	if (enable && priv->xsk.refcnt) {
-		netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
-			    priv->xsk.refcnt);
-		err = -EINVAL;
-		goto out;
-	}
-
 	cur_params = &priv->channels.params;
-	if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
-		netdev_warn(netdev, "can't set LRO with legacy RQ\n");
-		err = -EINVAL;
-		goto out;
-	}
-
 	new_params = *cur_params;
 
 	if (enable)
@@ -3559,11 +3800,6 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable)
 
 	new_params = priv->channels.params;
 	if (enable) {
-		if (MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
-			netdev_warn(netdev, "Can't set HW-GRO when CQE compress is active\n");
-			err = -EINVAL;
-			goto out;
-		}
 		new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO;
 		new_params.packet_merge.shampo.match_criteria_type =
 			MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED;
@@ -3575,8 +3811,7 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable)
 		goto out;
 	}
 
-	err = mlx5e_safe_switch_params(priv, &new_params,
-				       mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
+	err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
 out:
 	mutex_unlock(&priv->state_lock);
 	return err;
@@ -3587,9 +3822,11 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
 	if (enable)
-		mlx5e_enable_cvlan_filter(priv);
+		mlx5e_enable_cvlan_filter(priv->fs,
					  !!(priv->netdev->flags & IFF_PROMISC));
 	else
-		mlx5e_disable_cvlan_filter(priv);
+		mlx5e_disable_cvlan_filter(priv->fs,
					   !!(priv->netdev->flags & IFF_PROMISC));
 
 	return 0;
 }
@@ -3597,21 +3834,26 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
 static int set_feature_hw_tc(struct net_device *netdev, bool enable)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err = 0;
 
 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
-	if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
+	int tc_flag = mlx5e_is_uplink_rep(priv) ? MLX5_TC_FLAG(ESW_OFFLOAD) :
						  MLX5_TC_FLAG(NIC_OFFLOAD);
+	if (!enable && mlx5e_tc_num_filters(priv, tc_flag)) {
 		netdev_err(netdev,
			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
 		return -EINVAL;
 	}
 #endif
 
-	if (!enable && priv->htb.maj_id) {
+	mutex_lock(&priv->state_lock);
+	if (!enable && mlx5e_selq_is_htb_enabled(&priv->selq)) {
 		netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
-		return -EINVAL;
+		err = -EINVAL;
 	}
+	mutex_unlock(&priv->state_lock);
 
-	return 0;
+	return err;
 }
 
 static int set_feature_rx_all(struct net_device *netdev, bool enable)
@@ -3693,20 +3935,45 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
 
 	mutex_lock(&priv->state_lock);
 
+	mlx5e_fs_set_vlan_strip_disable(priv->fs, !enable);
 	priv->channels.params.vlan_strip_disable = !enable;
+
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
 		goto unlock;
 
 	err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
-	if (err)
+	if (err) {
+		mlx5e_fs_set_vlan_strip_disable(priv->fs, enable);
 		priv->channels.params.vlan_strip_disable = enable;
-
+	}
 unlock:
 	mutex_unlock(&priv->state_lock);
 
 	return err;
 }
 
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_flow_steering *fs = priv->fs;
+
+	if (mlx5e_is_uplink_rep(priv))
+		return 0; /* no vlan table for uplink rep */
+
+	return mlx5e_fs_vlan_rx_add_vid(fs, dev, proto, vid);
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_flow_steering *fs = priv->fs;
+
+	if (mlx5e_is_uplink_rep(priv))
+		return 0; /* no vlan table for uplink rep */
+
+	return mlx5e_fs_vlan_rx_kill_vid(fs, dev, proto, vid);
+}
+
 #ifdef CONFIG_MLX5_EN_ARFS
 static int set_feature_arfs(struct net_device *netdev, bool enable)
 {
@@ -3714,9 +3981,9 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
 	int err;
 
 	if (enable)
-		err = mlx5e_arfs_enable(priv);
+		err = mlx5e_arfs_enable(priv->fs);
 	else
-		err = mlx5e_arfs_disable(priv);
+		err = mlx5e_arfs_disable(priv->fs);
 
 	return err;
 }
@@ -3724,12 +3991,11 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
 
 static int mlx5e_handle_feature(struct net_device *netdev,
				netdev_features_t *features,
-				netdev_features_t wanted_features,
				netdev_features_t feature,
				mlx5e_feature_handler feature_handler)
 {
-	netdev_features_t changes = wanted_features ^ netdev->features;
-	bool enable = !!(wanted_features & feature);
+	netdev_features_t changes = *features ^ netdev->features;
+	bool enable = !!(*features & feature);
 	int err;
 
 	if (!(changes & feature))
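The rewritten mlx5e_handle_feature() diffs the requested feature mask against the live one with XOR and, in the next hunk, reverts the bit in *features when a handler fails. The bit logic in isolation, with made-up masks and a hypothetical try_toggle() handler:

    u64 current_f = 0x3;                   /* features active now */
    u64 wanted_f  = 0x6;                   /* features requested */
    u64 changes   = wanted_f ^ current_f;  /* 0x5: bits that toggle */
    u64 feature   = 0x4;

    if (changes & feature) {
            bool enable = wanted_f & feature;
            if (try_toggle(feature, enable))   /* nonzero on failure */
                    wanted_f ^= feature;       /* revert just this bit */
    }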
"Enable" : "Disable", &feature, err); return err; } - MLX5E_SET_FEATURE(features, feature, enable); return 0; } int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) { - netdev_features_t oper_features = netdev->features; + netdev_features_t oper_features = features; int err = 0; #define MLX5E_HANDLE_FEATURE(feature, handler) \ - mlx5e_handle_feature(netdev, &oper_features, features, feature, handler) + mlx5e_handle_feature(netdev, &oper_features, feature, handler) err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro); @@ -3790,6 +4056,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev if (netdev->features & NETIF_F_NTUPLE) netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n"); + features &= ~NETIF_F_GRO_HW; + if (netdev->features & NETIF_F_GRO_HW) + netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n"); + return features; } @@ -3797,12 +4067,14 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, netdev_features_t features) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_vlan_table *vlan; struct mlx5e_params *params; + vlan = mlx5e_fs_get_vlan(priv->fs); mutex_lock(&priv->state_lock); params = &priv->channels.params; - if (!priv->fs.vlan || - !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs.vlan), VLAN_N_VID)) { + if (!vlan || + !bitmap_empty(mlx5e_vlan_get_active_svlans(vlan), VLAN_N_VID)) { /* HW strips the outer C-tag header, this is a problem * for S-tag traffic. */ @@ -3822,10 +4094,39 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, } } + if (params->xdp_prog) { + if (features & NETIF_F_LRO) { + netdev_warn(netdev, "LRO is incompatible with XDP\n"); + features &= ~NETIF_F_LRO; + } + if (features & NETIF_F_GRO_HW) { + netdev_warn(netdev, "HW GRO is incompatible with XDP\n"); + features &= ~NETIF_F_GRO_HW; + } + } + + if (priv->xsk.refcnt) { + if (features & NETIF_F_LRO) { + netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n", + priv->xsk.refcnt); + features &= ~NETIF_F_LRO; + } + if (features & NETIF_F_GRO_HW) { + netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n", + priv->xsk.refcnt); + features &= ~NETIF_F_GRO_HW; + } + } + if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { features &= ~NETIF_F_RXHASH; if (netdev->features & NETIF_F_RXHASH) netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n"); + + if (features & NETIF_F_GRO_HW) { + netdev_warn(netdev, "Disabling HW-GRO, not supported when CQE compress is active\n"); + features &= ~NETIF_F_GRO_HW; + } } if (mlx5e_is_uplink_rep(priv)) @@ -3862,7 +4163,7 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev, * 2. Size of SKBs allocated on XDP_PASS <= PAGE_SIZE. */ max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr); - max_mtu_page = mlx5e_xdp_max_mtu(new_params, &xsk); + max_mtu_page = MLX5E_HW2SW_MTU(new_params, SKB_MAX_HEAD(0)); max_mtu = min(max_mtu_frame, max_mtu_page); netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. 
@@ -3874,6 +4175,33 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
 	return true;
 }
 
+static bool mlx5e_params_validate_xdp(struct net_device *netdev,
				      struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params)
+{
+	bool is_linear;
+
+	/* No XSK params: AF_XDP can't be enabled yet at the point of setting
+	 * the XDP program.
+	 */
+	is_linear = mlx5e_rx_is_linear_skb(mdev, params, NULL);
+
+	if (!is_linear && params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
+		netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
			    params->sw_mtu,
			    mlx5e_xdp_max_mtu(params, NULL));
+		return false;
+	}
+	if (!is_linear && !params->xdp_prog->aux->xdp_has_frags) {
+		netdev_warn(netdev, "MTU(%d) > %d, too big for an XDP program not aware of multi buffer\n",
			    params->sw_mtu,
			    mlx5e_xdp_max_mtu(params, NULL));
+		return false;
+	}
+
+	return true;
+}
+
 int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     mlx5e_fp_preactivate preactivate)
 {
@@ -3893,10 +4221,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
 	if (err)
 		goto out;
 
-	if (params->xdp_prog &&
-	    !mlx5e_rx_is_linear_skb(&new_params, NULL)) {
-		netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
-			   new_mtu, mlx5e_xdp_max_mtu(params, NULL));
+	if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, priv->mdev,
							      &new_params)) {
 		err = -EINVAL;
 		goto out;
 	}
@@ -3911,19 +4237,21 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
 	if (params->packet_merge.type == MLX5E_PACKET_MERGE_LRO)
 		reset = false;
 
-	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+	    params->packet_merge.type != MLX5E_PACKET_MERGE_SHAMPO) {
 		bool is_linear_old = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, params, NULL);
 		bool is_linear_new = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
								  &new_params, NULL);
-		u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params, NULL);
-		u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_params, NULL);
+		u8 sz_old = mlx5e_mpwqe_get_log_rq_size(priv->mdev, params, NULL);
+		u8 sz_new = mlx5e_mpwqe_get_log_rq_size(priv->mdev, &new_params, NULL);
 
 		/* Always reset in linear mode - hw_mtu is used in data path.
 		 * Check that the mode was non-linear and didn't change.
 		 * If XSK is active, XSK RQs are linear.
+		 * Reset if the RQ size changed, even if it's non-linear.
		 */
 		if (!is_linear_old && !is_linear_new && !priv->xsk.refcnt &&
-		    ppw_old == ppw_new)
+		    sz_old == sz_new)
			reset = false;
 	}
@@ -4038,7 +4366,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
 		goto err_unlock;
 	}
 
-	if (!priv->profile->rx_ptp_support)
+	if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
 		err = mlx5e_hwstamp_config_no_ptp_rx(priv,
						     config.rx_filter != HWTSTAMP_FILTER_NONE);
 	else
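mlx5e_params_validate_xdp() above turns on whether a received frame still fits linearly in a single page. A back-of-the-envelope version of that budget, with illustrative numbers only (the authoritative bound is computed by mlx5e_rx_is_linear_skb()/mlx5e_xdp_max_mtu()):

    size_t page     = 4096;                /* assumed PAGE_SIZE */
    size_t headroom = 256;                 /* XDP_PACKET_HEADROOM */
    size_t tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

    /* roughly 4096 - 256 - ~320, a bit under 3.5 KiB for the frame */
    size_t max_linear_frame = page - headroom - tailroom;

Anything larger requires either the cyclic (legacy) RQ or an XDP program that declares xdp_has_frags.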
@@ -4370,24 +4698,11 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
 		return -EINVAL;
 	}
 
-	if (mlx5_fpga_is_ipsec_device(priv->mdev)) {
-		netdev_warn(netdev,
-			    "XDP is not available on Innova cards with IPsec support\n");
-		return -EINVAL;
-	}
-
 	new_params = priv->channels.params;
 	new_params.xdp_prog = prog;
 
-	/* No XSK params: AF_XDP can't be enabled yet at the point of setting
-	 * the XDP program.
-	 */
-	if (!mlx5e_rx_is_linear_skb(&new_params, NULL)) {
-		netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
-			    new_params.sw_mtu,
-			    mlx5e_xdp_max_mtu(&new_params, NULL));
+	if (!mlx5e_params_validate_xdp(netdev, priv->mdev, &new_params))
 		return -EINVAL;
-	}
 
 	return 0;
 }
@@ -4424,8 +4739,20 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 
 	new_params = priv->channels.params;
 	new_params.xdp_prog = prog;
-	if (reset)
-		mlx5e_set_rq_type(priv->mdev, &new_params);
+
+	/* XDP affects striding RQ parameters. Block XDP if striding RQ won't be
+	 * supported with the new parameters: if PAGE_SIZE is bigger than
+	 * MLX5_MPWQE_LOG_STRIDE_SZ_MAX, striding RQ can't be used, even though
+	 * the MTU is small enough for the linear mode, because XDP uses strides
+	 * of PAGE_SIZE on regular RQs.
	 */
+	if (reset && MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
+		/* Checking for regular RQs here; XSK RQs were checked on XSK bind. */
+		err = mlx5e_mpwrq_validate_regular(priv->mdev, &new_params);
+		if (err)
+			goto unlock;
+	}
+
 	old_prog = priv->channels.params.xdp_prog;
 
 	err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
@@ -4454,6 +4781,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 
 unlock:
 	mutex_unlock(&priv->state_lock);
+
+	/* Need to fix some features. */
+	if (!err)
+		netdev_update_features(netdev);
+
 	return err;
 }
@@ -4592,11 +4924,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
		    priv->max_nch);
 	mlx5e_params_mqprio_reset(params);
 
-	/* Set an initial non-zero value, so that mlx5e_select_queue won't
-	 * divide by zero if called before first activating channels.
-	 */
-	priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;
-
 	/* SQ */
 	params->log_sq_size = is_kdump_kernel() ?
 		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
@@ -4618,14 +4945,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
 	/* RQ */
 	mlx5e_build_rq_params(mdev, params);
 
-	/* HW LRO */
-	if (MLX5_CAP_ETH(mdev, lro_cap) &&
-	    params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
-		/* No XSK params: checking the availability of striding RQ in general. */
-		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
-			params->packet_merge.type = slow_pci_heuristic(mdev) ?
-				MLX5E_PACKET_MERGE_NONE : MLX5E_PACKET_MERGE_LRO;
-	}
 	params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
 
 	/* CQ moderation params */
@@ -4735,6 +5054,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	netdev->vlan_features    |= NETIF_F_TSO6;
 	netdev->vlan_features    |= NETIF_F_RXCSUM;
 	netdev->vlan_features    |= NETIF_F_RXHASH;
+	netdev->vlan_features    |= NETIF_F_GSO_PARTIAL;
 
 	netdev->mpls_features    |= NETIF_F_SG;
 	netdev->mpls_features    |= NETIF_F_HW_CSUM;
@@ -4752,7 +5072,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
	    !MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
	    !MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
-	    mlx5e_check_fragmented_striding_rq_cap(mdev))
+	    mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
						   MLX5E_MPWRQ_UMR_MODE_ALIGNED))
		netdev->vlan_features    |= NETIF_F_LRO;
 
 	netdev->hw_features       = netdev->vlan_features;
@@ -4761,10 +5082,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
 	netdev->hw_features      |= NETIF_F_HW_VLAN_STAG_TX;
 
-	if (!!MLX5_CAP_GEN(mdev, shampo) &&
-	    mlx5e_check_fragmented_striding_rq_cap(mdev))
-		netdev->hw_features    |= NETIF_F_GRO_HW;
-
 	if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
 		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
 		netdev->hw_enc_features |= NETIF_F_TSO;
@@ -4773,15 +5090,22 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	}
 
 	if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
-		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL;
-		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
-		netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL;
+		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
+		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
+		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
+		netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
					 NETIF_F_GSO_UDP_TUNNEL_CSUM;
 	}
 
 	if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
-		netdev->hw_features     |= NETIF_F_GSO_GRE;
-		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
-		netdev->gso_partial_features |= NETIF_F_GSO_GRE;
+		netdev->hw_features     |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
+		netdev->hw_enc_features |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
+		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
						NETIF_F_GSO_GRE_CSUM;
 	}
 
 	if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
@@ -4793,7 +5117,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
					       NETIF_F_GSO_IPXIP6;
 	}
 
-	netdev->hw_features                      |= NETIF_F_GSO_PARTIAL;
 	netdev->gso_partial_features             |= NETIF_F_GSO_UDP_L4;
 	netdev->hw_features                      |= NETIF_F_GSO_UDP_L4;
 	netdev->features                         |= NETIF_F_GSO_UDP_L4;
@@ -4836,9 +5159,11 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 
 	netdev->priv_flags       |= IFF_UNICAST_FLT;
 
+	netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
 	mlx5e_set_netdev_dev_addr(netdev);
+	mlx5e_macsec_build_netdev(priv);
 	mlx5e_ipsec_build_netdev(priv);
-	mlx5e_tls_build_netdev(priv);
+	mlx5e_ktls_build_netdev(priv);
 }
 
 void mlx5e_create_q_counters(struct mlx5e_priv *priv)
@@ -4883,6 +5208,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
			  struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5e_flow_steering *fs;
 	int err;
 
 	mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
@@ -4890,17 +5216,20 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
 
 	mlx5e_timestamp_init(priv);
 
-	err = mlx5e_fs_init(priv);
-	if (err) {
@@ -4883,6 +5208,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
 			  struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5e_flow_steering *fs;
 	int err;
 
 	mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
@@ -4890,17 +5216,20 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
 
 	mlx5e_timestamp_init(priv);
 
-	err = mlx5e_fs_init(priv);
-	if (err) {
+	fs = mlx5e_fs_init(priv->profile, mdev,
+			   !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+	if (!fs) {
+		err = -ENOMEM;
 		mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
 		return err;
 	}
+	priv->fs = fs;
 
 	err = mlx5e_ipsec_init(priv);
 	if (err)
 		mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
 
-	err = mlx5e_tls_init(priv);
+	err = mlx5e_ktls_init(priv);
 	if (err)
 		mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
 
@@ -4911,9 +5240,9 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
 {
 	mlx5e_health_destroy_reporters(priv);
-	mlx5e_tls_cleanup(priv);
+	mlx5e_ktls_cleanup(priv);
 	mlx5e_ipsec_cleanup(priv);
-	mlx5e_fs_cleanup(priv);
+	mlx5e_fs_cleanup(priv->fs);
 }
 
 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -4934,7 +5263,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
 		goto err_destroy_q_counters;
 	}
 
-	features = MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP;
+	features = MLX5E_RX_RES_FEATURE_PTP;
 	if (priv->channels.params.tunneled_offload_en)
 		features |= MLX5E_RX_RES_FEATURE_INNER_FT;
 	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
@@ -4944,7 +5273,8 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
 	if (err)
 		goto err_close_drop_rq;
 
-	err = mlx5e_create_flow_steering(priv);
+	err = mlx5e_create_flow_steering(priv->fs, priv->rx_res, priv->profile,
+					 priv->netdev);
 	if (err) {
 		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
 		goto err_destroy_rx_res;
@@ -4967,7 +5297,8 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
 err_tc_nic_cleanup:
 	mlx5e_tc_nic_cleanup(priv);
 err_destroy_flow_steering:
-	mlx5e_destroy_flow_steering(priv);
+	mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
+				    priv->profile);
 err_destroy_rx_res:
 	mlx5e_rx_res_destroy(priv->rx_res);
 err_close_drop_rq:
@@ -4983,7 +5314,8 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
 {
 	mlx5e_accel_cleanup_rx(priv);
 	mlx5e_tc_nic_cleanup(priv);
-	mlx5e_destroy_flow_steering(priv);
+	mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
+				    priv->profile);
 	mlx5e_rx_res_destroy(priv->rx_res);
 	mlx5e_close_drop_rq(&priv->drop_rq);
 	mlx5e_destroy_q_counters(priv);
@@ -4991,6 +5323,23 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
 	priv->rx_res = NULL;
 }
 
+static void mlx5e_set_mqprio_rl(struct mlx5e_priv *priv)
+{
+	struct mlx5e_params *params;
+	struct mlx5e_mqprio_rl *rl;
+
+	params = &priv->channels.params;
+	if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
+		return;
+
+	rl = mlx5e_mqprio_rl_create(priv->mdev, params->mqprio.num_tc,
+				    params->mqprio.channel.max_rate);
+	if (IS_ERR(rl))
+		rl = NULL;
+	priv->mqprio_rl = rl;
+	mlx5e_mqprio_rl_update_params(params, rl);
+}
+
 static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
 {
 	int err;
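
The new mlx5e_set_mqprio_rl() above treats the per-TC rate limiter as best-effort: an ERR_PTR() from the constructor is converted to NULL and TX simply runs unshaped. A minimal sketch of that create-or-degrade idiom, with hypothetical toy_* names that are not driver API:

#include <linux/err.h>
#include <linux/types.h>

struct toy_rl;
struct toy_rl *toy_rl_create(int num_tc, u64 *max_rate); /* may return ERR_PTR */

static struct toy_rl *toy_rl_create_optional(int num_tc, u64 *max_rate)
{
	struct toy_rl *rl = toy_rl_create(num_tc, max_rate);

	return IS_ERR(rl) ? NULL : rl;	/* degrade rather than fail init */
}

This keeps a missing shaper from failing the whole TX init path.
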
@@ -5001,16 +5350,30 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
 		return err;
 	}
 
+	err = mlx5e_accel_init_tx(priv);
+	if (err)
+		goto err_destroy_tises;
+
+	mlx5e_set_mqprio_rl(priv);
 	mlx5e_dcbnl_initialize(priv);
 	return 0;
+
+err_destroy_tises:
+	mlx5e_destroy_tises(priv);
+	return err;
 }
 
 static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 {
 	struct net_device *netdev = priv->netdev;
 	struct mlx5_core_dev *mdev = priv->mdev;
+	int err;
+
+	mlx5e_fs_init_l2_addr(priv->fs, netdev);
 
-	mlx5e_init_l2_addr(priv);
+	err = mlx5e_macsec_init(priv);
+	if (err)
+		mlx5_core_err(mdev, "MACsec initialization failed, %d\n", err);
 
 	/* Marking the link as currently not needed by the Driver */
 	if (!netif_running(netdev))
@@ -5069,6 +5432,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 	mlx5e_disable_async_events(priv);
 	mlx5_lag_remove_netdev(mdev, priv->netdev);
 	mlx5_vxlan_reset_to_default(mdev->vxlan);
+	mlx5e_macsec_cleanup(priv);
 }
 
 int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
@@ -5090,12 +5454,27 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
 	.update_carrier = mlx5e_update_carrier,
 	.rx_handlers = &mlx5e_rx_handlers_nic,
 	.max_tc = MLX5E_MAX_NUM_TC,
-	.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
 	.stats_grps = mlx5e_nic_stats_grps,
 	.stats_grps_num = mlx5e_nic_stats_grps_num,
-	.rx_ptp_support = true,
+	.features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
+		BIT(MLX5E_PROFILE_FEATURE_PTP_TX) |
+		BIT(MLX5E_PROFILE_FEATURE_QOS_HTB) |
+		BIT(MLX5E_PROFILE_FEATURE_FS_VLAN) |
+		BIT(MLX5E_PROFILE_FEATURE_FS_TC),
 };
 
+static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev,
+					  const struct mlx5e_profile *profile)
+{
+	int nch;
+
+	nch = mlx5e_get_max_num_channels(mdev);
+
+	if (profile->max_nch_limit)
+		nch = min_t(int, nch, profile->max_nch_limit(mdev));
+	return nch;
+}
+
 static unsigned int
 mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
 		   const struct mlx5e_profile *profile)
@@ -5104,11 +5483,10 @@ mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
 	unsigned int max_nch, tmp;
 
 	/* core resources */
-	max_nch = mlx5e_get_max_num_channels(mdev);
+	max_nch = mlx5e_profile_max_num_channels(mdev, profile);
 
 	/* netdev rx queues */
-	tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
-	max_nch = min_t(unsigned int, max_nch, tmp);
+	max_nch = min_t(unsigned int, max_nch, netdev->num_rx_queues);
 
 	/* netdev tx queues */
 	tmp = netdev->num_tx_queues;
@@ -5122,25 +5500,52 @@ mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
 	return max_nch;
 }
 
+int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev)
+{
+	/* Indirect TIRS: 2 sets of TTCs (inner + outer steering)
+	 * and 1 set of direct TIRS
+	 */
+	return 2 * MLX5E_NUM_INDIR_TIRS
+		+ mlx5e_profile_max_num_channels(mdev, &mlx5e_nic_profile);
+}
+
+void mlx5e_set_rx_mode_work(struct work_struct *work)
+{
+	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+					       set_rx_mode_work);
+
+	return mlx5e_fs_set_rx_mode_work(priv->fs, priv->netdev);
+}
+
 /* mlx5e generic netdev management API (move to en_common.c) */
 int mlx5e_priv_init(struct mlx5e_priv *priv,
 		    const struct mlx5e_profile *profile,
 		    struct net_device *netdev,
 		    struct mlx5_core_dev *mdev)
 {
+	int nch, num_txqs, node;
+	int err;
+
+	num_txqs = netdev->num_tx_queues;
+	nch = mlx5e_calc_max_nch(mdev, netdev, profile);
+	node = dev_to_node(mlx5_core_dma_dev(mdev));
+
 	/* priv init */
 	priv->mdev = mdev;
 	priv->netdev = netdev;
 	priv->msglevel = MLX5E_MSG_LEVEL;
-	priv->max_nch = mlx5e_calc_max_nch(mdev, netdev, profile);
-	priv->stats_nch = priv->max_nch;
+	priv->max_nch = nch;
 	priv->max_opened_tc = 1;
 
 	if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
 		return -ENOMEM;
 
 	mutex_init(&priv->state_lock);
+
+	err = mlx5e_selq_init(&priv->selq, &priv->state_lock);
+	if (err)
+		goto err_free_cpumask;
+
 	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
 	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
 	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
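
The added mlx5e_set_rx_mode_work() above relies on the standard container_of() work-item pattern: the handler only receives the embedded work_struct, and recovers the enclosing object from the member offset. A self-contained sketch with a hypothetical toy_priv, not the mlx5 structure:

#include <linux/workqueue.h>

struct toy_priv {
	struct work_struct set_rx_mode_work;
	int promisc_count;
};

static void toy_set_rx_mode_work(struct work_struct *work)
{
	struct toy_priv *priv = container_of(work, struct toy_priv,
					     set_rx_mode_work);

	priv->promisc_count++;	/* safe: priv points at the embedding object */
}
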
@@ -5148,13 +5553,33 @@
 
 	priv->wq = create_singlethread_workqueue("mlx5e");
 	if (!priv->wq)
-		goto err_free_cpumask;
+		goto err_free_selq;
+
+	priv->txq2sq = kcalloc_node(num_txqs, sizeof(*priv->txq2sq), GFP_KERNEL, node);
+	if (!priv->txq2sq)
+		goto err_destroy_workqueue;
+
+	priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node);
+	if (!priv->tx_rates)
+		goto err_free_txq2sq;
+
+	priv->channel_stats =
+		kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
+	if (!priv->channel_stats)
+		goto err_free_tx_rates;
 
 	return 0;
 
+err_free_tx_rates:
+	kfree(priv->tx_rates);
+err_free_txq2sq:
+	kfree(priv->txq2sq);
+err_destroy_workqueue:
+	destroy_workqueue(priv->wq);
+err_free_selq:
+	mlx5e_selq_cleanup(&priv->selq);
 err_free_cpumask:
 	free_cpumask_var(priv->scratchpad.cpumask);
-
 	return -ENOMEM;
 }
 
@@ -5166,28 +5591,58 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
 	if (!priv->mdev)
 		return;
 
+	for (i = 0; i < priv->stats_nch; i++)
+		kvfree(priv->channel_stats[i]);
+	kfree(priv->channel_stats);
+	kfree(priv->tx_rates);
+	kfree(priv->txq2sq);
 	destroy_workqueue(priv->wq);
+	mutex_lock(&priv->state_lock);
+	mlx5e_selq_cleanup(&priv->selq);
+	mutex_unlock(&priv->state_lock);
 	free_cpumask_var(priv->scratchpad.cpumask);
 
-	for (i = 0; i < priv->htb.max_qos_sqs; i++)
-		kfree(priv->htb.qos_sq_stats[i]);
-	kvfree(priv->htb.qos_sq_stats);
-
-	if (priv->mqprio_rl) {
-		mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
-		mlx5e_mqprio_rl_free(priv->mqprio_rl);
-	}
+	for (i = 0; i < priv->htb_max_qos_sqs; i++)
+		kfree(priv->htb_qos_sq_stats[i]);
+	kvfree(priv->htb_qos_sq_stats);
 
 	memset(priv, 0, sizeof(*priv));
 }
 
+static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
+					   const struct mlx5e_profile *profile)
+{
+	unsigned int nch, ptp_txqs, qos_txqs;
+
+	nch = mlx5e_profile_max_num_channels(mdev, profile);
+
+	ptp_txqs = MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) &&
+		   mlx5e_profile_feature_cap(profile, PTP_TX) ?
+		   profile->max_tc : 0;
+
+	qos_txqs = mlx5_qos_is_supported(mdev) &&
+		   mlx5e_profile_feature_cap(profile, QOS_HTB) ?
+		   mlx5e_qos_max_leaf_nodes(mdev) : 0;
+
+	return nch * profile->max_tc + ptp_txqs + qos_txqs;
+}
+
+static unsigned int mlx5e_get_max_num_rxqs(struct mlx5_core_dev *mdev,
+					   const struct mlx5e_profile *profile)
+{
+	return mlx5e_profile_max_num_channels(mdev, profile);
+}
+
 struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
-		    unsigned int txqs, unsigned int rxqs)
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile)
 {
 	struct net_device *netdev;
+	unsigned int txqs, rxqs;
 	int err;
 
+	txqs = mlx5e_get_max_num_txqs(mdev, profile);
+	rxqs = mlx5e_get_max_num_rxqs(mdev, profile);
+
 	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), txqs, rxqs);
 	if (!netdev) {
 		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
@@ -5201,6 +5656,7 @@ mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *prof
 	}
 
 	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
 	dev_net_set(netdev, mlx5_core_net(mdev));
 
 	return netdev;
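
To make the queue budget concrete, here is a worked example of the arithmetic mlx5e_get_max_num_txqs() performs above, under assumed capability values (the TOY_* numbers are illustrative, not read from real firmware):

#include <linux/types.h>

#define TOY_NCH		8		/* assumed channel count */
#define TOY_MAX_TC	8		/* assumed TC count */
#define TOY_PTP_TXQS	TOY_MAX_TC	/* PTP TX assumed supported */
#define TOY_QOS_TXQS	128		/* assumed HTB leaf-node cap */

static unsigned int toy_max_num_txqs(void)
{
	/* 8 * 8 + 8 + 128 = 200 TX queues handed to alloc_etherdev_mqs() */
	return TOY_NCH * TOY_MAX_TC + TOY_PTP_TXQS + TOY_QOS_TXQS;
}
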
@@ -5234,6 +5690,16 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
 	int err;
 
 	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
+	if (priv->fs)
+		mlx5e_fs_set_state_destroy(priv->fs,
+					   !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+
+	/* Validate the max_wqe_size_sq capability. */
+	if (WARN_ON_ONCE(mlx5e_get_max_sq_wqebbs(priv->mdev) < MLX5E_MAX_TX_WQEBBS)) {
+		mlx5_core_warn(priv->mdev, "MLX5E: Max SQ WQEBBs firmware capability: %u, needed %lu\n",
+			       mlx5e_get_max_sq_wqebbs(priv->mdev), MLX5E_MAX_TX_WQEBBS);
+		return -EIO;
+	}
 
 	/* max number of channels may have changed */
 	max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
@@ -5293,6 +5759,9 @@ err_cleanup_tx:
 out:
 	mlx5e_reset_channels(priv->netdev);
 	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+	if (priv->fs)
+		mlx5e_fs_set_state_destroy(priv->fs,
+					   !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
 	cancel_work_sync(&priv->update_stats_work);
 	return err;
 }
@@ -5302,6 +5771,9 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
 	const struct mlx5e_profile *profile = priv->profile;
 
 	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+	if (priv->fs)
+		mlx5e_fs_set_state_destroy(priv->fs,
+					   !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
 
 	if (profile->disable)
 		profile->disable(priv);
@@ -5389,7 +5861,7 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
 static int mlx5e_resume(struct auxiliary_device *adev)
 {
 	struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
-	struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
+	struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
 	struct net_device *netdev = priv->netdev;
 	struct mlx5_core_dev *mdev = edev->mdev;
 	int err;
@@ -5412,7 +5884,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
 
 static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
 {
-	struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
+	struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
 	struct net_device *netdev = priv->netdev;
 	struct mlx5_core_dev *mdev = priv->mdev;
@@ -5432,22 +5904,10 @@ static int mlx5e_probe(struct auxiliary_device *adev,
 	struct mlx5_core_dev *mdev = edev->mdev;
 	struct net_device *netdev;
 	pm_message_t state = {};
-	unsigned int txqs, rxqs, ptp_txqs = 0;
 	struct mlx5e_priv *priv;
-	int qos_sqs = 0;
 	int err;
-	int nch;
 
-	if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
-		ptp_txqs = profile->max_tc;
-
-	if (mlx5_qos_is_supported(mdev))
-		qos_sqs = mlx5e_qos_max_leaf_nodes(mdev);
-
-	nch = mlx5e_get_max_num_channels(mdev);
-	txqs = nch * profile->max_tc + ptp_txqs + qos_sqs;
-	rxqs = nch * profile->rq_groups;
-	netdev = mlx5e_create_netdev(mdev, profile, txqs, rxqs);
+	netdev = mlx5e_create_netdev(mdev, profile);
 	if (!netdev) {
 		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
 		return -ENOMEM;
@@ -5456,7 +5916,7 @@ static int mlx5e_probe(struct auxiliary_device *adev,
 	mlx5e_build_nic_netdev(netdev);
 
 	priv = netdev_priv(netdev);
-	dev_set_drvdata(&adev->dev, priv);
+	auxiliary_set_drvdata(adev, priv);
 	priv->profile = profile;
 	priv->ppriv = NULL;
@@ -5504,7 +5964,7 @@ err_destroy_netdev:
 
 static void mlx5e_remove(struct auxiliary_device *adev)
 {
-	struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
+	struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
 	pm_message_t state = {};
 
 	mlx5e_dcbnl_delete_app(priv);
@@ -5535,7 +5995,6 @@ int mlx5e_init(void)
 {
 	int ret;
 
-	mlx5e_ipsec_build_inverse_table();
 	mlx5e_build_ptys2ethtool_map();
 	ret = auxiliary_driver_register(&mlx5e_driver);
 	if (ret)
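
The dev_get_drvdata(&adev->dev) to auxiliary_get_drvdata(adev) conversions above are mechanical: the auxiliary-bus helpers wrap the same per-device pointer, just with a type-safe handle. A minimal probe/remove skeleton with hypothetical toy_* names, not the mlx5 code:

#include <linux/auxiliary_bus.h>
#include <linux/slab.h>

struct toy_priv { int dummy; };

static int toy_probe(struct auxiliary_device *adev,
		     const struct auxiliary_device_id *id)
{
	struct toy_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	auxiliary_set_drvdata(adev, priv);	/* stash once in probe */
	return 0;
}

static void toy_remove(struct auxiliary_device *adev)
{
	struct toy_priv *priv = auxiliary_get_drvdata(adev);	/* read back in any callback */

	kfree(priv);
}
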