author	Maxim Mikityanskiy <maximmi@nvidia.com>	2022-09-29 00:21:46 -0700
committer	Jakub Kicinski <kuba@kernel.org>	2022-09-30 07:55:46 -0700
commit	6470d2e7e8ed8e9dd560d8dc3e09d1100a17ee26 (patch)
tree	c0b3d8d6766c3b45d43e356cba0534faa97b509f /drivers/net/ethernet/mellanox/mlx5/core/en_main.c
parent	net/mlx5: Add MLX5_FLEXIBLE_INLEN to safely calculate cmd inlen (diff)
net/mlx5e: xsk: Use KSM for unaligned XSK
UMR MTTs used in striding RQ have certain alignment requirements. While it's guaranteed to work when UMR pages are aligned to the UMR page size, in practice it works when UMR pages are aligned to 8 bytes. However, that is still not enough flexibility for the unaligned mode of XSK.

This patch leverages KSM to map UMR pages without alignment requirements when unaligned XSK is active. The downside is that KSM entries are twice as big as MTTs, which limits the maximum WQE size, so regular RQs and aligned XSK continue using MTTs.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
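As a quick illustration of the size trade-off described above (a standalone sketch, not code from the patch): a KSM entry carries its own memory key plus a 64-bit virtual address, so it is twice the size of an MTT entry, and the same inline UMR WQE space therefore covers only half as many pages. The struct layouts below are modeled on struct mlx5_mtt / struct mlx5_ksm; the byte budget is an assumed example value, not a number taken from the driver.

/* Illustration only: why KSM halves the pages that fit in one inline UMR WQE. */
#include <stdint.h>
#include <stdio.h>

struct mtt_entry {		/* cf. struct mlx5_mtt */
	uint64_t ptag;		/* tagged physical address, 8 bytes per page */
};

struct ksm_entry {		/* cf. struct mlx5_ksm */
	uint32_t reserved;
	uint32_t key;		/* memory key (the patch uses one key for all entries) */
	uint64_t va;		/* page address; KSM imposes no alignment requirement */
};				/* 16 bytes per page */

int main(void)
{
	const size_t inline_bytes = 1024;	/* assumed inline space for translation entries */

	printf("MTT entries per UMR WQE: %zu\n", inline_bytes / sizeof(struct mtt_entry));
	printf("KSM entries per UMR WQE: %zu\n", inline_bytes / sizeof(struct ksm_entry));
	return 0;
}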
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_main.c')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 91
1 file changed, 67 insertions(+), 24 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a4edbb85706e..fbbc2e792c27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -68,7 +68,8 @@
#include "qos.h"
#include "en/trap.h"
-bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift)
+bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
+ bool unaligned)
{
u16 umr_wqebbs, max_wqebbs;
bool striding_rq_umr;
@@ -78,7 +79,7 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_
if (!striding_rq_umr)
return false;
- umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift);
+ umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, unaligned);
max_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
/* Sanity check; should never happen, because mlx5e_mpwrq_umr_wqebbs is
* calculated from mlx5e_get_max_sq_aligned_wqebbs.
@@ -208,9 +209,11 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
{
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+ u16 octowords;
u8 ds_cnt;
- ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift),
+ ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift,
+ rq->mpwqe.unaligned),
MLX5_SEND_WQE_DS);
cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
@@ -218,8 +221,9 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
cseg->umr_mkey = rq->mpwqe.umr_mkey_be;
ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
- ucseg->xlt_octowords =
- cpu_to_be16(MLX5_MTT_OCTW(rq->mpwqe.pages_per_wqe));
+ octowords = rq->mpwqe.unaligned ? MLX5_KSM_OCTW(rq->mpwqe.pages_per_wqe) :
+ MLX5_MTT_OCTW(rq->mpwqe.pages_per_wqe);
+ ucseg->xlt_octowords = cpu_to_be16(octowords);
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
@@ -279,39 +283,51 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
return 0;
}
-static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
- u64 npages, u8 page_shift, u32 *umr_mkey,
- dma_addr_t filler_addr)
+static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
+ u32 npages, u8 page_shift, u32 *umr_mkey,
+ dma_addr_t filler_addr, bool unaligned)
{
struct mlx5_mtt *mtt;
+ struct mlx5_ksm *ksm;
+ u32 octwords;
int inlen;
void *mkc;
u32 *in;
int err;
int i;
- inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*mtt) * npages;
+ if (unaligned && !MLX5_CAP_GEN(mdev, fixed_buffer_size)) {
+ mlx5_core_warn(mdev, "Unaligned AF_XDP requires fixed_buffer_size capability\n");
+ return -EINVAL;
+ }
+
+ inlen = MLX5_FLEXIBLE_INLEN(mdev, MLX5_ST_SZ_BYTES(create_mkey_in),
+ unaligned ? sizeof(*ksm) : sizeof(*mtt),
+ npages);
+ if (inlen < 0)
+ return inlen;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
+ octwords = unaligned ? MLX5_KSM_OCTW(npages) : MLX5_MTT_OCTW(npages);
+
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
- MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, access_mode_1_0,
+ unaligned ? MLX5_MKC_ACCESS_MODE_KSM : MLX5_MKC_ACCESS_MODE_MTT);
mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
MLX5_SET64(mkc, mkc, len, npages << page_shift);
- MLX5_SET(mkc, mkc, translations_octword_size,
- MLX5_MTT_OCTW(npages));
+ MLX5_SET(mkc, mkc, translations_octword_size, octwords);
MLX5_SET(mkc, mkc, log_page_size, page_shift);
- MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
- MLX5_MTT_OCTW(npages));
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size, octwords);
/* Initialize the mkey with all MTTs pointing to a default
* page (filler_addr). When the channels are activated, UMR
@@ -319,9 +335,20 @@ static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
* the RQ's pool, while the gaps (wqe_overflow) remain mapped
* to the default page.
*/
- mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
- for (i = 0 ; i < npages ; i++)
- mtt[i].ptag = cpu_to_be64(filler_addr);
+ if (unaligned) {
+ ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (i = 0; i < npages; i++)
+ ksm[i] = (struct mlx5_ksm) {
+ .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
+ .va = cpu_to_be64(filler_addr),
+ };
+ } else {
+ mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (i = 0; i < npages; i++)
+ mtt[i] = (struct mlx5_mtt) {
+ .ptag = cpu_to_be64(filler_addr),
+ };
+ }
err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
@@ -364,12 +391,24 @@ static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
- u64 num_mtts = mlx5_wq_ll_get_size(&rq->mpwqe.wq) * rq->mpwqe.mtts_per_wqe;
+ u32 wq_size = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+ u32 num_entries, max_num_entries;
u32 umr_mkey;
int err;
- err = mlx5e_create_umr_mtt_mkey(mdev, num_mtts, rq->mpwqe.page_shift,
- &umr_mkey, rq->wqe_overflow.addr);
+ max_num_entries = mlx5e_mpwrq_max_num_entries(mdev, rq->mpwqe.unaligned);
+
+ /* Shouldn't overflow, the result is at most MLX5E_MAX_RQ_NUM_MTTS. */
+ if (WARN_ON_ONCE(check_mul_overflow(wq_size, (u32)rq->mpwqe.mtts_per_wqe,
+ &num_entries) ||
+ num_entries > max_num_entries))
+ mlx5_core_err(mdev, "%s: multiplication overflow: %u * %u > %u\n",
+ __func__, wq_size, rq->mpwqe.mtts_per_wqe,
+ max_num_entries);
+
+ err = mlx5e_create_umr_mkey(mdev, num_entries, rq->mpwqe.page_shift,
+ &umr_mkey, rq->wqe_overflow.addr,
+ rq->mpwqe.unaligned);
rq->mpwqe.umr_mkey_be = cpu_to_be32(umr_mkey);
return err;
}
@@ -597,12 +636,16 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ rq->mpwqe.unaligned = xsk ? xsk->unaligned : false;
rq->mpwqe.pages_per_wqe =
- mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift);
+ mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift,
+ rq->mpwqe.unaligned);
rq->mpwqe.umr_wqebbs =
- mlx5e_mpwrq_umr_wqebbs(mdev, rq->mpwqe.page_shift);
+ mlx5e_mpwrq_umr_wqebbs(mdev, rq->mpwqe.page_shift,
+ rq->mpwqe.unaligned);
rq->mpwqe.mtts_per_wqe =
- mlx5e_mpwrq_mtts_per_wqe(mdev, rq->mpwqe.page_shift);
+ mlx5e_mpwrq_mtts_per_wqe(mdev, rq->mpwqe.page_shift,
+ rq->mpwqe.unaligned);
pool_size = rq->mpwqe.pages_per_wqe <<
mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
@@ -4932,7 +4975,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
!MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
!MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
- mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT))
+ mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT, false))
netdev->vlan_features |= NETIF_F_LRO;
netdev->hw_features = netdev->vlan_features;