| author | Maxim Mikityanskiy <maximmi@nvidia.com> | 2022-09-29 00:21:42 -0700 |
|---|---|---|
| committer | Jakub Kicinski <kuba@kernel.org> | 2022-09-30 07:55:45 -0700 |
| commit | e5a3cc83d54019a90119ce0cf17b5e21729b79bf (patch) | |
| tree | 0b19b6f34dcdd767163ebdd5496d77c844bc3de5 /drivers/net/ethernet/mellanox/mlx5/core/en/params.c | |
| parent | xsk: Expose min chunk size to drivers (diff) | |
net/mlx5e: Use runtime page_shift for striding RQ
This commit allows striding RQ to determine the MTT page size at runtime,
instead of sticking to the compile-time PAGE_SIZE. This functionality
will be used by a subsequent commit that adjusts the MTT page size to the
XSK frame size.
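For illustration only, the page-shift selection this patch introduces boils down to the following standalone C sketch. It is not the kernel code: the log_min_mkey_entity_size capability is passed in as a plain argument, PAGE_SHIFT is hardcoded to 12, and the XSK branch mentioned in the comments only arrives with the follow-up commit.

```c
/* Standalone sketch of the runtime page_shift selection (assumptions:
 * the capability value is a plain argument, PAGE_SHIFT is 12).
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* A zero capability means "not reported"; fall back to 4K (2^12). */
static uint8_t min_page_shift(uint8_t log_min_mkey_entity_size)
{
	return log_min_mkey_entity_size ? log_min_mkey_entity_size : 12;
}

/* For a regular RQ (is_xsk == 0) the requested shift is PAGE_SHIFT; the
 * follow-up commit makes XSK request order_base_2(chunk_size) instead.
 */
static uint8_t page_shift(uint8_t log_min_mkey_entity_size, int is_xsk,
			  uint8_t req_page_shift)
{
	uint8_t min_shift = min_page_shift(log_min_mkey_entity_size);

	/* Regular RQ uses order-0 pages; the NIC must be able to map them. */
	if (!is_xsk && req_page_shift < min_shift)
		min_shift = req_page_shift;

	return req_page_shift > min_shift ? req_page_shift : min_shift;
}

int main(void)
{
	printf("regular RQ:  %u\n", page_shift(0, 0, PAGE_SHIFT));  /* 12 */
	printf("xsk, cap=13: %u\n", page_shift(13, 1, PAGE_SHIFT)); /* 13 */
	return 0;
}
```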
Stick with PAGE_SIZE for XSK on legacy RQ: frag_stride is not used in
the data path, it only helps calculate how pages are partitioned into
fragments, and PAGE_SIZE ensures that each fragment starts at the
beginning of a new allocation unit (XSK frame).
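A toy illustration of the legacy-RQ point above: fragments are laid out at multiples of frag_stride, so keeping frag_stride equal to PAGE_SIZE (which equals the XSK frame size here) puts every fragment at the start of an allocation unit. All values in this sketch are assumptions; only the frag_stride name mirrors the driver.

```c
/* Toy layout check: with frag_stride == PAGE_SIZE == XSK frame size,
 * every fragment offset is a multiple of the frame size. Assumed values.
 */
#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;        /* PAGE_SIZE == XSK frame size */
	unsigned int frag_stride = page_size; /* what this patch keeps for legacy RQ */

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int off = i * frag_stride;
		printf("frag %u: offset %u, frame-aligned: %s\n",
		       i, off, off % page_size == 0 ? "yes" : "no");
	}
	return 0;
}
```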
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en/params.c')
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 198
1 file changed, 164 insertions(+), 34 deletions(-)
```diff
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 5f8912e8404d..1a0d8f9102df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -7,15 +7,96 @@
 #include "en_accel/en_accel.h"
 #include "en_accel/ipsec.h"
 
-u16 mlx5e_mpwrq_umr_wqe_sz(u8 pages_per_wqe)
+static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
 {
-	return sizeof(struct mlx5e_umr_wqe) +
+	u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);
+
+	return min_page_shift ? : 12;
+}
+
+u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
+{
+	u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev);
+	u8 req_page_shift = PAGE_SHIFT;
+
+	/* Regular RQ uses order-0 pages, the NIC must be able to map them. */
+	if (WARN_ON_ONCE(!xsk && req_page_shift < min_page_shift))
+		min_page_shift = req_page_shift;
+
+	return max(req_page_shift, min_page_shift);
+}
+
+u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift)
+{
+	u8 max_pages_per_wqe, max_log_mpwqe_size;
+	u16 max_wqe_size;
+
+	/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
+	max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
+	max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
+				       MLX5_UMR_MTT_ALIGNMENT) / sizeof(struct mlx5_mtt);
+	max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
+
+	WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
+
+	return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
+}
+
+u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift)
+{
+	u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift);
+	u8 pages_per_wqe;
+
+	pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1;
+
+	/* Sanity check for further calculations to succeed. */
+	BUILD_BUG_ON(MLX5_MPWRQ_MAX_PAGES_PER_WQE > 64);
+	if (WARN_ON_ONCE(pages_per_wqe > MLX5_MPWRQ_MAX_PAGES_PER_WQE))
+		return MLX5_MPWRQ_MAX_PAGES_PER_WQE;
+
+	return pages_per_wqe;
+}
+
+u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift)
+{
+	u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift);
+	u16 umr_wqe_sz;
+
+	umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) +
 		ALIGN(pages_per_wqe * sizeof(struct mlx5_mtt), MLX5_UMR_MTT_ALIGNMENT);
+
+	WARN_ON_ONCE(DIV_ROUND_UP(umr_wqe_sz, MLX5_SEND_WQE_DS) > MLX5_WQE_CTRL_DS_MASK);
+
+	return umr_wqe_sz;
+}
+
+u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift)
+{
+	return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(mdev, page_shift), MLX5_SEND_WQE_BB);
 }
 
-u8 mlx5e_mpwrq_umr_wqebbs(u8 pages_per_wqe)
+u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift)
 {
-	return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(pages_per_wqe), MLX5_SEND_WQE_BB);
+	/* Add another page as a buffer between WQEs. This page will absorb
+	 * write overflow by the hardware, when receiving packets larger than
+	 * MTU. These oversize packets are dropped by the driver at a later
+	 * stage.
+	 */
+	return MLX5_ALIGN_MTTS(mlx5e_mpwrq_pages_per_wqe(mdev, page_shift) + 1);
+}
+
+static u8 mlx5e_mpwrq_max_log_rq_size(struct mlx5_core_dev *mdev, u8 page_shift)
+{
+	u8 mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, page_shift);
+
+	return ilog2(MLX5E_MAX_RQ_NUM_MTTS / mtts_per_wqe);
+}
+
+u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift)
+{
+	return mlx5e_mpwrq_max_log_rq_size(mdev, page_shift) +
+		mlx5e_mpwrq_log_wqe_sz(mdev, page_shift) -
+		MLX5E_ORDER2_MAX_PACKET_MTU;
 }
 
 u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
@@ -52,14 +133,16 @@ static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk)
 	return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
 }
 
-static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5e_params *params,
-					 struct mlx5e_xsk_param *xsk)
+static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
+					 struct mlx5e_params *params,
+					 struct mlx5e_xsk_param *xsk,
+					 bool mpwqe)
 {
 	/* XSK frames are mapped as individual pages, because frames may come in
 	 * an arbitrary order from random locations in the UMEM.
 	 */
 	if (xsk)
-		return PAGE_SIZE;
+		return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
 
 	/* XDP in mlx5e doesn't support multiple packets per page. */
 	if (params->xdp_prog)
@@ -68,15 +151,18 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5e_params *params,
 	return roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
 }
 
-static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
+static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,
+				       struct mlx5e_params *params,
 				       struct mlx5e_xsk_param *xsk)
 {
-	u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(params, xsk);
+	u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true);
+	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
 
-	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_stride_sz);
+	return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift) - order_base_2(linear_stride_sz);
 }
 
-bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
+bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
+			    struct mlx5e_params *params,
 			    struct mlx5e_xsk_param *xsk)
 {
 	if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
@@ -96,9 +182,10 @@ bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
 }
 
 static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
-					  u8 log_stride_sz, u8 log_num_strides)
+					  u8 log_stride_sz, u8 log_num_strides,
+					  u8 page_shift)
 {
-	if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
+	if (log_stride_sz + log_num_strides != mlx5e_mpwrq_log_wqe_sz(mdev, page_shift))
 		return false;
 
 	if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
@@ -118,28 +205,50 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
 				  struct mlx5e_params *params,
 				  struct mlx5e_xsk_param *xsk)
 {
-	s8 log_num_strides;
+	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+	u8 log_num_strides;
 	u8 log_stride_sz;
+	u8 log_wqe_sz;
 
-	if (!mlx5e_rx_is_linear_skb(params, xsk))
+	if (!mlx5e_rx_is_linear_skb(mdev, params, xsk))
 		return false;
 
-	log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(params, xsk));
-	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;
+	log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
+	log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift);
 
-	return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
+	if (log_wqe_sz < log_stride_sz)
+		return false;
+
+	log_num_strides = log_wqe_sz - log_stride_sz;
+
+	return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz,
+					     log_num_strides, page_shift);
 }
 
-u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
+u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
+			       struct mlx5e_params *params,
 			       struct mlx5e_xsk_param *xsk)
 {
-	u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params, xsk);
+	u8 log_pkts_per_wqe, page_shift, max_log_rq_size;
+
+	log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, xsk);
+	page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+	max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift);
 
 	/* Numbers are unsigned, don't subtract to avoid underflow. */
 	if (params->log_rq_mtu_frames <
 	    log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
 		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
 
+	/* Ethtool's rx_max_pending is calculated for regular RQ, that uses
+	 * pages of PAGE_SIZE. Max length of an XSK RQ might differ if it uses a
+	 * frame size not equal to PAGE_SIZE.
+	 * A stricter condition is checked in mlx5e_mpwrq_validate_xsk, WARN on
+	 * unexpected failure.
+	 */
+	if (WARN_ON_ONCE(params->log_rq_mtu_frames > log_pkts_per_wqe + max_log_rq_size))
+		return max_log_rq_size;
+
 	return params->log_rq_mtu_frames - log_pkts_per_wqe;
 }
@@ -169,7 +278,7 @@ u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
 				   struct mlx5e_xsk_param *xsk)
 {
 	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
-		return order_base_2(mlx5e_rx_get_linear_stride_sz(params, xsk));
+		return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
 
 	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
 }
@@ -178,7 +287,9 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
 				   struct mlx5e_params *params,
 				   struct mlx5e_xsk_param *xsk)
 {
-	return MLX5_MPWRQ_LOG_WQE_SZ -
+	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+
+	return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift) -
 		mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
 }
@@ -327,7 +438,9 @@ bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
 
 int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
 {
-	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
+	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, NULL);
+
+	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift))
 		return -EOPNOTSUPP;
 
 	if (params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
@@ -339,12 +452,26 @@ int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params
 int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
 			     struct mlx5e_xsk_param *xsk)
 {
-	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
+	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+	u16 max_mtu_pkts;
+
+	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift))
 		return -EOPNOTSUPP;
 
 	if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
 		return -EINVAL;
 
+	/* Current RQ length is too big for the given frame size, the
+	 * needed number of WQEs exceeds the maximum.
+	 */
+	max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
+			     mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift));
+	if (params->log_rq_mtu_frames > max_mtu_pkts) {
+		mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
+			      1 << params->log_rq_mtu_frames, xsk->chunk_size);
+		return -EINVAL;
+	}
+
 	return 0;
 }
@@ -358,7 +485,7 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
 	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
 		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
 		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
-		       BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
+		       BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, NULL)) :
 		       BIT(params->log_rq_mtu_frames),
 		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
 		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
@@ -385,7 +512,7 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
 	     MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
 	    !mlx5e_mpwrq_validate_regular(mdev, params) &&
 	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
-	     !mlx5e_rx_is_linear_skb(params, NULL)))
+	     !mlx5e_rx_is_linear_skb(mdev, params, NULL)))
 		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
 	mlx5e_set_rq_type(mdev, params);
 	mlx5e_init_rq_type_params(mdev, params);
@@ -428,10 +555,10 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
 	int max_mtu;
 	int i;
 
-	if (mlx5e_rx_is_linear_skb(params, xsk)) {
+	if (mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
 		int frag_stride;
 
-		frag_stride = mlx5e_rx_get_linear_stride_sz(params, xsk);
+		frag_stride = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, false);
 
 		info->arr[0].frag_size = byte_count;
 		info->arr[0].frag_stride = frag_stride;
@@ -528,7 +655,7 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
 	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
 	int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
 	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
-	int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(params, xsk));
+	int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
 	int wqe_size = BIT(log_stride_sz) * num_strides;
 
 	/* +1 is for the case that the pkt_per_rsrv dont consume the reservation
@@ -552,7 +679,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
 		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
 			log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
 		else
-			log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
+			log_cq_size = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk) +
 				mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
 		break;
 	default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -595,9 +722,11 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
 		u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
 		u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+		u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
 
 		if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
-						   log_wqe_num_of_strides)) {
+						   log_wqe_num_of_strides,
+						   page_shift)) {
 			mlx5_core_err(mdev,
 				      "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n",
 				      log_wqe_stride_size, log_wqe_num_of_strides);
@@ -608,7 +737,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
 			 log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
 		MLX5_SET(wq, wq, log_wqe_stride_size,
 			 log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
-		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
+		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
 		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
 			MLX5_SET(wq, wq, shampo_enable, true);
 			MLX5_SET(wq, wq, log_reservation_size,
@@ -797,7 +926,8 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
 	if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
 		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
 
-	wqebbs = mlx5e_mpwrq_umr_wqebbs(MLX5_MPWRQ_PAGES_PER_WQE) *
+	/* UMR WQEs for the regular RQ. */
+	wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, PAGE_SHIFT) *
 		(1 << mlx5e_get_rq_log_wq_sz(rqp->rqc));
 
 	/* If XDP program is attached, XSK may be turned on at any time without
@@ -866,7 +996,7 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
 	mlx5e_build_sq_param_common(mdev, param);
 	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
 	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
-	param->is_xdp_mb = !mlx5e_rx_is_linear_skb(params, xsk);
+	param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
 	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
 }
```
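As a cross-check of the sizing helpers in this patch, the standalone program below replays the chain log_wqe_sz, pages_per_wqe, umr_wqe_sz, umr_wqebbs with assumed constants. The 128-byte UMR WQE header and the 512-byte maximum WQE size are illustrative placeholders, not values taken from the driver; only the MTT size (8), MTT alignment (64), and WQE basic block size (64) match the named mlx5 constants.

```c
/* Standalone replay of the MPWRQ sizing chain, with assumed constants.
 * UMR_WQE_HDR and MAX_WQE_SIZE are illustrative placeholders only.
 */
#include <stdio.h>

#define MTT_SZ		8	/* sizeof(struct mlx5_mtt) */
#define MTT_ALIGN	64	/* MLX5_UMR_MTT_ALIGNMENT */
#define WQE_BB		64	/* MLX5_SEND_WQE_BB */
#define UMR_WQE_HDR	128	/* assumed sizeof(struct mlx5e_umr_wqe) */
#define MAX_WQE_SIZE	512	/* assumed SQ WQE size limit */
#define MAX_LOG_WQE_SZ	18	/* MLX5_MPWRQ_MAX_LOG_WQE_SZ */

static unsigned int ilog2u(unsigned int v)
{
	unsigned int r = 0;
	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int page_shift = 12;	/* runtime input; 4K pages here */

	/* ALIGN_DOWN the MTT array space, then count MTTs (pages). */
	unsigned int max_pages = (MAX_WQE_SIZE - UMR_WQE_HDR) / MTT_ALIGN * MTT_ALIGN / MTT_SZ;
	unsigned int log_wqe_sz = ilog2u(max_pages) + page_shift;

	if (log_wqe_sz > MAX_LOG_WQE_SZ)
		log_wqe_sz = MAX_LOG_WQE_SZ;

	unsigned int pages_per_wqe = log_wqe_sz > page_shift ?
		1u << (log_wqe_sz - page_shift) : 1;
	unsigned int mtt_bytes = pages_per_wqe * MTT_SZ;
	unsigned int umr_wqe_sz = UMR_WQE_HDR +
		(mtt_bytes + MTT_ALIGN - 1) / MTT_ALIGN * MTT_ALIGN; /* ALIGN up */
	unsigned int umr_wqebbs = (umr_wqe_sz + WQE_BB - 1) / WQE_BB;

	/* Prints: log_wqe_sz=17 pages_per_wqe=32 umr_wqe_sz=384 umr_wqebbs=6 */
	printf("log_wqe_sz=%u pages_per_wqe=%u umr_wqe_sz=%u umr_wqebbs=%u\n",
	       log_wqe_sz, pages_per_wqe, umr_wqe_sz, umr_wqebbs);
	return 0;
}
```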