Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_rx.c')
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 116
 1 file changed, 69 insertions(+), 47 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e2beb89c1832..dbb1c6323967 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -42,6 +42,7 @@
#include "en_tc.h"
#include "eswitch.h"
#include "en_rep.h"
+#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
@@ -300,7 +301,7 @@ static inline void mlx5e_page_release(struct mlx5e_rq *rq,
* put into the Reuse Ring, because there is no way to return
* the page to the userspace when the interface goes down.
*/
- mlx5e_xsk_page_release(rq, dma_info);
+ xsk_buff_free(dma_info->xsk);
else
mlx5e_page_release_dynamic(rq, dma_info, recycle);
}
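
For context, the dma_info->xsk member used above comes from the page/XSK union in struct mlx5e_dma_info; the sketch below approximates that definition (it lives in en.h, not in this diff) to show why the release path can simply branch on the RQ type:

/* Approximate shape of struct mlx5e_dma_info in this series: a regular RQ
 * tracks a page, an XSK RQ tracks the xdp_buff handed out by
 * xsk_buff_alloc(), so releasing an XSK frame is just xsk_buff_free() and
 * never goes through the page Reuse Ring. */
struct mlx5e_dma_info {
	dma_addr_t addr;
	union {
		struct page *page;
		struct xdp_buff *xsk;
	};
};
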
@@ -385,7 +386,11 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
if (rq->umem) {
int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
- if (unlikely(!mlx5e_xsk_pages_enough_umem(rq, pages_desired)))
+ /* Check in advance that we have enough frames, instead of
+ * allocating one-by-one, failing and moving frames to the
+ * Reuse Ring.
+ */
+ if (unlikely(!xsk_buff_can_alloc(rq->umem, pages_desired)))
return -ENOMEM;
}
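
The same "check capacity first, then allocate" idea can be written as a small standalone helper. This is only an illustrative sketch (the helper name and bufs array are made up), assuming the xsk_buff_can_alloc()/xsk_buff_alloc()/xsk_buff_free() API from <net/xdp_sock_drv.h> that this series converts to:

#include <net/xdp_sock_drv.h>

/* Illustrative only: verify the fill ring can satisfy the whole bulk before
 * allocating, so a mid-bulk failure never leaves half-initialized WQEs or
 * forces frames back through a recycle path. */
static int sketch_xsk_alloc_bulk(struct xdp_umem *umem,
				 struct xdp_buff **bufs, u32 count)
{
	u32 i;

	if (unlikely(!xsk_buff_can_alloc(umem, count)))
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		bufs[i] = xsk_buff_alloc(umem);
		if (unlikely(!bufs[i])) {
			/* Not expected after the check above; unwind anyway. */
			while (i--)
				xsk_buff_free(bufs[i]);
			return -ENOMEM;
		}
	}
	return 0;
}
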
@@ -468,22 +473,6 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
mlx5_wq_ll_update_db_record(wq);
}
-static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
- struct mlx5_wq_cyc *wq,
- u16 pi, u16 nnops)
-{
- struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
-
- edge_wi = wi + nnops;
-
- /* fill sq frag edge with nops to avoid wqe wrapping two pages */
- for (; wi < edge_wi; wi++) {
- wi->opcode = MLX5_OPCODE_NOP;
- wi->num_wqebbs = 1;
- mlx5e_post_nop(wq, sq->sqn, &sq->pc);
- }
-}
-
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
@@ -492,23 +481,20 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
- u16 pi, contig_wqebbs_room;
+ u16 pi;
int err;
int i;
+ /* Check in advance that we have enough frames, instead of allocating
+ * one-by-one, failing and moving frames to the Reuse Ring.
+ */
if (rq->umem &&
- unlikely(!mlx5e_xsk_pages_enough_umem(rq, MLX5_MPWRQ_PAGES_PER_WQE))) {
+ unlikely(!xsk_buff_can_alloc(rq->umem, MLX5_MPWRQ_PAGES_PER_WQE))) {
err = -ENOMEM;
goto err;
}
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
- if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) {
- mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- }
-
+ pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS);
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
@@ -527,9 +513,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
MLX5_OPCODE_UMR);
umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
- sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
- sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS;
- sq->db.ico_wqe[pi].umr.rq = rq;
+ sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
+ .num_wqebbs = MLX5E_UMR_WQEBBS,
+ .umr.rq = rq,
+ };
+
sq->pc += MLX5E_UMR_WQEBBS;
sq->doorbell_cseg = &umr_wqe->ctrl;
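
mlx5e_icosq_get_next_pi() replaces the open-coded NOP padding that this patch removes (mlx5e_fill_icosq_frag_edge plus the contig_wqebbs_room check). Its definition is not part of this diff; the sketch below reconstructs the likely behaviour from the removed code and the new wqe_info layout, so treat the details as assumptions:

/* Reconstruction, not the in-tree definition: return a producer index at
 * which 'size' WQEBBs fit contiguously, padding the tail of the ring with
 * NOPs first if the WQE would otherwise wrap across the ring edge. */
static u16 sketch_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);

	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_icosq_wqe_info *wi = &sq->db.wqe_info[pi];
		struct mlx5e_icosq_wqe_info *edge_wi = wi + contig_wqebbs;

		/* Fill the SQ frag edge with NOPs so the real WQE starts
		 * at the beginning of the ring instead of wrapping. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_icosq_wqe_info) {
				.wqe_type   = MLX5E_ICOSQ_WQE_NOP,
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}
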
@@ -618,33 +607,38 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
wqe_counter = be16_to_cpu(cqe->wqe_counter);
do {
- struct mlx5e_sq_wqe_info *wi;
+ struct mlx5e_icosq_wqe_info *wi;
u16 ci;
last_wqe = (sqcc == wqe_counter);
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
- wi = &sq->db.ico_wqe[ci];
+ wi = &sq->db.wqe_info[ci];
sqcc += wi->num_wqebbs;
if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
netdev_WARN_ONCE(cq->channel->netdev,
"Bad OP in ICOSQ CQE: 0x%x\n",
get_cqe_opcode(cqe));
+ mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
+ (struct mlx5_err_cqe *)cqe);
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(cq->channel->priv->wq, &sq->recover_work);
break;
}
- if (likely(wi->opcode == MLX5_OPCODE_UMR))
+ switch (wi->wqe_type) {
+ case MLX5E_ICOSQ_WQE_UMR_RX:
wi->umr.rq->mpwqe.umr_completed++;
- else if (unlikely(wi->opcode != MLX5_OPCODE_NOP))
+ break;
+ case MLX5E_ICOSQ_WQE_NOP:
+ break;
+ default:
netdev_WARN_ONCE(cq->channel->netdev,
- "Bad OPCODE in ICOSQ WQE info: 0x%x\n",
- wi->opcode);
-
+ "Bad WQE type in ICOSQ WQE info: 0x%x\n",
+ wi->wqe_type);
+ }
} while (!last_wqe);
-
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
sq->cc = sqcc;
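
The switch above dispatches on wi->wqe_type rather than a raw device opcode; the enum and the struct it reads are defined elsewhere in the series (en.h / en/txrx.h), so the following is an approximation of their shape for reference, not a verbatim copy:

/* Approximate definitions backing the dispatch above: the ICOSQ now records
 * a driver-level WQE type per posted WQE instead of reusing hardware
 * opcodes, which leaves room for more WQE kinds than just UMR and NOP. */
enum mlx5e_icosq_wqe_type {
	MLX5E_ICOSQ_WQE_NOP,
	MLX5E_ICOSQ_WQE_UMR_RX,
};

struct mlx5e_icosq_wqe_info {
	u8 wqe_type;
	u8 num_wqebbs;
	union {
		struct {
			struct mlx5e_rq *rq;
		} umr;
	};
};
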
@@ -1058,12 +1052,24 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
return skb;
}
+static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
+ u32 len, struct xdp_buff *xdp)
+{
+ xdp->data_hard_start = va;
+ xdp->data = va + headroom;
+ xdp_set_data_meta_invalid(xdp);
+ xdp->data_end = xdp->data + len;
+ xdp->rxq = &rq->xdp_rxq;
+ xdp->frame_sz = rq->buff.frame0_sz;
+}
+
struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
struct mlx5e_dma_info *di = wi->di;
u16 rx_headroom = rq->buff.headroom;
+ struct xdp_buff xdp;
struct sk_buff *skb;
void *va, *data;
bool consumed;
@@ -1079,11 +1085,14 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
prefetch(data);
rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, false);
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
+ consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp);
rcu_read_unlock();
if (consumed)
return NULL; /* page/packet was consumed by XDP */
+ rx_headroom = xdp.data - xdp.data_hard_start;
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
if (unlikely(!skb))
return NULL;
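
Re-reading xdp.data after mlx5e_xdp_handle() matters because an attached XDP program may move the data pointer before returning XDP_PASS. A minimal, hypothetical program that does exactly that is sketched below, using only the standard bpf_xdp_adjust_head() helper:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical XDP program: pop 4 bytes from the front of the frame and let
 * the packet continue to the stack. After XDP_PASS, xdp.data - xdp.data_hard_start
 * in the driver is larger than the headroom it originally reserved, which is
 * why rx_headroom and frag_size are recomputed above. */
SEC("xdp")
int xdp_pop_four_bytes(struct xdp_md *ctx)
{
	if (bpf_xdp_adjust_head(ctx, 4))
		return XDP_ABORTED;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
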
@@ -1229,12 +1238,12 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
- if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
goto free_wqe;
napi_gro_receive(rq->cq.napi, skb);
- mlx5_tc_rep_post_napi_receive(&tc_priv);
+ mlx5_rep_tc_post_napi_receive(&tc_priv);
free_wqe:
mlx5e_free_rx_wqe(rq, wi, true);
@@ -1285,12 +1294,12 @@ void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
- if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
goto mpwrq_cqe_out;
napi_gro_receive(rq->cq.napi, skb);
- mlx5_tc_rep_post_napi_receive(&tc_priv);
+ mlx5_rep_tc_post_napi_receive(&tc_priv);
mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
@@ -1356,6 +1365,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
u16 rx_headroom = rq->buff.headroom;
u32 cqe_bcnt32 = cqe_bcnt;
+ struct xdp_buff xdp;
struct sk_buff *skb;
void *va, *data;
u32 frag_size;
@@ -1377,7 +1387,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
prefetch(data);
rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32, false);
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
+ consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp);
rcu_read_unlock();
if (consumed) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
@@ -1385,6 +1396,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL; /* page/packet was consumed by XDP */
}
+ rx_headroom = xdp.data - xdp.data_hard_start;
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
if (unlikely(!skb))
return NULL;
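
Both linear RX paths recompute frag_size with MLX5_SKB_FRAG_SZ() after XDP has run. For reference, that macro (defined in en.h, quoted here from memory, so verify against the tree) accounts for the skb_shared_info that build_skb() places at the end of the buffer:

/* Approximate definition from en.h: the buffer handed to
 * mlx5e_build_linear_skb() must cover the aligned payload plus the
 * skb_shared_info tail expected by build_skb(), so frag_size grows when an
 * XDP program enlarges the effective headroom. */
#define MLX5_SKB_FRAG_SZ(len)	(SKB_DATA_ALIGN(len) + \
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
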
@@ -1501,6 +1514,7 @@ out:
#ifdef CONFIG_MLX5_CORE_IPOIB
+#define MLX5_IB_GRH_SGID_OFFSET 8
#define MLX5_IB_GRH_DGID_OFFSET 24
#define MLX5_GID_SIZE 16
@@ -1514,6 +1528,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
struct net_device *netdev;
struct mlx5e_priv *priv;
char *pseudo_header;
+ u32 flags_rqpn;
u32 qpn;
u8 *dgid;
u8 g;
@@ -1535,7 +1550,8 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
tstamp = &priv->tstamp;
stats = &priv->channel_stats[rq->ix].rq;
- g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
+ flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
+ g = (flags_rqpn >> 28) & 3;
dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
if ((!g) || dgid[0] != 0xff)
skb->pkt_type = PACKET_HOST;
@@ -1544,9 +1560,15 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
else
skb->pkt_type = PACKET_MULTICAST;
- /* TODO: IB/ipoib: Allow mcast packets from other VFs
- * 68996a6e760e5c74654723eeb57bf65628ae87f4
+ /* Drop packets that this interface sent, ie multicast packets
+ * that the HCA has replicated.
*/
+ if (g && (qpn == (flags_rqpn & 0xffffff)) &&
+ (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
+ MLX5_GID_SIZE) == 0)) {
+ skb->dev = NULL;
+ return;
+ }
skb_pull(skb, MLX5_IB_GRH_BYTES);
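
The new drop condition packs three checks into one if(); pulled apart into a hypothetical predicate it reads as below. The offsets rely on the IPoIB hardware-address layout (1 flags byte, 3 QPN bytes, then the 16-byte port GID, so dev_addr + 4 is the local GID) and on the GRH carrying the source GID at byte 8:

/* Hypothetical helper, equivalent to the check above: the packet was sent by
 * this interface if it carries a GRH (g), its source QPN (low 24 bits of
 * flags_rqpn) equals the receiving QPN, and the GRH source GID matches the
 * port GID embedded in the IPoIB hardware address. Such multicast frames are
 * replicas from the HCA and must be dropped. */
static bool mlx5i_rx_is_own_mcast(struct net_device *netdev,
				  struct sk_buff *skb,
				  u32 flags_rqpn, u32 qpn, u8 g)
{
	if (!g || qpn != (flags_rqpn & 0xffffff))
		return false;

	return !memcmp(netdev->dev_addr + 4,
		       skb->data + MLX5_IB_GRH_SGID_OFFSET, MLX5_GID_SIZE);
}
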