Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h')
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 93
1 file changed, 88 insertions, 5 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index bfd3e1161bc6..9334c9c3e208 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -5,14 +5,47 @@
#define __MLX5_EN_TXRX_H___
#include "en.h"
+#include <linux/indirect_call_wrapper.h>
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
enum mlx5e_icosq_wqe_type {
MLX5E_ICOSQ_WQE_NOP,
MLX5E_ICOSQ_WQE_UMR_RX,
+#ifdef CONFIG_MLX5_EN_TLS
+ MLX5E_ICOSQ_WQE_UMR_TLS,
+ MLX5E_ICOSQ_WQE_SET_PSV_TLS,
+ MLX5E_ICOSQ_WQE_GET_PSV_TLS,
+#endif
};
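
A hedged sketch of how the ICOSQ completion path can dispatch on these new WQE types. The wqe_type field and the handler itself are assumptions, loosely modeled on mlx5e_poll_ico_cq() in en_txrx.c; only the enum values and the union members come from this header:

	static void mlx5e_icosq_wi_complete(struct mlx5e_icosq *sq,
					    struct mlx5e_icosq_wqe_info *wi)
	{
		switch (wi->wqe_type) {
		case MLX5E_ICOSQ_WQE_NOP:
			break;					/* nothing to reclaim */
		case MLX5E_ICOSQ_WQE_UMR_RX:
			wi->umr.rq->mpwqe.umr_completed++;	/* RX buffers ready */
			break;
	#ifdef CONFIG_MLX5_EN_TLS
		case MLX5E_ICOSQ_WQE_UMR_TLS:
			break;		/* static params posted; nothing to do */
		case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
			/* resume the kTLS RX flow in wi->tls_set_params.priv_rx */
			break;
		case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
			/* hand wi->tls_get_params.buf to the resync handler */
			break;
	#endif
		default:
			netdev_WARN_ONCE(sq->channel->netdev,
					 "Bad WQE type in ICOSQ: 0x%x\n",
					 wi->wqe_type);
		}
	}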
+/* General */
+void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
+void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
+int mlx5e_napi_poll(struct napi_struct *napi, int budget);
+int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
+
+/* RX */
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
+void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info,
+ bool recycle);
+INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
+INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
+int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
+void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
+
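The INDIRECT_CALLABLE_DECLARE() annotations pair with the INDIRECT_CALL_*() macros from <linux/indirect_call_wrapper.h>, which replace a retpoline-expensive indirect call with cheap direct-call comparisons when CONFIG_RETPOLINE is enabled. A minimal sketch of how the NAPI loop might invoke the RQ post hook; the rq->post_wqes function pointer is assumed from the surrounding driver:

	/* Prefer direct calls to the two known implementations;
	 * fall back to a plain indirect call for anything else.
	 */
	static bool mlx5e_rq_post(struct mlx5e_rq *rq)
	{
		return INDIRECT_CALL_2(rq->post_wqes,
				       mlx5e_post_rx_mpwqes,	/* striding RQ */
				       mlx5e_post_rx_wqes,	/* legacy RQ */
				       rq);
	}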
+/* TX */
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
+void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
+void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
+
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
@@ -114,9 +147,19 @@ struct mlx5e_icosq_wqe_info {
struct {
struct mlx5e_rq *rq;
} umr;
+#ifdef CONFIG_MLX5_EN_TLS
+ struct {
+ struct mlx5e_ktls_offload_context_rx *priv_rx;
+ } tls_set_params;
+ struct {
+ struct mlx5e_ktls_rx_resync_buf *buf;
+ } tls_get_params;
+#endif
};
};
+void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq);
+
static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
struct mlx5_wq_cyc *wq = &sq->wq;
@@ -182,7 +225,7 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg)
{
- return cseg && !!cseg->tisn;
+ return cseg && !!cseg->tis_tir_num;
}
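
The rename reflects that this control-segment field now carries either a TIS number (TX kTLS) or a TIR number (RX kTLS), not just a TIS. For reference, the field sits in a union in include/linux/mlx5/qp.h that looks roughly like the following; reproduced from memory, so verify the exact layout against the tree:

	struct mlx5_wqe_ctrl_seg {
		__be32 opmod_idx_opcode;
		__be32 qpn_ds;
		u8     signature;
		u8     rsvd[2];
		u8     fm_ce_se;
		union {
			__be32 general_id;
			__be32 imm;
			__be32 umr_mkey;
			__be32 tis_tir_num;	/* tested by mlx5e_transport_inline_tx_wqe() */
		};
	};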
static inline u8
@@ -253,7 +296,7 @@ static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
}
}
-static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 sqn,
+static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
struct mlx5_err_cqe *err_cqe)
{
struct mlx5_cqwq *wq = &cq->wq;
@@ -262,13 +305,53 @@ static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 sqn,
ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
netdev_err(cq->channel->netdev,
- "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
- cq->mcq.cqn, ci, sqn,
+ "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
+ cq->mcq.cqn, ci, qn,
get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
err_cqe->syndrome, err_cqe->vendor_err_synd);
mlx5_dump_err_cqe(cq->mdev, err_cqe);
}
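
Widening sqn to a generic qn lets both SQ and RQ completion paths report errors through the same helper. Illustrative call sites, assuming the usual sqn/rqn and embedded cq members on mlx5e_txqsq and mlx5e_rq:

	/* TX completion error path */
	mlx5e_dump_error_cqe(&sq->cq, sq->sqn, (struct mlx5_err_cqe *)cqe);

	/* RX completion error path */
	mlx5e_dump_error_cqe(&rq->cq, rq->rqn, (struct mlx5_err_cqe *)cqe);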
+static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
+{
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+ default:
+ return mlx5_wq_cyc_get_size(&rq->wqe.wq);
+ }
+}
+
+static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
+{
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return rq->mpwqe.wq.cur_sz;
+ default:
+ return rq->wqe.wq.cur_sz;
+ }
+}
+
+static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq)
+{
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return mlx5_wq_ll_get_head(&rq->mpwqe.wq);
+ default:
+ return mlx5_wq_cyc_get_head(&rq->wqe.wq);
+ }
+}
+
+static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)
+{
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return mlx5_wq_ll_get_counter(&rq->mpwqe.wq);
+ default:
+ return mlx5_wq_cyc_get_counter(&rq->wqe.wq);
+ }
+}
+
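These wrappers hide the WQ-type split (striding/mpwqe vs. cyclic/legacy) from callers. A small hedged example of how a health or recovery path might use them to report RQ fill state; the netdev and rqn members on mlx5e_rq are assumed from the surrounding driver:

	/* Sketch: report how full an RQ is, independent of its WQ type. */
	static void mlx5e_rq_report_fill(struct mlx5e_rq *rq)
	{
		u32 sz     = mlx5e_rqwq_get_size(rq);
		u32 cur_sz = mlx5e_rqwq_get_cur_sz(rq);

		netdev_info(rq->netdev,
			    "RQ 0x%x: %u/%u WQEs posted, head %u, wqe_counter %u\n",
			    rq->rqn, cur_sz, sz,
			    mlx5e_rqwq_get_head(rq),
			    mlx5e_rqwq_get_wqe_counter(rq));
	}
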
/* SW parser related functions */
struct mlx5e_swp_spec {
@@ -305,7 +388,7 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
switch (swp_spec->tun_l4_proto) {
case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
- /* fall through */
+ fallthrough;
case IPPROTO_TCP:
eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
break;
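
The fallthrough pseudo-keyword comes from <linux/compiler_attributes.h>; unlike the old comment form, it survives preprocessing, so -Wimplicit-fallthrough can verify that the fall-through is intentional. Its definition is roughly:

	#if __has_attribute(__fallthrough__)
	# define fallthrough	__attribute__((__fallthrough__))
	#else
	# define fallthrough	do {} while (0)	/* fallthrough */
	#endif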