Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_tx.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 119
 1 file changed, 98 insertions(+), 21 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 82b4419af9d4..e47e2a0059d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -32,6 +32,7 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
+#include <linux/ptp_classify.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
@@ -39,6 +40,7 @@
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "lib/clock.h"
+#include "en/ptp.h"
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
@@ -66,14 +68,73 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
}
#endif
+static bool mlx5e_use_ptpsq(struct sk_buff *skb)
+{
+ struct flow_keys fk;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return false;
+
+ if (fk.basic.n_proto == htons(ETH_P_1588))
+ return true;
+
+ if (fk.basic.n_proto != htons(ETH_P_IP) &&
+ fk.basic.n_proto != htons(ETH_P_IPV6))
+ return false;
+
+ return (fk.basic.ip_proto == IPPROTO_UDP &&
+ fk.ports.dst == htons(PTP_EV_PORT));
+}
+
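[Editorial note] A hedged user-space sketch (not part of the patch) of traffic that takes this new path: SKBTX_HW_TSTAMP, tested in mlx5e_select_queue() below, is set when the sending socket requests SOF_TIMESTAMPING_TX_HARDWARE, and mlx5e_use_ptpsq() then matches UDP datagrams addressed to the PTP event port (319). The helper name, destination address, and message body are placeholders; hardware timestamping is assumed to be already enabled on the interface via SIOCSHWTSTAMP (e.g. by ptp4l or hwstamp_ctl).

#include <arpa/inet.h>
#include <linux/net_tstamp.h>
#include <sys/socket.h>

static int send_ptp_event(const char *dst_ip)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(319),		/* PTP_EV_PORT */
	};
	char msg[44] = { 0 };	/* zeroed stand-in for a PTP Sync frame */

	if (fd < 0)
		return -1;
	/* Request HW TX timestamps -> the skb gets SKBTX_HW_TSTAMP. */
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
	inet_pton(AF_INET, dst_ip, &dst.sin_addr);
	return sendto(fd, msg, sizeof(msg), 0,
		      (struct sockaddr *)&dst, sizeof(dst));
}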
+static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int up = 0;
+
+ if (!netdev_get_num_tc(dev))
+ goto return_txq;
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
+ up = mlx5e_get_dscp_up(priv, skb);
+ else
+#endif
+ if (skb_vlan_tag_present(skb))
+ up = skb_vlan_tag_get_prio(skb);
+
+return_txq:
+ return priv->port_ptp_tc2realtxq[up];
+}
+
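[Editorial note] The lookup above assumes the PTP txqs are registered after the num_tc * num_channels regular txqs, one per traffic class. A hedged sketch of how such a table could be populated (the real initialization lives elsewhere in the driver; the helper name is made up):

/* Illustrative only: PTP txq for TC n sits at num_tc * num_ch + n. */
static void mlx5e_fill_ptp_tc2realtxq_sketch(u16 *tc2realtxq, int num_tc,
					     int num_ch)
{
	int tc;

	for (tc = 0; tc < num_tc; tc++)
		tc2realtxq[tc] = num_tc * num_ch + tc;
}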
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
- int txq_ix = netdev_pick_tx(dev, skb, NULL);
struct mlx5e_priv *priv = netdev_priv(dev);
+ int txq_ix;
int up = 0;
int ch_ix;
+ if (unlikely(priv->channels.port_ptp)) {
+ int num_tc_x_num_ch;
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ mlx5e_use_ptpsq(skb))
+ return mlx5e_select_ptpsq(dev, skb);
+
+ /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
+ num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
+
+ txq_ix = netdev_pick_tx(dev, skb, NULL);
+ /* netdev_pick_tx() is not aware of the PTP txqs and may pick one.
+ * In that case, remap the pick onto the regular txqs; the driver
+ * selects PTP txqs only via mlx5e_select_ptpsq().
+ */
+ if (unlikely(txq_ix >= num_tc_x_num_ch))
+ txq_ix %= num_tc_x_num_ch;
+ } else {
+ txq_ix = netdev_pick_tx(dev, skb, NULL);
+ }
+
if (!netdev_get_num_tc(dev))
return txq_ix;
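[Editorial note] A worked example of the remap above, under the queue layout assumed earlier: with 4 channels and 2 TCs, num_tc_x_num_ch is 8 and the PTP txqs occupy indices 8 and 9. Should netdev_pick_tx() return 9, the modulo folds it back onto regular txq 1; picks that already land in [0, 8) pass through unchanged.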
@@ -144,7 +205,9 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}
-/* RM 2311217: no L4 inner checksum for IPsec tunnel type packet */
+/* If the IP packet is not CHECKSUM_PARTIAL (e.g. an ICMP packet),
+ * the L3 checksum flag still needs to be set for IPsec
+ */
static void
ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
@@ -154,19 +217,15 @@ ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
sq->stats->csum_partial_inner++;
} else {
- eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
}
}
static inline void
-mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_accel_tx_state *accel,
+ struct mlx5_wqe_eth_seg *eseg)
{
- if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
- ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
- return;
- }
-
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
if (skb->encapsulation) {
@@ -177,6 +236,14 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
}
+#ifdef CONFIG_MLX5_EN_TLS
+ } else if (unlikely(accel && accel->tls.tls_tisn)) {
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+ sq->stats->csum_partial++;
+#endif
+ } else if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
+ ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
} else
sq->stats->csum_none++;
}
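[Editorial note] The resulting priority order is: CHECKSUM_PARTIAL skbs get the usual L3/L4 flags; TLS-offloaded skbs (accel && accel->tls.tls_tisn) get L3 | L4 explicitly; IPsec-marked skbs fall through to ipsec_txwqe_build_eseg_csum(); anything else counts as csum_none. The accel state exists only on the mlx5e_xmit() path; callers such as mlx5e_sq_xmit_simple() and mlx5i_sq_xmit() pass NULL and therefore can never take the TLS branch.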
@@ -396,6 +463,12 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_tx_check_stop(sq);
+ if (unlikely(sq->ptpsq)) {
+ mlx5e_skb_cb_hwtstamp_init(skb);
+ mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb);
+ skb_get(skb);
+ }
+
send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more);
if (send_doorbell)
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
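[Editorial note] The skb_get() takes an extra reference so the skb outlives napi_consume_skb() at the regular CQE completion and can still be matched against the later port timestamp CQE. A hedged sketch of the FIFO idea (struct mlx5e_skb_fifo itself is defined in the driver headers; names here are illustrative):

/* A power-of-two ring of skb pointers indexed by free-running
 * producer/consumer counters; push at xmit, pop at completion.
 */
struct skb_fifo_sketch {
	struct sk_buff **fifo;
	u16 pc;			/* producer counter */
	u16 cc;			/* consumer counter */
	u16 mask;		/* size - 1, size is a power of two */
};

static void skb_fifo_push_sketch(struct skb_fifo_sketch *f, struct sk_buff *skb)
{
	f->fifo[f->pc++ & f->mask] = skb;
}

static struct sk_buff *skb_fifo_pop_sketch(struct skb_fifo_sketch *f)
{
	return f->fifo[f->cc++ & f->mask];
}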
@@ -573,7 +646,7 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
goto err_unmap;
mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
- mlx5e_skb_fifo_push(sq, skb);
+ mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
mlx5e_tx_mpwqe_add_dseg(sq, &txd);
@@ -608,12 +681,13 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
}
static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+ struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
+ struct mlx5_wqe_eth_seg *eseg)
{
if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
return false;
- mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+ mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
return true;
}
@@ -640,7 +714,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
struct mlx5_wqe_eth_seg eseg = {};
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &eseg)))
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
@@ -657,7 +731,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
/* May update the WQE, but may not post other WQEs. */
mlx5e_accel_tx_finish(sq, wqe, &accel,
(struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &wqe->eth)))
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
@@ -676,7 +750,7 @@ void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit
mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
- mlx5e_txwqe_build_eseg_csum(sq, skb, &wqe->eth);
+ mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, &wqe->eth);
mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, xmit_more);
}
@@ -700,7 +774,11 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u64 ts = get_cqe_ts(cqe);
hwts.hwtstamp = mlx5_timecounter_cyc2time(sq->clock, ts);
- skb_tstamp_tx(skb, &hwts);
+ if (sq->ptpsq)
+ mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
+ hwts.hwtstamp, sq->ptpsq->cq_stats);
+ else
+ skb_tstamp_tx(skb, &hwts);
}
napi_consume_skb(skb, napi_budget);
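[Editorial note] A hedged sketch of the rendezvous mlx5e_skb_cb_hwtstamp_handler() performs (the real handler lives in en/ptp.c and also accounts latency statistics; names below are illustrative): two completions report a timestamp for the same skb, the regular SQ CQE and the port TX CQE; whichever arrives first is parked in skb->cb, and the stack is notified once both are in.

struct hwtstamp_cb_sketch {
	ktime_t cqe_hwtstamp;
	ktime_t port_hwtstamp;
};

static void skb_cb_hwtstamp_sketch(struct sk_buff *skb, bool from_port,
				   ktime_t hwtstamp)
{
	struct hwtstamp_cb_sketch *cb = (void *)skb->cb;
	struct skb_shared_hwtstamps hwts = {};

	if (from_port)
		cb->port_hwtstamp = hwtstamp;
	else
		cb->cqe_hwtstamp = hwtstamp;

	if (!cb->cqe_hwtstamp || !cb->port_hwtstamp)
		return;	/* wait for the other completion */

	/* Report the port timestamp; it is taken closest to the wire. */
	hwts.hwtstamp = cb->port_hwtstamp;
	skb_tstamp_tx(skb, &hwts);
	consume_skb(skb);	/* release the reference taken at xmit */
}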
@@ -712,7 +790,7 @@ static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_t
int i;
for (i = 0; i < wi->num_fifo_pkts; i++) {
- struct sk_buff *skb = mlx5e_skb_fifo_pop(sq);
+ struct sk_buff *skb = mlx5e_skb_fifo_pop(&sq->db.skb_fifo);
mlx5e_consume_skb(sq, skb, cqe, napi_budget);
}
@@ -798,8 +876,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
- queue_work(cq->channel->priv->wq,
- &sq->recover_work);
+ queue_work(cq->priv->wq, &sq->recover_work);
}
stats->cqe_err++;
}
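[Editorial note] Likely rationale for the cq->priv change: PTP SQs complete on CQs that are not attached to a regular channel, so cq->channel cannot be dereferenced for them; queueing the recover work through the CQ's own priv pointer works for both cases.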
@@ -833,7 +910,7 @@ static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_
int i;
for (i = 0; i < wi->num_fifo_pkts; i++)
- dev_kfree_skb_any(mlx5e_skb_fifo_pop(sq));
+ dev_kfree_skb_any(mlx5e_skb_fifo_pop(&sq->db.skb_fifo));
}
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
@@ -945,7 +1022,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);
- mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+ mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);
eseg->mss = attr.mss;