// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause

/* Packet transmit logic for Mellanox Gigabit Ethernet driver
 *
 * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
 */

#include <linux/skbuff.h>

#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"

/* Transmit Initialization
 * 1) Allocates TX WQE array using coherent DMA mapping
 * 2) Allocates TX completion counter using coherent DMA mapping
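 * Both allocations use coherent DMA, so the CPU and the hardware
 * share a consistent view of the WQE array and the completion
 * counter without explicit dma_sync calls.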
 */
int mlxbf_gige_tx_init(struct mlxbf_gige *priv)
{
	size_t size;

	size = MLXBF_GIGE_TX_WQE_SZ * priv->tx_q_entries;
	priv->tx_wqe_base = dma_alloc_coherent(priv->dev, size,
					       &priv->tx_wqe_base_dma,
					       GFP_KERNEL);
	if (!priv->tx_wqe_base)
		return -ENOMEM;

	priv->tx_wqe_next = priv->tx_wqe_base;

	/* Write TX WQE base address into MMIO reg */
	writeq(priv->tx_wqe_base_dma, priv->base + MLXBF_GIGE_TX_WQ_BASE);

	/* Allocate address for TX completion count */
	priv->tx_cc = dma_alloc_coherent(priv->dev, MLXBF_GIGE_TX_CC_SZ,
					 &priv->tx_cc_dma, GFP_KERNEL);
	if (!priv->tx_cc) {
		dma_free_coherent(priv->dev, size,
				  priv->tx_wqe_base, priv->tx_wqe_base_dma);
		return -ENOMEM;
	}

	/* Write TX CC base address into MMIO reg */
	writeq(priv->tx_cc_dma, priv->base + MLXBF_GIGE_TX_CI_UPDATE_ADDRESS);

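	/* Inform HW of the TX queue depth as a log2 value; this
	 * assumes tx_q_entries is a power of two, so that ilog2()
	 * describes the ring size exactly.
	 */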
	writeq(ilog2(priv->tx_q_entries),
	       priv->base + MLXBF_GIGE_TX_WQ_SIZE_LOG2);

	priv->prev_tx_ci = 0;
	priv->tx_pi = 0;

	return 0;
}

/* Transmit Deinitialization
 * This routine will free allocations done by mlxbf_gige_tx_init(),
 * namely the TX WQE array and the TX completion counter
 */
void mlxbf_gige_tx_deinit(struct mlxbf_gige *priv)
{
	u64 *tx_wqe_addr;
	size_t size;
	int i;

	tx_wqe_addr = priv->tx_wqe_base;

	for (i = 0; i < priv->tx_q_entries; i++) {
		if (priv->tx_skb[i]) {
			dma_unmap_single(priv->dev, *tx_wqe_addr,
					 priv->tx_skb[i]->len, DMA_TO_DEVICE);
			dev_kfree_skb(priv->tx_skb[i]);
			priv->tx_skb[i] = NULL;
		}
		tx_wqe_addr += MLXBF_GIGE_TX_WQE_SZ_QWORDS;
	}

	size = MLXBF_GIGE_TX_WQE_SZ * priv->tx_q_entries;
	dma_free_coherent(priv->dev, size,
			  priv->tx_wqe_base, priv->tx_wqe_base_dma);

	dma_free_coherent(priv->dev, MLXBF_GIGE_TX_CC_SZ,
			  priv->tx_cc, priv->tx_cc_dma);

	priv->tx_wqe_base = NULL;
	priv->tx_wqe_base_dma = 0;
	priv->tx_cc = NULL;
	priv->tx_cc_dma = 0;
	priv->tx_wqe_next = NULL;
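	/* Clear the WQE base and CC update addresses in HW so it
	 * no longer references the freed DMA buffers.
	 */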
	writeq(0, priv->base + MLXBF_GIGE_TX_WQ_BASE);
	writeq(0, priv->base + MLXBF_GIGE_TX_CI_UPDATE_ADDRESS);
}

/* Function that returns status of TX ring:
 *        0: TX ring is full, i.e. there are no
 *           unused entries in the TX ring.
 * non-zero: TX ring is not full, i.e. there are
 *           some unused entries in the TX ring.
 *           The non-zero value is a measure of
 *           how many TX entries are available, but
 *           it is not the exact number of available
 *           entries (see below).
 *
 * The algorithm makes the assumption that if
 * (prev_tx_ci == tx_pi) then the TX ring is empty.
 * An empty ring therefore has only (tx_q_entries-1)
 * usable entries, which allows the algorithm to
 * differentiate an empty ring from a full ring.
 */
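/* Worked example, assuming a hypothetical queue of tx_q_entries = 128:
 *   - ring empty  (tx_pi == prev_tx_ci):       avail = 128 - 1 = 127
 *   - one in use  (tx_pi == prev_tx_ci + 1):
 *       avail = ((128 + prev_tx_ci - tx_pi) % 128) - 1 = 126
 *   - ring full   (tx_pi == prev_tx_ci + 127):
 *       avail = ((128 + prev_tx_ci - tx_pi) % 128) - 1 = 0
 */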
static u16 mlxbf_gige_tx_buffs_avail(struct mlxbf_gige *priv)
{
	unsigned long flags;
	u16 avail;

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->prev_tx_ci == priv->tx_pi)
		avail = priv->tx_q_entries - 1;
	else
		avail = ((priv->tx_q_entries + priv->prev_tx_ci - priv->tx_pi)
			  % priv->tx_q_entries) - 1;

	spin_unlock_irqrestore(&priv->lock, flags);

	return avail;
}

bool mlxbf_gige_handle_tx_complete(struct mlxbf_gige *priv)
{
	struct net_device_stats *stats;
	u16 tx_wqe_index;
	u64 *tx_wqe_addr;
	u64 tx_status;
	u16 tx_ci;

	tx_status = readq(priv->base + MLXBF_GIGE_TX_STATUS);
	if (tx_status & MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL)
		priv->stats.tx_fifo_full++;
	tx_ci = readq(priv->base + MLXBF_GIGE_TX_CONSUMER_INDEX);
	stats = &priv->netdev->stats;

	/* Transmit completion logic needs to loop until the completion
	 * index (in SW) equals the TX consumer index (from HW).  These
	 * indices are unsigned 16-bit values and the wrap case must be
	 * handled, i.e. the TX consumer index may have wrapped from
	 * 0xFFFF to 0 while the TX completion index is still < 0xFFFF.
	 */
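	/* For example, if prev_tx_ci == 0xFFFE and tx_ci == 1, the
	 * loop below processes completions 0xFFFE, 0xFFFF and 0, with
	 * prev_tx_ci wrapping naturally as an unsigned 16-bit value.
	 */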
	for (; priv->prev_tx_ci != tx_ci; priv->prev_tx_ci++) {
		tx_wqe_index = priv->prev_tx_ci % priv->tx_q_entries;
		/* Each TX WQE is 16 bytes: the first 64-bit word holds
		 * the DMA address of the 2KB TX buffer and the second
		 * 64-bit word holds metadata about the WQE, such as
		 * the packet length.
		 */
		tx_wqe_addr = priv->tx_wqe_base +
			       (tx_wqe_index * MLXBF_GIGE_TX_WQE_SZ_QWORDS);

		stats->tx_packets++;
		stats->tx_bytes += MLXBF_GIGE_TX_WQE_PKT_LEN(tx_wqe_addr);

		dma_unmap_single(priv->dev, *tx_wqe_addr,
				 priv->tx_skb[tx_wqe_index]->len, DMA_TO_DEVICE);
		dev_consume_skb_any(priv->tx_skb[tx_wqe_index]);
		priv->tx_skb[tx_wqe_index] = NULL;

		/* Ensure completion of updates across all cores */
		mb();
	}

	/* Since the TX ring was likely just drained, check if TX queue
	 * had previously been stopped and now that there are TX buffers
	 * available the TX queue can be awakened.
	 */
	if (netif_queue_stopped(priv->netdev) &&
	    mlxbf_gige_tx_buffs_avail(priv))
		netif_wake_queue(priv->netdev);

	return true;
}

/* Function to advance the tx_wqe_next pointer to next TX WQE */
void mlxbf_gige_update_tx_wqe_next(struct mlxbf_gige *priv)
{
	/* Advance tx_wqe_next pointer */
	priv->tx_wqe_next += MLXBF_GIGE_TX_WQE_SZ_QWORDS;

	/* If the 'next' pointer has advanced past the end of the
	 * TX ring, wrap it back to the 'base' of the ring.
	 */
	if (priv->tx_wqe_next == (priv->tx_wqe_base +
				  (priv->tx_q_entries * MLXBF_GIGE_TX_WQE_SZ_QWORDS)))
		priv->tx_wqe_next = priv->tx_wqe_base;
}

netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct mlxbf_gige *priv = netdev_priv(netdev);
	unsigned long buff_addr, start_dma_page, end_dma_page;
	struct sk_buff *tx_skb;
	dma_addr_t tx_buf_dma;
	unsigned long flags;
	u64 *tx_wqe_addr;
	u64 word2;

	/* Drop the packet if it is oversized or cannot be linearized;
	 * hardware DMA expects a single contiguous buffer.
	 */
	if (skb->len > MLXBF_GIGE_DEFAULT_BUF_SZ || skb_linearize(skb)) {
		dev_kfree_skb(skb);
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	buff_addr = (unsigned long)skb->data;
	start_dma_page = buff_addr >> MLXBF_GIGE_DMA_PAGE_SHIFT;
	end_dma_page   = (buff_addr + skb->len - 1) >> MLXBF_GIGE_DMA_PAGE_SHIFT;

	/* Verify that the payload pointer and data length of the SKB
	 * to be transmitted do not violate the hardware DMA limitation
	 * that a buffer must not cross a DMA page boundary.
	 */
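	/* For example, assuming MLXBF_GIGE_DMA_PAGE_SHIFT describes a
	 * 4KB DMA page: a 1514-byte frame starting at page offset 0xF00
	 * would end at offset 0x14E9, crossing into the next DMA page
	 * and forcing the bounce-copy path below.
	 */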
	if (start_dma_page != end_dma_page) {
		/* DMA operation would fail as-is, alloc new aligned SKB */
		tx_skb = mlxbf_gige_alloc_skb(priv, skb->len,
					      &tx_buf_dma, DMA_TO_DEVICE);
		if (!tx_skb) {
			/* Free original skb, could not alloc new aligned SKB */
			dev_kfree_skb(skb);
			netdev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		skb_put_data(tx_skb, skb->data, skb->len);

		/* Free the original SKB */
		dev_kfree_skb(skb);
	} else {
		tx_skb = skb;
		tx_buf_dma = dma_map_single(priv->dev, skb->data,
					    skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, tx_buf_dma)) {
			dev_kfree_skb(skb);
			netdev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	}

	/* Get address of TX WQE */
	tx_wqe_addr = priv->tx_wqe_next;

	mlxbf_gige_update_tx_wqe_next(priv);

	/* Put DMA address of buffer into first 64-bit word of TX WQE */
	*tx_wqe_addr = tx_buf_dma;

	/* Set TX WQE pkt_len appropriately
	 * NOTE: GigE silicon will automatically pad up to
	 *       minimum packet length if needed.
	 */
	word2 = tx_skb->len & MLXBF_GIGE_TX_WQE_PKT_LEN_MASK;

	/* Write entire 2nd word of TX WQE */
	*(tx_wqe_addr + 1) = word2;

	spin_lock_irqsave(&priv->lock, flags);
	priv->tx_skb[priv->tx_pi % priv->tx_q_entries] = tx_skb;
	priv->tx_pi++;
	spin_unlock_irqrestore(&priv->lock, flags);

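	/* Ring the TX doorbell only when the stack reports no further
	 * packets pending (netdev_xmit_more() is false), batching
	 * multiple WQE posts into a single MMIO write.
	 */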
	if (!netdev_xmit_more()) {
		/* Create memory barrier before write to TX PI */
		wmb();
		writeq(priv->tx_pi, priv->base + MLXBF_GIGE_TX_PRODUCER_INDEX);
	}

	/* Check if the last TX entry was just used */
	if (!mlxbf_gige_tx_buffs_avail(priv)) {
		/* TX ring is full, inform stack */
		netif_stop_queue(netdev);

		/* Since there is no separate "TX complete" interrupt, need
		 * to explicitly schedule NAPI poll.  This will trigger logic
		 * which processes TX completions, and will hopefully drain
		 * the TX ring allowing the TX queue to be awakened.
		 */
		napi_schedule(&priv->napi);
	}

	return NETDEV_TX_OK;
}