author     Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>   2019-11-22 06:30:01 +0530
committer  Jakub Kicinski <jakub.kicinski@netronome.com>     2019-11-22 16:44:39 -0800
commit     0ed96b46c0ac26ebcdd2ee95c2479ef8cf94bbd6 (patch)
tree       6293d25b2c0ee2d7f4ede7967f44e61a102ea6af /drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net (diff)
cxgb4/chcr: update SGL DMA unmap for USO
The FW_ETH_TX_EO_WR used for sending UDP Segmentation Offload (USO)
requests expects the headers to be part of the descriptor and the
payload to be part of the SGL containing the DMA mapped addresses.
Hence, the DMA address in the first entry of the SGL can start after
the packet headers. Currently, unmap_sgl() tries to unmap from this
wrong offset, instead of from the originally mapped DMA address. So,
use the existing unmap_skb() instead, which takes the originally saved
DMA addresses as input. Update all necessary Tx paths to save the
original DMA addresses, so that unmap_skb() can unmap them properly.

v2:
- No change.

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
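To make the mechanics concrete, here is a minimal sketch of the completion-side unmap the message describes, assuming only the tx_sw_desc layout introduced in the diff below. free_tx_desc_addrs() is a hypothetical stand-in for the driver's unmap_skb(), written against the generic kernel DMA API:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct tx_sw_desc {
	struct sk_buff *skb;                /* SKB to free after getting completion */
	dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* DMA addresses saved at map time */
};

/* Replay the saved addresses instead of decoding them out of the
 * hardware SGL; free_tx_desc_addrs() is a hypothetical name.
 */
static void free_tx_desc_addrs(struct device *dev, struct tx_sw_desc *d)
{
	const struct skb_shared_info *si = skb_shinfo(d->skb);
	const dma_addr_t *p = d->addr;
	int i;

	/* addr[0] covers the linear area, mapped with dma_map_single() */
	dma_unmap_single(dev, *p++, skb_headlen(d->skb), DMA_TO_DEVICE);

	/* addr[1..nr_frags] cover the page fragments */
	for (i = 0; i < si->nr_frags; i++)
		dma_unmap_page(dev, *p++, skb_frag_size(&si->frags[i]),
			       DMA_TO_DEVICE);
}

The point is that the unmap addresses come from the addr[] array saved at map time, not from parsing the SGL, so the header offset baked into the first SGL entry of a USO request no longer matters.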
Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4/cxgb4.h')
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 19
1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 3121ed83d8e2..61a2cf62f694 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -735,7 +735,12 @@ struct tx_desc {
__be64 flit[8];
};
-struct tx_sw_desc;
+struct ulptx_sgl;
+
+struct tx_sw_desc {
+ struct sk_buff *skb; /* SKB to free after getting completion */
+ dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* DMA mapped addresses */
+};
struct sge_txq {
unsigned int in_use; /* # of in-use Tx descriptors */
@@ -814,15 +819,10 @@ enum sge_eosw_state {
CXGB4_EO_STATE_FLOWC_CLOSE_REPLY, /* Waiting for FLOWC close reply */
};
-struct sge_eosw_desc {
- struct sk_buff *skb; /* SKB to free after getting completion */
- dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* DMA mapped addresses */
-};
-
struct sge_eosw_txq {
spinlock_t lock; /* Per queue lock to synchronize completions */
enum sge_eosw_state state; /* Current ETHOFLD State */
- struct sge_eosw_desc *desc; /* Descriptor ring to hold packets */
+ struct tx_sw_desc *desc; /* Descriptor ring to hold packets */
u32 ndesc; /* Number of descriptors */
u32 pidx; /* Current Producer Index */
u32 last_pidx; /* Last successfully transmitted Producer Index */
@@ -1151,11 +1151,6 @@ enum {
SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */
};
-struct tx_sw_desc { /* SW state per Tx descriptor */
- struct sk_buff *skb;
- struct ulptx_sgl *sgl;
-};
-
/* Support for "sched_queue" command to allow one or more NIC TX Queues
* to be bound to a TX Scheduling Class.
*/
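For completeness, a sketch of the map-time counterpart that fills the addr[] array the unmap sketch above consumes. In the driver this role is played by cxgb4_map_skb(); map_tx_skb() here is a hypothetical illustration using the generic DMA API (same includes as the earlier sketch, plus linux/errno.h):

static int map_tx_skb(struct device *dev, const struct sk_buff *skb,
		      dma_addr_t *addr)
{
	const struct skb_shared_info *si = skb_shinfo(skb);
	int i;

	/* addr[0]: linear area */
	addr[0] = dma_map_single(dev, skb->data, skb_headlen(skb),
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr[0]))
		return -ENOMEM;

	/* addr[1..nr_frags]: page fragments */
	for (i = 0; i < si->nr_frags; i++) {
		addr[i + 1] = skb_frag_dma_map(dev, &si->frags[i], 0,
					       skb_frag_size(&si->frags[i]),
					       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addr[i + 1]))
			goto unwind;
	}
	return 0;

unwind:	/* undo the fragments mapped so far, then the linear area */
	while (i-- > 0)
		dma_unmap_page(dev, addr[i + 1],
			       skb_frag_size(&si->frags[i]), DMA_TO_DEVICE);
	dma_unmap_single(dev, addr[0], skb_headlen(skb), DMA_TO_DEVICE);
	return -ENOMEM;
}

Saving the addresses at this point is what lets every Tx path, USO included, share one completion-side unmap routine.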