author	Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>	2019-11-22 06:30:01 +0530
committer	Jakub Kicinski <jakub.kicinski@netronome.com>	2019-11-22 16:44:39 -0800
commit	0ed96b46c0ac26ebcdd2ee95c2479ef8cf94bbd6 (patch)
tree	6293d25b2c0ee2d7f4ede7967f44e61a102ea6af /drivers/crypto
parent	Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net (diff)
download	linux-dev-0ed96b46c0ac26ebcdd2ee95c2479ef8cf94bbd6.tar.xz
	linux-dev-0ed96b46c0ac26ebcdd2ee95c2479ef8cf94bbd6.zip
cxgb4/chcr: update SGL DMA unmap for USO
The FW_ETH_TX_EO_WR used for sending UDP Segmentation Offload (USO)
requests expects the headers to be part of the descriptor and the
payload to be part of the SGL containing the DMA mapped addresses.
Hence, the DMA address in the first entry of the SGL can start after
the packet headers. Currently, unmap_sgl() tries to unmap from this
wrong offset, instead of the originally mapped DMA address. So, use
the existing unmap_skb() instead, which takes the originally saved
DMA addresses as input. Update all necessary Tx paths to save the
original DMA addresses, so that unmap_skb() can unmap them properly.

v2:
- No change.

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
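For reference, unmapping via the saved addresses avoids the offset
problem entirely. Below is a minimal sketch, modelled on cxgb4's
unmap_skb() in sge.c (the driver's actual code may differ in detail):
addr[0] holds the address returned by dma_map_single() for the skb's
linear part, and the following entries hold the dma_map_page()
addresses of the page fragments, so nothing is derived from the SGL
contents.

	static void unmap_skb(struct device *dev, const struct sk_buff *skb,
			      const dma_addr_t *addr)
	{
		const skb_frag_t *fp, *end;
		const struct skb_shared_info *si;

		/* Unmap the linear part from its originally saved address. */
		dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

		/* Unmap each page fragment from its saved address. */
		si = skb_shinfo(skb);
		end = &si->frags[si->nr_frags];
		for (fp = si->frags; fp < end; fp++)
			dma_unmap_page(dev, *addr++, skb_frag_size(fp),
				       DMA_TO_DEVICE);
	}

Since every address is recorded at map time, this works even when the
SGL's first entry points past the inlined packet headers.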
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/chelsio/chcr_ipsec.c	27	+++++++++++++--------------
1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 24355680f30a..9da0f93a330b 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -673,16 +673,16 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xfrm_state *x = xfrm_input_state(skb);
+ unsigned int last_desc, ndesc, flits = 0;
struct ipsec_sa_entry *sa_entry;
u64 *pos, *end, *before, *sgl;
+ struct tx_sw_desc *sgl_sdesc;
int qidx, left, credits;
- unsigned int flits = 0, ndesc;
- struct adapter *adap;
+ bool immediate = false;
struct sge_eth_txq *q;
+ struct adapter *adap;
struct port_info *pi;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
struct sec_path *sp;
- bool immediate = false;

if (!x->xso.offload_handle)
return NETDEV_TX_BUSY;
@@ -715,8 +715,14 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}

+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
if (!immediate &&
- unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
q->mapping_err++;
goto out_free;
}
@@ -742,17 +748,10 @@ out_free: dev_kfree_skb_any(skb);
cxgb4_inline_tx_skb(skb, &q->q, sgl);
dev_consume_skb_any(skb);
} else {
- int last_desc;
-
cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
- 0, addr);
+ 0, sgl_sdesc->addr);
skb_orphan(skb);
-
- last_desc = q->q.pidx + ndesc - 1;
- if (last_desc >= q->q.size)
- last_desc -= q->q.size;
- q->q.sdesc[last_desc].skb = skb;
- q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
+ sgl_sdesc->skb = skb;
}
txq_advance(&q->q, ndesc);
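For context, the last_desc arithmetic above wraps the software
descriptor index around the Tx ring, so the skb and its saved DMA
addresses end up stored at the request's final descriptor, where the
completion path can later find them. A standalone sketch of the same
wrap computation, using an illustrative struct and hypothetical
values (not the driver's struct sge_txq):

	#include <stdio.h>

	/* Illustrative stand-in for the driver's ring state. */
	struct ring {
		unsigned int pidx;	/* producer index */
		unsigned int size;	/* number of descriptors in the ring */
	};

	static unsigned int last_desc_index(const struct ring *q,
					    unsigned int ndesc)
	{
		unsigned int last_desc = q->pidx + ndesc - 1;

		if (last_desc >= q->size)
			last_desc -= q->size;	/* wrap past the ring's end */
		return last_desc;
	}

	int main(void)
	{
		struct ring q = { .pidx = 1022, .size = 1024 };

		/* A 4-descriptor request starting at 1022 wraps to index 1. */
		printf("%u\n", last_desc_index(&q, 4));
		return 0;
	}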