Diffstat (limited to 'drivers/net/ethernet/pensando/ionic')
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.h  |   5
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 237
2 files changed, 117 insertions, 125 deletions
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 339824cfd618..d0c969a6d43e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -179,8 +179,11 @@ struct ionic_buf_info {
struct page *page;
dma_addr_t dma_addr;
u32 page_offset;
+ u32 len;
};
+#define IONIC_MAX_FRAGS (1 + IONIC_TX_MAX_SG_ELEMS_V1)
+
struct ionic_desc_info {
union {
void *desc;
@@ -194,7 +197,7 @@ struct ionic_desc_info {
struct ionic_rxq_sg_desc *rxq_sgl_desc;
};
unsigned int nbufs;
- struct ionic_buf_info bufs[IONIC_RX_MAX_SG_ELEMS + 1];
+ struct ionic_buf_info bufs[IONIC_MAX_FRAGS];
ionic_desc_cb cb;
void *cb_arg;
};
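
The ionic_dev.h change above gives each saved buffer its own length and sizes the per-descriptor array as one linear head buffer plus up to IONIC_TX_MAX_SG_ELEMS_V1 page fragments. Below is a minimal userspace sketch of that bookkeeping (the constant value and addresses are placeholders, with plain integers standing in for DMA handles); it shows how cleanup can later unmap straight from the saved addr/len pairs without re-decoding descriptors:

/*
 * Illustrative sketch only, not driver code.  The value used for
 * IONIC_TX_MAX_SG_ELEMS_V1 is a placeholder, and plain integers stand
 * in for dma_addr_t.
 */
#include <stdio.h>

#define IONIC_TX_MAX_SG_ELEMS_V1  15              /* placeholder value */
#define IONIC_MAX_FRAGS           (1 + IONIC_TX_MAX_SG_ELEMS_V1)

struct buf_info {
        unsigned long dma_addr;   /* stands in for dma_addr_t */
        unsigned int len;         /* bytes mapped at dma_addr */
};

int main(void)
{
        struct buf_info bufs[IONIC_MAX_FRAGS] = { { 0, 0 } };
        unsigned int nbufs = 0, i;

        /* slot 0 holds the linear head, later slots hold page fragments */
        bufs[nbufs++] = (struct buf_info){ 0x1000, 100 };
        bufs[nbufs++] = (struct buf_info){ 0x2000, 3000 };
        bufs[nbufs++] = (struct buf_info){ 0x3000, 1400 };

        /* cleanup can unmap straight from the saved addr/len pairs */
        for (i = 0; i < nbufs; i++)
                printf("unmap %#lx, %u bytes\n", bufs[i].dma_addr, bufs[i].len);

        return 0;
}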
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index c63e6e7aa47b..639000a2e495 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -605,12 +605,51 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
return dma_addr;
}
+static int ionic_tx_map_tso(struct ionic_queue *q, struct sk_buff *skb,
+ struct ionic_buf_info *buf_info)
+{
+ struct device *dev = q->dev;
+ dma_addr_t dma_addr;
+ skb_frag_t *frag;
+ int frag_idx;
+
+ dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
+ if (dma_mapping_error(dev, dma_addr))
+ return -EIO;
+ buf_info->dma_addr = dma_addr;
+ buf_info->len = skb_headlen(skb);
+ buf_info++;
+
+ for (frag_idx = 0; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, buf_info++) {
+ frag = &skb_shinfo(skb)->frags[frag_idx];
+ dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
+ if (dma_mapping_error(dev, dma_addr))
+ goto dma_fail;
+ buf_info->dma_addr = dma_addr;
+ buf_info->len = skb_frag_size(frag);
+ }
+
+ return 0;
+
+dma_fail:
+ /* unwind the frag mappings and the head mapping */
+ while (frag_idx > 0) {
+ frag_idx--;
+ buf_info--;
+ dma_unmap_page(dev, buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+ }
+ /* back up one more slot to reach the head mapping */
+ buf_info--;
+ dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE);
+ return -EIO;
+}
+
static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
struct ionic_cq_info *cq_info,
void *cb_arg)
{
struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
+ struct ionic_buf_info *buf_info = desc_info->bufs;
struct ionic_txq_sg_elem *elem = sg_desc->elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_txq_desc *desc = desc_info->desc;
@@ -623,20 +662,22 @@ static void ionic_tx_clean(struct ionic_queue *q,
decode_txq_desc_cmd(le64_to_cpu(desc->cmd),
&opcode, &flags, &nsge, &addr);
- /* use unmap_single only if either this is not TSO,
- * or this is first descriptor of a TSO
- */
- if (opcode != IONIC_TXQ_DESC_OPCODE_TSO ||
- flags & IONIC_TXQ_DESC_FLAG_TSO_SOT)
+ if (opcode != IONIC_TXQ_DESC_OPCODE_TSO) {
dma_unmap_single(dev, (dma_addr_t)addr,
le16_to_cpu(desc->len), DMA_TO_DEVICE);
- else
- dma_unmap_page(dev, (dma_addr_t)addr,
- le16_to_cpu(desc->len), DMA_TO_DEVICE);
-
- for (i = 0; i < nsge; i++, elem++)
- dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
- le16_to_cpu(elem->len), DMA_TO_DEVICE);
+ for (i = 0; i < nsge; i++, elem++)
+ dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
+ le16_to_cpu(elem->len), DMA_TO_DEVICE);
+ } else {
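+ /* TSO mappings are saved with the last descriptor, so unmap only on EOT */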
+ if (flags & IONIC_TXQ_DESC_FLAG_TSO_EOT) {
+ dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+ buf_info++;
+ for (i = 1; i < desc_info->nbufs; i++, buf_info++)
+ dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+ }
+ }
if (cb_arg) {
struct sk_buff *skb = cb_arg;
@@ -794,29 +835,23 @@ static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
+ struct ionic_buf_info buf_info[IONIC_MAX_FRAGS] = {{0}};
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct ionic_desc_info *rewind_desc_info;
struct ionic_txq_sg_elem *elem;
- struct device *dev = q->dev;
struct ionic_txq_desc *desc;
- unsigned int frag_left = 0;
- unsigned int offset = 0;
- u16 abort = q->head_idx;
- unsigned int len_left;
+ unsigned int chunk_len;
+ unsigned int frag_rem;
+ unsigned int frag_idx;
+ unsigned int tso_rem;
+ unsigned int seg_rem;
dma_addr_t desc_addr;
+ dma_addr_t frag_addr;
unsigned int hdrlen;
unsigned int nfrags;
- unsigned int seglen;
- u64 total_bytes = 0;
- u64 total_pkts = 0;
- u16 rewind = abort;
- unsigned int left;
unsigned int len;
unsigned int mss;
- skb_frag_t *frag;
bool start, done;
bool outer_csum;
- dma_addr_t addr;
bool has_vlan;
u16 desc_len;
u8 desc_nsge;
@@ -824,9 +859,12 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
bool encap;
int err;
+ if (unlikely(ionic_tx_map_tso(q, skb, buf_info)))
+ return -EIO;
+
+ len = skb->len;
mss = skb_shinfo(skb)->gso_size;
nfrags = skb_shinfo(skb)->nr_frags;
- len_left = skb->len - skb_headlen(skb);
outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
has_vlan = !!skb_vlan_tag_present(skb);
@@ -851,117 +889,68 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
else
hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
- seglen = hdrlen + mss;
- left = skb_headlen(skb);
+ tso_rem = len;
+ seg_rem = min(tso_rem, hdrlen + mss);
- desc = ionic_tx_tso_next(q, &elem);
- start = true;
+ frag_idx = 0;
+ frag_addr = 0;
+ frag_rem = 0;
- /* Chop skb->data up into desc segments */
+ start = true;
- while (left > 0) {
- len = min(seglen, left);
- frag_left = seglen - len;
- desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
- if (dma_mapping_error(dev, desc_addr))
- goto err_out_abort;
- desc_len = len;
+ while (tso_rem > 0) {
+ desc = NULL;
+ elem = NULL;
+ desc_addr = 0;
+ desc_len = 0;
desc_nsge = 0;
- left -= len;
- offset += len;
- if (nfrags > 0 && frag_left > 0)
- continue;
- done = (nfrags == 0 && left == 0);
- ionic_tx_tso_post(q, desc, skb,
- desc_addr, desc_nsge, desc_len,
- hdrlen, mss,
- outer_csum,
- vlan_tci, has_vlan,
- start, done);
- total_pkts++;
- total_bytes += start ? len : len + hdrlen;
- desc = ionic_tx_tso_next(q, &elem);
- start = false;
- seglen = mss;
- }
-
- /* Chop skb frags into desc segments */
-
- for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
- offset = 0;
- left = skb_frag_size(frag);
- len_left -= left;
- nfrags--;
- stats->frags++;
-
- while (left > 0) {
- if (frag_left > 0) {
- len = min(frag_left, left);
- frag_left -= len;
- addr = ionic_tx_map_frag(q, frag, offset, len);
- if (dma_mapping_error(dev, addr))
- goto err_out_abort;
- elem->addr = cpu_to_le64(addr);
- elem->len = cpu_to_le16(len);
- elem++;
- desc_nsge++;
- left -= len;
- offset += len;
- if (nfrags > 0 && frag_left > 0)
- continue;
- done = (nfrags == 0 && left == 0);
- ionic_tx_tso_post(q, desc, skb, desc_addr,
- desc_nsge, desc_len,
- hdrlen, mss, outer_csum,
- vlan_tci, has_vlan,
- start, done);
- total_pkts++;
- total_bytes += start ? len : len + hdrlen;
+ /* loop until a full tcp segment can be created */
+ while (seg_rem > 0) {
+ /* if the fragment is exhausted get the next one */
+ if (frag_rem == 0) {
+ /* grab the next fragment */
+ frag_addr = buf_info[frag_idx].dma_addr;
+ frag_rem = buf_info[frag_idx].len;
+ frag_idx++;
+ }
+ chunk_len = min(frag_rem, seg_rem);
+ if (!desc) {
+ /* fill main descriptor */
desc = ionic_tx_tso_next(q, &elem);
- start = false;
+ desc_addr = frag_addr;
+ desc_len = chunk_len;
} else {
- len = min(mss, left);
- frag_left = mss - len;
- desc_addr = ionic_tx_map_frag(q, frag,
- offset, len);
- if (dma_mapping_error(dev, desc_addr))
- goto err_out_abort;
- desc_len = len;
- desc_nsge = 0;
- left -= len;
- offset += len;
- if (nfrags > 0 && frag_left > 0)
- continue;
- done = (nfrags == 0 && left == 0);
- ionic_tx_tso_post(q, desc, skb, desc_addr,
- desc_nsge, desc_len,
- hdrlen, mss, outer_csum,
- vlan_tci, has_vlan,
- start, done);
- total_pkts++;
- total_bytes += start ? len : len + hdrlen;
- desc = ionic_tx_tso_next(q, &elem);
- start = false;
+ /* fill sg descriptor */
+ elem->addr = cpu_to_le64(frag_addr);
+ elem->len = cpu_to_le16(chunk_len);
+ elem++;
+ desc_nsge++;
}
+ frag_addr += chunk_len;
+ frag_rem -= chunk_len;
+ tso_rem -= chunk_len;
+ seg_rem -= chunk_len;
+ }
+ seg_rem = min(tso_rem, mss);
+ done = (tso_rem == 0);
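+ /* save the mappings with the last descriptor for ionic_tx_clean() to unmap on EOT */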
+ if (done) {
+ memcpy(&q->info[q->head_idx].bufs, buf_info, sizeof(buf_info));
+ q->info[q->head_idx].nbufs = nfrags + 1;
}
+ /* post descriptor */
+ ionic_tx_tso_post(q, desc, skb,
+ desc_addr, desc_nsge, desc_len,
+ hdrlen, mss, outer_csum, vlan_tci, has_vlan,
+ start, done);
+ start = false;
}
- stats->pkts += total_pkts;
- stats->bytes += total_bytes;
+ stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
+ stats->bytes += len;
stats->tso++;
- stats->tso_bytes += total_bytes;
+ stats->tso_bytes += len;
return 0;
-
-err_out_abort:
- while (rewind != q->head_idx) {
- rewind_desc_info = &q->info[rewind];
- ionic_tx_clean(q, rewind_desc_info, NULL, NULL);
- rewind = (rewind + 1) & (q->num_descs - 1);
- }
- q->head_idx = abort;
-
- return -ENOMEM;
}
static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
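
For readers tracing the rewritten loop in ionic_tx_tso() above, the standalone sketch below replays the same tso_rem/seg_rem/frag_rem arithmetic with made-up skb sizes and no DMA: the first segment carries at most hdrlen + mss bytes and later segments at most mss, and within each segment the first chunk fills the main descriptor slot while any remainder spills into SG elements.

/*
 * Standalone illustration, not driver code: buffer sizes, hdrlen and mss
 * below are invented example values.
 */
#include <stdio.h>

struct buf { unsigned int len; };

int main(void)
{
        /* pretend skb: 100-byte linear head plus two page fragments */
        struct buf bufs[] = { { 100 }, { 3000 }, { 1400 } };
        unsigned int nbufs = 3, hdrlen = 54, mss = 1448;
        unsigned int tso_rem = 0, seg_rem, frag_rem = 0, frag_idx = 0;
        unsigned int chunk_len, seg = 0, i;

        for (i = 0; i < nbufs; i++)
                tso_rem += bufs[i].len;
        seg_rem = tso_rem < hdrlen + mss ? tso_rem : hdrlen + mss;

        while (tso_rem > 0) {
                int in_main_slot = 1;

                /* fill one descriptor: main slot first, then SG elements */
                while (seg_rem > 0) {
                        if (frag_rem == 0)      /* advance to the next buffer */
                                frag_rem = bufs[frag_idx++].len;
                        chunk_len = frag_rem < seg_rem ? frag_rem : seg_rem;
                        printf("seg %u: %u bytes in %s slot\n", seg, chunk_len,
                               in_main_slot ? "main" : "sg");
                        in_main_slot = 0;
                        frag_rem -= chunk_len;
                        tso_rem -= chunk_len;
                        seg_rem -= chunk_len;
                }
                seg_rem = tso_rem < mss ? tso_rem : mss;
                seg++;
        }
        return 0;
}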