From 6f007c6486d69967ac1d9e67df9ae9c77d49f1cc Mon Sep 17 00:00:00 2001
From: Catherine Sullivan
Date: Mon, 7 Dec 2020 14:45:26 -0800
Subject: gve: Add support for raw addressing in the tx path

During TX, skbs' data addresses are dma_map'ed and passed to the NIC.
This means that the device can perform DMA directly from these addresses
and the driver does not have to copy the buffer content into
pre-allocated buffers/qpls (as in qpl mode).

Reviewed-by: Yangchun Fu
Signed-off-by: Catherine Sullivan
Signed-off-by: David Awogbemila
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/google/gve/gve.h | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

(limited to 'drivers/net/ethernet/google/gve/gve.h')

diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 8aad4af2aa2b..daf07c0f790b 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -112,12 +112,20 @@ struct gve_tx_iovec {
 	u32 iov_padding; /* padding associated with this segment */
 };
 
+struct gve_tx_dma_buf {
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
+};
+
 /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
  * ring entry but only used for a pkt_desc not a seg_desc
  */
 struct gve_tx_buffer_state {
 	struct sk_buff *skb; /* skb for this pkt */
-	struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
+	union {
+		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
+		struct gve_tx_dma_buf buf;
+	};
 };
 
 /* A TX buffer - each queue has one */
@@ -140,13 +148,17 @@ struct gve_tx_ring {
 	__be32 last_nic_done ____cacheline_aligned; /* NIC tail pointer */
 	u64 pkt_done; /* free-running - total packets completed */
 	u64 bytes_done; /* free-running - total bytes completed */
+	u64 dropped_pkt; /* free-running - total packets dropped */
+	u64 dma_mapping_error; /* count of dma mapping errors */
 
 	/* Cacheline 2 -- Read-mostly fields */
 	union gve_tx_desc *desc ____cacheline_aligned;
 	struct gve_tx_buffer_state *info; /* Maps 1:1 to a desc */
 	struct netdev_queue *netdev_txq;
 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
+	struct device *dev;
 	u32 mask; /* masks req and done down to queue size */
+	u8 raw_addressing; /* use raw_addressing? */
 
 	/* Slow-path fields */
 	u32 q_num ____cacheline_aligned; /* queue idx */
@@ -442,7 +454,7 @@ static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
  */
 static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
 {
-	return priv->tx_cfg.num_queues;
+	return priv->raw_addressing ? 0 : priv->tx_cfg.num_queues;
 }
 
 /* Returns the number of rx queue page lists
-- 
cgit v1.2.3-59-g8ed1b
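
For illustration only (not part of the patch above): a minimal sketch of how
a raw-addressing TX path might use the new gve_tx_dma_buf fields to map an
skb's linear data and record the mapping for a later unmap on completion.
The helper name gve_tx_map_skb_head is hypothetical; dma_map_single(),
dma_mapping_error(), dma_unmap_addr_set() and dma_unmap_len_set() are the
standard kernel DMA API (<linux/dma-mapping.h>), and skb_headlen() comes
from <linux/skbuff.h>.

/* Hypothetical helper -- sketches the raw-addressing idea, not the
 * driver's actual gve_tx.c code.  Maps the skb's linear data for DMA
 * and saves address/length in the per-descriptor buffer state added
 * by this patch, so the completion path can unmap it later.
 */
static int gve_tx_map_skb_head(struct gve_tx_ring *tx, struct sk_buff *skb,
			       struct gve_tx_buffer_state *info)
{
	u32 len = skb_headlen(skb);
	dma_addr_t addr;

	addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx->dev, addr))) {
		tx->dma_mapping_error++;	/* stat added by this patch */
		return -ENOMEM;
	}

	info->skb = skb;
	dma_unmap_addr_set(&info->buf, dma, addr);
	dma_unmap_len_set(&info->buf, len, len);
	/* addr/len would now be written into the NIC's pkt_desc */
	return 0;
}

On completion the driver would read the saved mapping back with
dma_unmap_addr(&info->buf, dma) and dma_unmap_len(&info->buf, len) and pass
them to dma_unmap_single(); keeping the mapping in a union with the qpl-mode
iov array works because a given ring uses only one of the two modes.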