author     Martin KaFai Lau <kafai@fb.com>          2016-12-07 15:53:13 -0800
committer  David S. Miller <davem@davemloft.net>    2016-12-08 14:25:13 -0500
commit     ea3349a03519dcd4f32d949cd80ab995623dc5ac (patch)
tree       01d77dc1b6492c486e3df0503fc4b1ef17fb7884 /drivers/net/ethernet/mellanox/mlx4/en_rx.c
parent     mlx4: xdp: Allow raising MTU up to one page minus eth and vlan hdrs (diff)
mlx4: xdp: Reserve headroom for receiving packet when XDP prog is active
Reserve XDP_PACKET_HEADROOM for packet and enable bpf_xdp_adjust_head()
support.  This patch only affects the code path when XDP is active.

After testing, the tx_dropped counter is incremented if the xdp_prog sends
more than wire MTU.

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
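bpf_xdp_adjust_head() can only move the packet start backwards if the driver
has set aside headroom in front of the received frame, which is what this
patch reserves.  The following is a minimal sketch of an XDP program that
exercises that headroom; it is illustrative only and not part of this commit,
and the program and section names are made up:

/* Illustrative sketch, not part of this commit: a minimal XDP program
 * that relies on the driver-reserved headroom.  bpf_xdp_adjust_head()
 * with a negative delta grows the frame at the front, e.g. to prepend
 * an encapsulation header, and fails if no headroom is available.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_use_headroom(struct xdp_md *ctx)
{
        /* Move the packet start 16 bytes back into the reserved headroom.
         * A real program would then write a header into the new space.
         */
        if (bpf_xdp_adjust_head(ctx, -16))
                return XDP_ABORTED;

        return XDP_PASS;
}

char _license[] SEC("license") = "GPL";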
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/en_rx.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 24 ++++++++++++++++++------
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 23e9d04d1ef4..3c37e216bbf3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -96,7 +96,6 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
         struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
         const struct mlx4_en_frag_info *frag_info;
         struct page *page;
-        dma_addr_t dma;
         int i;
 
         for (i = 0; i < priv->num_frags; i++) {
@@ -115,9 +114,10 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
 
         for (i = 0; i < priv->num_frags; i++) {
                 frags[i] = ring_alloc[i];
-                dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
+                frags[i].page_offset += priv->frag_info[i].rx_headroom;
+                rx_desc->data[i].addr = cpu_to_be64(frags[i].dma +
+                                                    frags[i].page_offset);
                 ring_alloc[i] = page_alloc[i];
-                rx_desc->data[i].addr = cpu_to_be64(dma);
         }
 
         return 0;
@@ -250,7 +250,8 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
 
         if (ring->page_cache.index > 0) {
                 frags[0] = ring->page_cache.buf[--ring->page_cache.index];
-                rx_desc->data[0].addr = cpu_to_be64(frags[0].dma);
+                rx_desc->data[0].addr = cpu_to_be64(frags[0].dma +
+                                                    frags[0].page_offset);
                 return 0;
         }
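The hunks above make the RX descriptor address include the fragment's page
offset, which now carries the reserved headroom, so the NIC DMA-writes the
frame after the gap rather than at the very start of the buffer.  A
standalone sketch of that arithmetic follows; the names and values are
invented for illustration (XDP_PACKET_HEADROOM is 256 bytes):

/* Standalone sketch of the descriptor-address arithmetic above.
 * Names and values are illustrative, not driver code.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_HEADROOM 256u           /* plays the role of XDP_PACKET_HEADROOM */

int main(void)
{
        uint64_t frag_dma    = 0x12340000;      /* DMA address of the RX page */
        uint32_t page_offset = 0;               /* start of the frag within the page */

        /* Mirrors: frags[i].page_offset += priv->frag_info[i].rx_headroom;
         *          rx_desc->data[i].addr = cpu_to_be64(frags[i].dma +
         *                                              frags[i].page_offset);
         */
        page_offset += EXAMPLE_HEADROOM;
        uint64_t desc_addr = frag_dma + page_offset;

        printf("NIC writes packet bytes at 0x%llx; %u bytes of headroom precede them\n",
               (unsigned long long)desc_addr, page_offset);
        return 0;
}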
@@ -889,6 +890,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                 if (xdp_prog) {
                         struct xdp_buff xdp;
                         dma_addr_t dma;
+                        void *orig_data;
                         u32 act;
 
                         dma = be64_to_cpu(rx_desc->data[0].addr);
@@ -896,11 +898,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                                                 priv->frag_info[0].frag_size,
                                                 DMA_FROM_DEVICE);
 
-                        xdp.data = page_address(frags[0].page) +
-                                   frags[0].page_offset;
+                        xdp.data_hard_start = page_address(frags[0].page);
+                        xdp.data = xdp.data_hard_start + frags[0].page_offset;
                         xdp.data_end = xdp.data + length;
+                        orig_data = xdp.data;
 
                         act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+                        if (xdp.data != orig_data) {
+                                length = xdp.data_end - xdp.data;
+                                frags[0].page_offset = xdp.data -
+                                        xdp.data_hard_start;
+                        }
+
                         switch (act) {
                         case XDP_PASS:
                                 break;
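If the program moved xdp.data with bpf_xdp_adjust_head(), the hunk above
re-derives the frame length and the fragment's page offset from the adjusted
pointers, so both delivery up the stack and the XDP_TX path see the new frame
start.  A userspace sketch of that pointer arithmetic, with invented buffer
sizes:

/* Userspace sketch of re-deriving length and offset after an XDP
 * program has moved the data pointer.  Everything here is illustrative.
 */
#include <stdio.h>

#define HEADROOM  256
#define FRAME_LEN 1500

int main(void)
{
        static char page[4096];                 /* stands in for the RX page */
        char *data_hard_start = page;
        char *data      = data_hard_start + HEADROOM;  /* frags[0].page_offset */
        char *data_end  = data + FRAME_LEN;
        char *orig_data = data;

        /* Pretend the XDP program pulled the start back by 16 bytes,
         * as bpf_xdp_adjust_head(ctx, -16) would.
         */
        data -= 16;

        if (data != orig_data) {
                size_t length      = (size_t)(data_end - data);
                size_t page_offset = (size_t)(data - data_hard_start);

                printf("new length=%zu, new page_offset=%zu\n",
                       length, page_offset);
        }
        return 0;
}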
@@ -1180,6 +1190,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
                  */
                 priv->frag_info[0].frag_stride = PAGE_SIZE;
                 priv->frag_info[0].dma_dir = PCI_DMA_BIDIRECTIONAL;
+                priv->frag_info[0].rx_headroom = XDP_PACKET_HEADROOM;
                 i = 1;
         } else {
                 int buf_size = 0;
@@ -1194,6 +1205,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
                                 ALIGN(priv->frag_info[i].frag_size,
                                       SMP_CACHE_BYTES);
                         priv->frag_info[i].dma_dir = PCI_DMA_FROMDEVICE;
+                        priv->frag_info[i].rx_headroom = 0;
                         buf_size += priv->frag_info[i].frag_size;
                         i++;
                 }
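With XDP active, the whole page becomes a single fragment whose first
XDP_PACKET_HEADROOM bytes are reserved, while the regular multi-fragment path
keeps rx_headroom at zero.  A small sketch of the resulting layouts; the
struct and numbers are illustrative, assuming a 4 KiB page:

/* Sketch of the two RX buffer layouts configured above.  The struct and
 * values are made up; only the relationships mirror the patch.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE    4096       /* typical PAGE_SIZE */
#define EXAMPLE_XDP_HEADROOM  256       /* XDP_PACKET_HEADROOM */

struct frag_layout {
        int frag_stride;        /* bytes the frag occupies in the page */
        int rx_headroom;        /* bytes the NIC skips before writing data */
};

int main(void)
{
        struct frag_layout xdp_frag = { EXAMPLE_PAGE_SIZE, EXAMPLE_XDP_HEADROOM };
        struct frag_layout skb_frag = { 1536, 0 };      /* e.g. first frag of a 1500-MTU setup */

        printf("XDP path: %d-byte page, %d bytes headroom, %d bytes left for the frame\n",
               xdp_frag.frag_stride, xdp_frag.rx_headroom,
               xdp_frag.frag_stride - xdp_frag.rx_headroom);
        printf("non-XDP path: %d-byte frag, %d bytes headroom\n",
               skb_frag.frag_stride, skb_frag.rx_headroom);
        return 0;
}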