author		John Hurley <john.hurley@netronome.com>	2019-04-15 16:55:55 +0200
committer	David S. Miller <davem@davemloft.net>	2019-04-15 15:45:36 -0700
commit		27f54b582567bef2bfb9ee6f23aed6137cf9cfcb (patch)
tree		0bef6fdc68d0d5ad3b17e45db5a57ee59af07137 /drivers/net/ethernet/netronome/nfp/nfp_net_common.c
parent		nfp: flower: allow offloading of matches on 'internal' ports (diff)
nfp: allow fallback packets from non-reprs
Currently, it is assumed that fallback packets will be from reprs. Modify this to allow an app to receive fallback packets from non-repr ports - e.g. from an internal port. If such a packet is received, do not update repr stats.

Change the naming of the relevant function calls so as not to imply that a repr netdev will always be returned.

Add the option to set a bool value that redirects a fallback packet out the returned port rather than RXing it. Setting this bool in subsequent patches allows the handling of packets that fall back when they are due to egress an internal port.

Signed-off-by: John Hurley <john.hurley@netronome.com>
Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Simon Horman <simon.horman@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
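For context, the new lookup contract can be pictured with the sketch below. This is a minimal illustration only: the (app, port id, bool *redir_egress) shape mirrors nfp_app_dev_get() in the diff that follows, while my_app_dev_get(), my_app_repr_lookup() and my_app_internal_port_lookup() are hypothetical placeholders for whatever lookups the app callback actually performs.

/*
 * Minimal sketch of an app-level lookup behind the new contract.
 * my_app_repr_lookup() and my_app_internal_port_lookup() are
 * hypothetical helpers; only the (app, port_id, *redir_egress)
 * shape is taken from the patch itself.
 */
static struct net_device *
my_app_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
	struct net_device *netdev;

	/* Repr ports keep the old behaviour: the caller RXes the skb
	 * and bumps repr stats.
	 */
	netdev = my_app_repr_lookup(app, port_id);
	if (netdev)
		return netdev;

	/* Non-repr (e.g. internal) ports ask the caller to transmit
	 * the fallback packet out of the returned netdev instead of
	 * receiving it.
	 */
	netdev = my_app_internal_port_lookup(app, port_id);
	if (netdev)
		*redir_egress = true;

	return netdev;
}

On the RX side, a cleared redir_egress keeps the existing napi_gro_receive() path, whereas a set flag makes the driver push the Ethernet header back on and transmit the skb out of the returned netdev, as the last hunk of the diff shows.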
Diffstat (limited to 'drivers/net/ethernet/netronome/nfp/nfp_net_common.c')
-rw-r--r--	drivers/net/ethernet/netronome/nfp/nfp_net_common.c	16
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index e4be04cdef30..58657fe504d7 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1683,6 +1683,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
		struct nfp_net_rx_buf *rxbuf;
		struct nfp_net_rx_desc *rxd;
		struct nfp_meta_parsed meta;
+		bool redir_egress = false;
		struct net_device *netdev;
		dma_addr_t new_dma_addr;
		u32 meta_len_xdp = 0;
@@ -1818,13 +1819,16 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
			struct nfp_net *nn;

			nn = netdev_priv(dp->netdev);
-			netdev = nfp_app_repr_get(nn->app, meta.portid);
+			netdev = nfp_app_dev_get(nn->app, meta.portid,
+						 &redir_egress);
			if (unlikely(!netdev)) {
				nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
						NULL);
				continue;
			}
-			nfp_repr_inc_rx_stats(netdev, pkt_len);
+
+			if (nfp_netdev_is_nfp_repr(netdev))
+				nfp_repr_inc_rx_stats(netdev, pkt_len);
		}

		skb = build_skb(rxbuf->frag, true_bufsz);
@@ -1859,7 +1863,13 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
		if (meta_len_xdp)
			skb_metadata_set(skb, meta_len_xdp);

-		napi_gro_receive(&rx_ring->r_vec->napi, skb);
+		if (likely(!redir_egress)) {
+			napi_gro_receive(&rx_ring->r_vec->napi, skb);
+		} else {
+			skb->dev = netdev;
+			__skb_push(skb, ETH_HLEN);
+			dev_queue_xmit(skb);
+		}
	}

	if (xdp_prog) {