Diffstat (limited to 'kernel/bpf/devmap.c')
 kernel/bpf/devmap.c | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 642c97f6d1b8..141710b82a6c 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -161,6 +161,7 @@ static void dev_map_free(struct bpf_map *map)
 	list_del_rcu(&dtab->list);
 	spin_unlock(&dev_map_lock);
 
+	bpf_clear_redirect_map(map);
 	synchronize_rcu();
 
 	/* To ensure all pending flush operations have completed wait for flush
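The new bpf_clear_redirect_map() call closes a use-after-free window: bpf_redirect_map() caches a per-CPU pointer to the target map in struct bpf_redirect_info, and freeing the map without clearing that cache would let a later redirect or flush dereference freed memory. A minimal sketch of the helper, paraphrased from the matching net/core/filter.c change (exact body may differ):

/* Walk every CPU's bpf_redirect_info and drop any cached pointer to the
 * map being freed. cmpxchg() guards against racing with a concurrent
 * bpf_redirect_map() on the remote CPU.
 */
void bpf_clear_redirect_map(struct bpf_map *map)
{
	struct bpf_redirect_info *ri;
	int cpu;

	for_each_possible_cpu(cpu) {
		ri = per_cpu_ptr(&bpf_redirect_info, cpu);
		/* Avoid dirtying the remote cacheline unless this CPU
		 * really cached the dying map.
		 */
		if (unlikely(READ_ONCE(ri->map) == map))
			cmpxchg(&ri->map, map, NULL);
	}
}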
@@ -217,7 +218,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 }
 
 static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-		       struct xdp_bulk_queue *bq, u32 flags)
+		       struct xdp_bulk_queue *bq, u32 flags,
+		       bool in_napi_ctx)
 {
 	struct net_device *dev = obj->dev;
 	int sent = 0, drops = 0, err = 0;
@@ -254,7 +256,10 @@ error:
 		struct xdp_frame *xdpf = bq->q[i];
 
 		/* RX path under NAPI protection, can return frames faster */
-		xdp_return_frame_rx_napi(xdpf);
+		if (likely(in_napi_ctx))
+			xdp_return_frame_rx_napi(xdpf);
+		else
+			xdp_return_frame(xdpf);
 		drops++;
 	}
 	goto out;
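xdp_return_frame_rx_napi() is the fast-path variant that may hand the frame straight back to the RX ring, which is only safe while the caller runs under the originating device's NAPI protection. bq_xmit_all() is now also reached from teardown (dev_map_flush_old() below), which runs outside NAPI, so the new in_napi_ctx flag routes that caller to the always-safe xdp_return_frame(). An illustrative helper (hypothetical name, mirroring the logic added above rather than any kernel API):

/* return_frame() is a hypothetical wrapper showing the rule this diff
 * encodes: recycle via the NAPI fast path only when the caller holds
 * NAPI protection, otherwise fall back to the generic return path.
 */
static void return_frame(struct xdp_frame *xdpf, bool in_napi_ctx)
{
	if (in_napi_ctx)
		xdp_return_frame_rx_napi(xdpf);	/* driver NAPI poll context */
	else
		xdp_return_frame(xdpf);		/* e.g. map/entry teardown */
}

Accordingly, __dev_map_flush() and bq_enqueue() (both on the NAPI-driven RX path) pass true below, while dev_map_flush_old() passes false.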
@@ -286,7 +291,7 @@ void __dev_map_flush(struct bpf_map *map)
 		__clear_bit(bit, bitmap);
 
 		bq = this_cpu_ptr(dev->bulkq);
-		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
 	}
 }
 
@@ -316,7 +321,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
 	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-		bq_xmit_all(obj, bq, 0);
+		bq_xmit_all(obj, bq, 0, true);
 
 	/* Ingress dev_rx will be the same for all xdp_frame's in
 	 * bulk_queue, because bq stored per-CPU and must be flushed
@@ -334,10 +339,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
 {
 	struct net_device *dev = dst->dev;
 	struct xdp_frame *xdpf;
+	int err;
 
 	if (!dev->netdev_ops->ndo_xdp_xmit)
 		return -EOPNOTSUPP;
 
+	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
+	if (unlikely(err))
+		return err;
+
 	xdpf = convert_to_xdp_frame(xdp);
 	if (unlikely(!xdpf))
 		return -EOVERFLOW;
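The native XDP path previously skipped the up/MTU sanity check that the generic (skb) path performed via __xdp_generic_ok_fwd_dev(). Both paths now use the renamed xdp_ok_fwd_dev(), with the frame length computed as xdp->data_end - xdp->data here and skb->len in dev_map_generic_redirect() below. Roughly, the helper is a static inline in include/linux/filter.h of this shape (paraphrased; details may differ):

/* Reject forwarding to a device that is down, or a frame that cannot
 * fit the egress device's MTU plus link-layer headroom.
 */
static __always_inline int xdp_ok_fwd_dev(const struct net_device *fwd,
					  unsigned int pktlen)
{
	unsigned int len;

	if (unlikely(!(fwd->flags & IFF_UP)))
		return -ENETDOWN;

	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
	if (pktlen > len)
		return -EMSGSIZE;

	return 0;
}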
@@ -350,7 +360,7 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
 {
 	int err;
 
-	err = __xdp_generic_ok_fwd_dev(skb, dst->dev);
+	err = xdp_ok_fwd_dev(dst->dev, skb->len);
 	if (unlikely(err))
 		return err;
 	skb->dev = dst->dev;
@@ -380,7 +390,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 			__clear_bit(dev->bit, bitmap);
 
 			bq = per_cpu_ptr(dev->bulkq, cpu);
-			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
 		}
 	}
 }
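dev_map_flush_old() passes in_napi_ctx = false because it is reached from devmap entry teardown rather than from a driver's NAPI poll loop; an RCU callback runs in softirq context, but not under the RX NAPI protection that xdp_return_frame_rx_napi() relies on. For reference, the teardown chain in this version of the file looks roughly like this (from memory; exact body may differ):

/* RCU callback freeing a devmap slot: drain any per-CPU bulk queues
 * first so queued frames are returned, then release the netdevice.
 */
static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_map_flush_old(dev);		/* reaches bq_xmit_all(..., false) */
	free_percpu(dev->bulkq);
	dev_put(dev->dev);
	kfree(dev);
}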
@@ -479,6 +489,7 @@ const struct bpf_map_ops dev_map_ops = {
 	.map_lookup_elem = dev_map_lookup_elem,
 	.map_update_elem = dev_map_update_elem,
 	.map_delete_elem = dev_map_delete_elem,
+	.map_check_btf = map_check_no_btf,
 };
 
 static int dev_map_notification(struct notifier_block *notifier,
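The new .map_check_btf hook lets each map type decide whether BTF key/value type information may be attached at map creation; devmap opts out by pointing the hook at the shared map_check_no_btf() helper, which rejects BTF unconditionally. Paraphrased from kernel/bpf/syscall.c of the same era (the signature may differ in later kernels):

/* Shared fallback for map types that do not support BTF-described
 * keys and values: refuse any BTF supplied at map creation time.
 */
int map_check_no_btf(const struct bpf_map *map,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}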