author	Daniel Borkmann <daniel@iogearbox.net>	2016-06-15 22:47:13 +0200
committer	David S. Miller <davem@davemloft.net>	2016-06-15 23:42:57 -0700
commit	d056a788765e67773124f520159185bc89f5d1ad (patch)
tree	206983a7a6cad6687929f48147bef8e4de3c0ba2 /include/linux
parent	bpf, maps: add release callback (diff)
bpf, maps: extend map_fd_get_ptr arguments
This patch extends the map_fd_get_ptr() callback that is used by fd array maps, so that the struct file pointer from the related map can be passed in. It's safe to remove the map_update_elem() callback for the two maps, since updates are only allowed from the syscall side, not from eBPF programs, for these two map types. As in the per-cpu map case, bpf_fd_array_map_update_elem() needs to be called directly here due to the extra argument.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
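As a reading aid (not part of the patch), here is a minimal sketch of how an fd-array update path can thread the map's own struct file through the extended callback. The bpf_fd_array_map_update_elem() signature is taken from the diff below; the body is an illustrative assumption, not the exact kernel code:

/* Sketch only: assumes an fd-array map laid out like struct bpf_array
 * with a ->ptrs[] table of resolved objects.
 */
#include <linux/bpf.h>
#include <linux/file.h>
#include <linux/err.h>

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key, ufd = *(u32 *)value;
	void *new_ptr, *old_ptr;

	if (map_flags != BPF_ANY)
		return -EINVAL;
	if (index >= array->map.max_entries)
		return -E2BIG;

	/* Resolve the fd from the value; the map's struct file is now
	 * available to the map type's callback as well, so the resolved
	 * object can be tied back to the map that references it.
	 */
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	/* Swap in the new object and drop the old one, if any. */
	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}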
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/bpf.h	12
1 file changed, 9 insertions(+), 3 deletions(-)
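Since map_update_elem() is no longer wired up for prog and perf event arrays, the syscall path has to dispatch to bpf_fd_array_map_update_elem() directly, mirroring the per-cpu special cases. A hedged sketch of what that dispatch can look like; the surrounding syscall handler is not part of this diff, and f.file is assumed to be the fd reference held on the map itself:

/* Sketch of the syscall-side dispatch implied by the commit message. */
if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
    map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
	rcu_read_lock();
	err = bpf_fd_array_map_update_elem(map, f.file, key, value,
					   attr->flags);
	rcu_read_unlock();
} else {
	/* All other map types keep going through the ops table. */
	rcu_read_lock();
	err = map->ops->map_update_elem(map, key, value, attr->flags);
	rcu_read_unlock();
}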
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 29b5a1ae22cb..d7b43e73fe87 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -29,8 +29,9 @@ struct bpf_map_ops {
 	int (*map_delete_elem)(struct bpf_map *map, void *key);
 
 	/* funcs called by prog_array and perf_event_array map */
-	void *(*map_fd_get_ptr) (struct bpf_map *map, int fd);
-	void (*map_fd_put_ptr) (void *ptr);
+	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
+				int fd);
+	void (*map_fd_put_ptr)(void *ptr);
 };
 
 struct bpf_map {
@@ -169,7 +170,7 @@ struct bpf_array {
 
 u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
-void bpf_fd_array_map_clear(struct bpf_map *map);
+
 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -207,8 +208,13 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
 			   u64 flags);
 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 			    u64 flags);
+
 int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
+				 void *key, void *value, u64 map_flags);
+void bpf_fd_array_map_clear(struct bpf_map *map);
+
 /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
  * forced to use 'long' read/writes to try to atomically copy long counters.
  * Best-effort only. No barriers here, since it _will_ race with concurrent