author	Daniel Borkmann <daniel@iogearbox.net>	2017-08-21 01:48:12 +0200
committer	David S. Miller <davem@davemloft.net>	2017-08-20 19:45:54 -0700
commit	274043c6c95636e62f5b2514e78fdba82eb47601 (patch)
tree	ff2739d650ef615ede783390a9a784859e0bc3e7 /kernel
parent	Merge tag 'mlx5-updates-2017-08-17-V2' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux (diff)
bpf: fix double free from dev_map_notification()
In the current code, dev_map_free() can still race with dev_map_notification().

In dev_map_free(), we remove dtab from the list of dtabs after we purged all
entries from it. However, we don't do xchg() with NULL or the like, so the
entry at that point is still pointing to the device. If an unregister
notification comes in at the same time, we therefore risk a double-free,
since the pointer is still present in the map, and then pushed again to
__dev_map_entry_free().

All this is completely unnecessary. Just remove the dtab from the list right
before the synchronize_rcu(), so all outstanding readers from the notifier
list have finished by then; thus we don't need to deal with this corner case
anymore and also wouldn't need to nullify dev entries. This is fine because
we iterate over the map releasing all entries and therefore dev references
anyway.

Fixes: 4cc7b9544b9a ("bpf: devmap fix mutex in rcu critical section")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
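For illustration, here is a condensed sketch of the teardown ordering that dev_map_free() follows with this patch applied: unpublish first, wait out the grace period, then free. It is a simplified rendering based on the diff below, not the verbatim upstream function; the per-entry release (dev_put()/kfree()) is abbreviated from the surrounding devmap code.

/* Condensed sketch (not the verbatim upstream function) of the ordering
 * dev_map_free() follows with this patch applied.
 */
static void dev_map_free_sketch(struct bpf_dtab *dtab)
{
	int i;

	/* 1. Unpublish: once the dtab is off the RCU-protected list, a
	 *    concurrent dev_map_notification() can no longer find it.
	 */
	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	/* 2. Wait: any notifier that already found the dtab inside its RCU
	 *    read-side critical section has finished by the time this returns.
	 */
	synchronize_rcu();

	/* 3. Free: the remaining entries are now owned exclusively by this
	 *    path, so no pointer can also be handed to __dev_map_entry_free().
	 */
	for (i = 0; i < dtab->map.max_entries; i++) {
		struct bpf_dtab_netdev *dev = dtab->netdev_map[i];

		if (!dev)
			continue;
		dev_put(dev->dev);	/* entry release abbreviated */
		kfree(dev);
	}
}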
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/bpf/devmap.c	12
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 67f4f00ce33a..fa08181d1c3d 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -148,6 +148,11 @@ static void dev_map_free(struct bpf_map *map)
 	 * no further reads against netdev_map. It does __not__ ensure pending
 	 * flush operations (if any) are complete.
 	 */
+
+	spin_lock(&dev_map_lock);
+	list_del_rcu(&dtab->list);
+	spin_unlock(&dev_map_lock);
+
 	synchronize_rcu();
 
 	/* To ensure all pending flush operations have completed wait for flush
@@ -162,10 +167,6 @@ static void dev_map_free(struct bpf_map *map)
 			cpu_relax();
 	}
 
-	/* Although we should no longer have datapath or bpf syscall operations
-	 * at this point we we can still race with netdev notifier, hence the
-	 * lock.
-	 */
 	for (i = 0; i < dtab->map.max_entries; i++) {
 		struct bpf_dtab_netdev *dev;
 
@@ -180,9 +181,6 @@ static void dev_map_free(struct bpf_map *map)
 	/* At this point bpf program is detached and all pending operations
 	 * _must_ be complete
 	 */
-	spin_lock(&dev_map_lock);
-	list_del_rcu(&dtab->list);
-	spin_unlock(&dev_map_lock);
 	free_percpu(dtab->flush_needed);
 	bpf_map_area_free(dtab->netdev_map);
 	kfree(dtab);
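For context, the reader side the commit message refers to looks roughly like the sketch below: on NETDEV_UNREGISTER, dev_map_notification() walks the RCU-protected list of devmaps under rcu_read_lock() and defers entry freeing to __dev_map_entry_free(). Identifiers such as dev_map_list and the dev->dev->ifindex / dev->rcu fields are reproduced from memory of the devmap code of that era, so treat this as an illustration rather than the exact source.

/* Simplified sketch of the notifier (reader) side; not the verbatim source. */
static int dev_map_notification_sketch(struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_OK;

	/* The whole walk sits inside an RCU read-side critical section, which
	 * is what makes list_del_rcu() + synchronize_rcu() in dev_map_free()
	 * sufficient to exclude it.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(dtab, &dev_map_list, list) {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev, *old;

			dev = READ_ONCE(dtab->netdev_map[i]);
			if (!dev || dev->dev->ifindex != netdev->ifindex)
				continue;

			/* Clear the slot and free the entry after a grace
			 * period; without the patch above, dev_map_free()
			 * could free the same entry a second time.
			 */
			old = cmpxchg(&dtab->netdev_map[i], dev, NULL);
			if (old == dev)
				call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	rcu_read_unlock();

	return NOTIFY_OK;
}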