author    Florian Westphal <fw@strlen.de>  2016-08-09 12:16:06 +0200
committer Steffen Klassert <steffen.klassert@secunet.com>  2016-08-10 11:23:23 +0200
commit    df7274eb70b7c8488170ebe8757dd94647a8e1e5 (patch)
tree      9c750e4e1ef1315fcc8a8dd517591b97b94cf955 /net/xfrm
parent    xfrm: state: use atomic_inc_not_zero to increment refcount (diff)
xfrm: state: delay freeing until rcu grace period has elapsed
The hash table backend memory and the state structs are freed via
kfree/vfree. Once we only rely on RCU during lookups, we have to make
sure no other CPU is currently accessing this before doing the free.

Free operations already happen from a worker, so we can use
synchronize_rcu to wait until concurrent readers are done.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
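A minimal sketch of the update-side pattern the patch applies: publish the
replacement table first, wait out the grace period, then free. The function
name free_old_hash() and the use of kvfree() are illustrative assumptions,
not the actual xfrm code, which frees through xfrm_hash_free() (kfree or
vfree depending on allocation size).

    #include <linux/rcupdate.h>	/* synchronize_rcu() */
    #include <linux/mm.h>		/* kvfree() */

    /* Hedged sketch: the new table is assumed to be already visible
     * to readers (published under the state lock); only then may the
     * old backing memory be reclaimed.
     */
    static void free_old_hash(struct hlist_head *old_table)
    {
    	/* Wait for every pre-existing RCU read-side critical
    	 * section; afterwards no CPU can still hold a pointer into
    	 * old_table.  synchronize_rcu() may sleep, which is safe
    	 * because, as the changelog notes, frees run from a worker.
    	 */
    	synchronize_rcu();
    	kvfree(old_table);
    }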
Diffstat (limited to 'net/xfrm')
-rw-r--r-- net/xfrm/xfrm_state.c | 5 +++++
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 84c1db6254d5..8e373876924f 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -146,6 +146,9 @@ static void xfrm_hash_resize(struct work_struct *work)
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
osize = (ohashmask + 1) * sizeof(struct hlist_head);
+
+ synchronize_rcu();
+
xfrm_hash_free(odst, osize);
xfrm_hash_free(osrc, osize);
xfrm_hash_free(ospi, osize);
@@ -369,6 +372,8 @@ static void xfrm_state_gc_task(struct work_struct *work)
hlist_move_list(&net->xfrm.state_gc_list, &gc_list);
spin_unlock_bh(&xfrm_state_gc_lock);
+ synchronize_rcu();
+
hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
xfrm_state_gc_destroy(x);
}
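For context, the read side that these synchronize_rcu() calls wait for
looks roughly like the sketch below. It is an illustration assuming the
byspi hash chain and the atomic_inc_not_zero refcount scheme from the
parent commit; lookup_by_spi() is a hypothetical name, not the actual
xfrm lookup code.

    #include <linux/rculist.h>	/* hlist_for_each_entry_rcu() */
    #include <net/xfrm.h>	/* struct xfrm_state */

    /* Hypothetical RCU lookup: walk the hash chain locklessly and
     * pin a match before leaving the read-side critical section.
     */
    static struct xfrm_state *lookup_by_spi(struct hlist_head *head,
    					    __be32 spi)
    {
    	struct xfrm_state *x;

    	rcu_read_lock();
    	hlist_for_each_entry_rcu(x, head, byspi) {
    		if (x->id.spi != spi)
    			continue;
    		if (!atomic_inc_not_zero(&x->refcnt))
    			continue;	/* being destroyed, skip */
    		rcu_read_unlock();
    		return x;		/* caller holds a reference */
    	}
    	rcu_read_unlock();
    	return NULL;
    }

Any such reader that entered rcu_read_lock() before the free path ran is
guaranteed to have called rcu_read_unlock() by the time synchronize_rcu()
returns, so the subsequent kfree/vfree cannot pull memory out from under it.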