author     Eric W. Biederman <ebiederm@xmission.com>  2016-05-13 21:18:52 -0500
committer  Pablo Neira Ayuso <pablo@netfilter.org>    2016-05-25 11:54:22 +0200
commit     dc3ee32e96d74dd6c80eed63af5065cb75899299 (patch)
tree       36a1998e4e612a8583a6b596130825312afd1cc6 /net/netfilter/nf_queue.c
parent     netfilter: conntrack: remove leftover binary sysctl define (diff)
netfilter: nf_queue: Make the queue_handler pernet
Florian Westphal reported:

> Under full load (unshare() in loop -> OOM conditions) we can
> get kernel panic:
>
> BUG: unable to handle kernel NULL pointer dereference at 0000000000000008
> IP: [<ffffffff81476c85>] nfqnl_nf_hook_drop+0x35/0x70
> [..]
> task: ffff88012dfa3840 ti: ffff88012dffc000 task.ti: ffff88012dffc000
> RIP: 0010:[<ffffffff81476c85>] [<ffffffff81476c85>] nfqnl_nf_hook_drop+0x35/0x70
> RSP: 0000:ffff88012dfffd80 EFLAGS: 00010206
> RAX: 0000000000000008 RBX: ffffffff81add0c0 RCX: ffff88013fd80000
> [..]
> Call Trace:
>  [<ffffffff81474d98>] nf_queue_nf_hook_drop+0x18/0x20
>  [<ffffffff814738eb>] nf_unregister_net_hook+0xdb/0x150
>  [<ffffffff8147398f>] netfilter_net_exit+0x2f/0x60
>  [<ffffffff8141b088>] ops_exit_list.isra.4+0x38/0x60
>  [<ffffffff8141b652>] setup_net+0xc2/0x120
>  [<ffffffff8141bd09>] copy_net_ns+0x79/0x120
>  [<ffffffff8106965b>] create_new_namespaces+0x11b/0x1e0
>  [<ffffffff810698a7>] unshare_nsproxy_namespaces+0x57/0xa0
>  [<ffffffff8104baa2>] SyS_unshare+0x1b2/0x340
>  [<ffffffff81608276>] entry_SYSCALL_64_fastpath+0x1e/0xa8
> Code: 65 00 48 89 e5 41 56 41 55 41 54 53 83 e8 01 48 8b 97 70 12 00 00 48 98 49 89 f4 4c 8b 74 c2 18 4d 8d 6e 08 49 81 c6 88 00 00 00 <49> 8b 5d 00 48 85 db 74 1a 48 89 df 4c 89 e2 48 c7 c6 90 68 47

The simple fix for this requires a new pernet variable for struct
nf_queue that indicates when it is safe to use the dynamically
allocated nf_queue state.

As we need a variable anyway, make nf_register_queue_handler and
nf_unregister_queue_handler pernet. This allows the existing logic in
the nfnetlink_queue module for deciding when it is safe to use the
state to be reused with no changes, except for making it per net.

The synchronize_rcu from nf_unregister_queue_handler is moved to a new
function nfnl_queue_net_exit_batch so that the worst case of having a
synchronize_rcu in the pernet exit path is not experienced in batch
mode.

Reported-by: Florian Westphal <fw@strlen.de>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Acked-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
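The hunks below are limited to net/netfilter/nf_queue.c, so the new pernet
variable itself is not visible here. As a minimal sketch of what the message
describes, the handler pointer moves into the per-netns netfilter state; the
field name is inferred from the net->nf.queue_handler accesses in the diff
(the actual hunk lives in include/net/netns/netfilter.h and is not quoted):

/* sketch: per-netns netfilter state gains the queue handler pointer */
struct netns_nf {
	/* ... existing members ... */
	const struct nf_queue_handler __rcu *queue_handler;
};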
Diffstat (limited to 'net/netfilter/nf_queue.c')
-rw-r--r--  net/netfilter/nf_queue.c  17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 5baa8e24e6ac..b19ad20a705c 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -26,23 +26,21 @@
  * Once the queue is registered it must reinject all packets it
  * receives, no matter what.
  */
-static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
 
 /* return EBUSY when somebody else is registered, return EEXIST if the
  * same handler is registered, return 0 in case of success. */
-void nf_register_queue_handler(const struct nf_queue_handler *qh)
+void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
 {
 	/* should never happen, we only have one queueing backend in kernel */
-	WARN_ON(rcu_access_pointer(queue_handler));
-	rcu_assign_pointer(queue_handler, qh);
+	WARN_ON(rcu_access_pointer(net->nf.queue_handler));
+	rcu_assign_pointer(net->nf.queue_handler, qh);
 }
 EXPORT_SYMBOL(nf_register_queue_handler);
 
 /* The caller must flush their queue before this */
-void nf_unregister_queue_handler(void)
+void nf_unregister_queue_handler(struct net *net)
 {
-	RCU_INIT_POINTER(queue_handler, NULL);
-	synchronize_rcu();
+	RCU_INIT_POINTER(net->nf.queue_handler, NULL);
 }
 EXPORT_SYMBOL(nf_unregister_queue_handler);
 
@@ -103,7 +101,7 @@ void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
 	const struct nf_queue_handler *qh;
 
 	rcu_read_lock();
-	qh = rcu_dereference(queue_handler);
+	qh = rcu_dereference(net->nf.queue_handler);
 	if (qh)
 		qh->nf_hook_drop(net, ops);
 	rcu_read_unlock();
@@ -122,9 +120,10 @@ int nf_queue(struct sk_buff *skb,
 	struct nf_queue_entry *entry = NULL;
 	const struct nf_afinfo *afinfo;
 	const struct nf_queue_handler *qh;
+	struct net *net = state->net;
 
 	/* QUEUE == DROP if no one is waiting, to be safe. */
-	qh = rcu_dereference(queue_handler);
+	qh = rcu_dereference(net->nf.queue_handler);
 	if (!qh) {
 		status = -ESRCH;
 		goto err;
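For the caller side (also outside this file): per the commit message,
nfnetlink_queue registers and unregisters its handler per net, and the
synchronize_rcu() removed above moves into a new nfnl_queue_net_exit_batch().
A hedged sketch of the wiring in net/netfilter/nfnetlink_queue.c follows;
only the nfnl_queue_net_exit_batch name comes from the message, while the
init/exit bodies and the nfqh handler name are assumptions:

static int __net_init nfnl_queue_net_init(struct net *net)
{
	/* ... per-netns instance setup ... */
	nf_register_queue_handler(net, &nfqh);	/* new pernet signature */
	return 0;
}

static void __net_exit nfnl_queue_net_exit(struct net *net)
{
	nf_unregister_queue_handler(net);	/* no grace period here anymore */
	/* ... per-netns instance teardown ... */
}

static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
{
	/* one RCU grace period for the whole batch of exiting namespaces,
	 * instead of one synchronize_rcu() per netns in the exit path */
	synchronize_rcu();
}

static struct pernet_operations nfnl_queue_net_ops = {
	.init		= nfnl_queue_net_init,
	.exit		= nfnl_queue_net_exit,
	.exit_batch	= nfnl_queue_net_exit_batch,
};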