From d814238650b16d9fd990ce604feb94b6ee31f9eb Mon Sep 17 00:00:00 2001
From: Sultan Alsawaf
Date: Sat, 2 Feb 2019 14:13:03 -0800
Subject: hashtables: decouple hashtable allocations from the main device
 allocation

The hashtable allocations are quite large, and cause the device allocation
in the net framework to stall sometimes while it tries to find a contiguous
region that can fit the device struct:

[<0000000000000000>] __switch_to+0x94/0xb8
[<0000000000000000>] __alloc_pages_nodemask+0x764/0x7e8
[<0000000000000000>] kmalloc_order+0x20/0x40
[<0000000000000000>] __kmalloc+0x144/0x1a0
[<0000000000000000>] alloc_netdev_mqs+0x5c/0x368
[<0000000000000000>] rtnl_create_link+0x48/0x180
[<0000000000000000>] rtnl_newlink+0x410/0x708
[<0000000000000000>] rtnetlink_rcv_msg+0x190/0x1f8
[<0000000000000000>] netlink_rcv_skb+0x4c/0xf8
[<0000000000000000>] rtnetlink_rcv+0x30/0x40
[<0000000000000000>] netlink_unicast+0x18c/0x208
[<0000000000000000>] netlink_sendmsg+0x19c/0x348
[<0000000000000000>] sock_sendmsg+0x3c/0x58
[<0000000000000000>] ___sys_sendmsg+0x290/0x2b0
[<0000000000000000>] __sys_sendmsg+0x58/0xa0
[<0000000000000000>] SyS_sendmsg+0x10/0x20
[<0000000000000000>] el0_svc_naked+0x34/0x38
[<0000000000000000>] 0xffffffffffffffff

To fix the allocation stalls, decouple the hashtable allocations from the
device allocation and allocate the hashtables with kvmalloc's implicit
__GFP_NORETRY so that the allocations fall back to vmalloc with little
resistance.

Signed-off-by: Sultan Alsawaf
---
 src/hashtables.c | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

(limited to 'src/hashtables.c')

diff --git a/src/hashtables.c b/src/hashtables.c
index 18cac91..8aedc17 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -19,11 +19,17 @@ static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
 	return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];
 }
 
-void wg_pubkey_hashtable_init(struct pubkey_hashtable *table)
+struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void)
 {
+	struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);
+
+	if (!table)
+		return NULL;
+
 	get_random_bytes(&table->key, sizeof(table->key));
 	hash_init(table->hashtable);
 	mutex_init(&table->lock);
+	return table;
 }
 
 void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
@@ -74,10 +80,16 @@ static struct hlist_head *index_bucket(struct index_hashtable *table,
 				 (HASH_SIZE(table->hashtable) - 1)];
 }
 
-void wg_index_hashtable_init(struct index_hashtable *table)
+struct index_hashtable *wg_index_hashtable_alloc(void)
 {
+	struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);
+
+	if (!table)
+		return NULL;
+
 	hash_init(table->hashtable);
 	spin_lock_init(&table->lock);
+	return table;
 }
 
 /* At the moment, we limit ourselves to 2^20 total peers, which generally might
-- 
cgit v1.2.3-59-g8ed1b
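
The diff above is limited to src/hashtables.c, so the caller-side half of the
decoupling is not shown. A minimal sketch of how a device-creation path might
consume the new allocators, assuming the device struct gains peer_hashtable and
index_hashtable pointer members and that the tables are released with kvfree(),
could look like this (struct and function names here are illustrative, not part
of this patch):

/*
 * Sketch only: with the tables allocated separately, the device struct
 * holds pointers to them, device creation allocates each table and
 * unwinds on failure, and teardown releases both with kvfree().
 */
#include <linux/errno.h>
#include <linux/mm.h>		/* kvfree() */

#include "hashtables.h"		/* assumed home of the _alloc() declarations */

struct wg_device_sketch {
	struct pubkey_hashtable *peer_hashtable;
	struct index_hashtable *index_hashtable;
};

static int wg_device_tables_alloc(struct wg_device_sketch *wg)
{
	wg->peer_hashtable = wg_pubkey_hashtable_alloc();
	if (!wg->peer_hashtable)
		return -ENOMEM;

	wg->index_hashtable = wg_index_hashtable_alloc();
	if (!wg->index_hashtable) {
		kvfree(wg->peer_hashtable);
		return -ENOMEM;
	}
	return 0;
}

static void wg_device_tables_free(struct wg_device_sketch *wg)
{
	kvfree(wg->index_hashtable);
	kvfree(wg->peer_hashtable);
}

Since kvmalloc() may return vmalloc()ed memory, the tables must be freed with
kvfree() rather than kfree().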