From 39a91ca75fb7bd9318e484704e0e44a2bda1744a Mon Sep 17 00:00:00 2001
From: "Jason A. Donenfeld" <Jason@zx2c4.com>
Date: Tue, 4 Jul 2017 05:52:55 +0200
Subject: ratelimiter: use kvzalloc for hash table allocation

---
 src/compat/compat.h | 35 +++++++++++++++++++++++++++++++++++
 src/ratelimiter.c   | 15 ++++---------
 2 files changed, 41 insertions(+), 9 deletions(-)

diff --git a/src/compat/compat.h b/src/compat/compat.h
index 1da51f1..38803b2 100644
--- a/src/compat/compat.h
+++ b/src/compat/compat.h
@@ -300,6 +300,41 @@ static inline u64 ktime_get_ns(void)
 #define inet_confirm_addr(a,b,c,d,e) inet_confirm_addr(b,c,d,e)
 #endif
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+static inline void *kvmalloc(size_t size, gfp_t flags)
+{
+	gfp_t kmalloc_flags = flags;
+	void *ret;
+	if (size > PAGE_SIZE) {
+		kmalloc_flags |= __GFP_NOWARN;
+		if (!(kmalloc_flags & __GFP_REPEAT) || (size <= PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+			kmalloc_flags |= __GFP_NORETRY;
+	}
+	ret = kmalloc(size, kmalloc_flags);
+	if (ret || size <= PAGE_SIZE)
+		return ret;
+	return __vmalloc(size, flags, PAGE_KERNEL);
+}
+static inline void *kvzalloc(size_t size, gfp_t flags)
+{
+	return kvmalloc(size, flags | __GFP_ZERO);
+}
+#endif
+
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 41)) && !defined(ISUBUNTU1404)
+#include <linux/mm.h>
+static inline void kvfree(const void *addr)
+{
+	if (is_vmalloc_addr(addr))
+		vfree(addr);
+	else
+		kfree(addr);
+}
+#endif
+
 /* https://lkml.org/lkml/2017/6/23/790 */
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #include <net/netfilter/nf_conntrack.h>
diff --git a/src/ratelimiter.c b/src/ratelimiter.c
index b3fdd4c..ebad1f4 100644
--- a/src/ratelimiter.c
+++ b/src/ratelimiter.c
@@ -2,9 +2,8 @@
 
 #include "ratelimiter.h"
 #include <linux/siphash.h>
-#include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/hashtable.h>
 #include <net/ip.h>
 
 static struct kmem_cache *entry_cache;
@@ -154,18 +153,16 @@ int ratelimiter_init(void)
 	table_size = (totalram_pages > (1 << 30) / PAGE_SIZE) ? 8192 : max_t(unsigned long, 16, roundup_pow_of_two((totalram_pages << PAGE_SHIFT) / (1 << 14) / sizeof(struct hlist_head)));
 	max_entries = table_size * 8;
 
-	table_v4 = vmalloc(table_size * sizeof(struct hlist_head));
+	table_v4 = kvzalloc(table_size * sizeof(struct hlist_head), GFP_KERNEL);
 	if (!table_v4)
 		goto err_kmemcache;
-	__hash_init(table_v4, table_size);
 
 #if IS_ENABLED(CONFIG_IPV6)
-	table_v6 = vmalloc(table_size * sizeof(struct hlist_head));
+	table_v6 = kvzalloc(table_size * sizeof(struct hlist_head), GFP_KERNEL);
 	if (!table_v6) {
-		vfree(table_v4);
+		kvfree(table_v4);
 		goto err_kmemcache;
 	}
-	__hash_init(table_v6, table_size);
 #endif
 
 	queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);
@@ -187,9 +184,9 @@ void ratelimiter_uninit(void)
 	cancel_delayed_work_sync(&gc_work);
 	gc_entries(NULL);
 	synchronize_rcu();
-	vfree(table_v4);
+	kvfree(table_v4);
 #if IS_ENABLED(CONFIG_IPV6)
-	vfree(table_v6);
+	kvfree(table_v6);
 #endif
 	kmem_cache_destroy(entry_cache);
 }
-- 
cgit v1.2.3-59-g8ed1b