Diffstat (limited to 'src/compat/compat.h')
-rw-r--r--  src/compat/compat.h  489
 1 file changed, 368 insertions, 121 deletions
diff --git a/src/compat/compat.h b/src/compat/compat.h
index ba2f028..824f57c 100644
--- a/src/compat/compat.h
+++ b/src/compat/compat.h
@@ -1,6 +1,6 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/
#ifndef _WG_COMPAT_H
@@ -14,11 +14,15 @@
#ifdef RHEL_MAJOR
#if RHEL_MAJOR == 7
#define ISRHEL7
+#elif RHEL_MAJOR == 8
+#define ISRHEL8
#endif
#endif
#ifdef UTS_UBUNTU_RELEASE_ABI
#if LINUX_VERSION_CODE == KERNEL_VERSION(3, 13, 11)
#define ISUBUNTU1404
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
+#define ISUBUNTU1604
#endif
#endif
#ifdef CONFIG_SUSE_KERNEL
@@ -43,6 +47,7 @@
#endif
#include <linux/cache.h>
+#include <linux/init.h>
#ifndef __ro_after_init
#define __ro_after_init __read_mostly
#endif
@@ -51,6 +56,13 @@
#ifndef READ_ONCE
#define READ_ONCE ACCESS_ONCE
#endif
+#ifndef WRITE_ONCE
+#ifdef ACCESS_ONCE_RW
+#define WRITE_ONCE(p, v) (ACCESS_ONCE_RW(p) = (v))
+#else
+#define WRITE_ONCE(p, v) (ACCESS_ONCE(p) = (v))
+#endif
+#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
#include "udp_tunnel/udp_tunnel_partial_compat.h"
@@ -78,19 +90,12 @@
(LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 27) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) || \
(LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 8) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) || \
(LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 40) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || \
- (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 54))) && !defined(ISUBUNTU1404)
+ (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 54))) && !defined(ISUBUNTU1404) && (!defined(ISRHEL7) || RHEL_MINOR < 7) /* TODO: remove < 7 workaround once CentOS 7.7 comes out. */
#include <linux/if.h>
#include <net/ip_tunnels.h>
#define IP6_ECN_set_ce(a, b) IP6_ECN_set_ce(b)
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
-#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a)
-#define time_is_after_jiffies64(a) time_before64(get_jiffies_64(), a)
-#define time_is_before_eq_jiffies64(a) time_after_eq64(get_jiffies_64(), a)
-#define time_is_after_eq_jiffies64(a) time_before_eq64(get_jiffies_64(), a)
-#endif
-
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && IS_ENABLED(CONFIG_IPV6) && !defined(ISRHEL7)
#include <net/ipv6.h>
struct ipv6_stub_type {
@@ -109,7 +114,7 @@ static const struct ipv6_stub_type *ipv6_stub = &ipv6_stub_impl;
#include <net/addrconf.h>
static inline bool ipv6_mod_enabled(void)
{
- return ipv6_stub->udpv6_encap_enable != NULL;
+ return ipv6_stub != NULL && ipv6_stub->udpv6_encap_enable != NULL;
}
#endif
@@ -126,7 +131,7 @@ static inline void skb_reset_tc(struct sk_buff *skb)
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
#include <linux/random.h>
#include <linux/siphash.h>
-static inline u32 __wgcompat_get_random_u32(void)
+static inline u32 __compat_get_random_u32(void)
{
static siphash_key_t key;
static u32 counter = 0;
@@ -141,7 +146,7 @@ static inline u32 __wgcompat_get_random_u32(void)
#endif
return siphash_2u32(counter++, get_random_int(), &key);
}
-#define get_random_u32 __wgcompat_get_random_u32
+#define get_random_u32 __compat_get_random_u32
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) && !defined(ISRHEL7)
@@ -149,6 +154,7 @@ static inline void netif_keep_dst(struct net_device *dev)
{
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
+#define COMPAT_CANNOT_USE_CSUM_LEVEL
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && !defined(ISRHEL7)
@@ -180,7 +186,7 @@ static inline void netif_keep_dst(struct net_device *dev)
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && !defined(ISRHEL7)
#include "checksum/checksum_partial_compat.h"
-static inline void *our_pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
+static inline void *__compat_pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
if (tail != skb) {
skb->data_len += len;
@@ -188,7 +194,7 @@ static inline void *our_pskb_put(struct sk_buff *skb, struct sk_buff *tail, int
}
return skb_put(tail, len);
}
-#define pskb_put our_pskb_put
+#define pskb_put __compat_pskb_put
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) && !defined(ISRHEL7)
@@ -215,10 +221,11 @@ static inline void skb_scrub_packet(struct sk_buff *skb, bool xnet)
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) || defined(ISUBUNTU1404)) && !defined(ISRHEL7)
#include <linux/random.h>
-static inline u32 prandom_u32_max(u32 ep_ro)
+static inline u32 __compat_prandom_u32_max(u32 ep_ro)
{
- return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
+ return (u32)(((u64)prandom_u32() * ep_ro) >> 32);
}
+#define prandom_u32_max __compat_prandom_u32_max
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 75) && !defined(ISRHEL7)
@@ -271,8 +278,8 @@ static inline void memzero_explicit(void *s, size_t count)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && !defined(ISRHEL7)
-static const struct in6_addr our_in6addr_any = IN6ADDR_ANY_INIT;
-#define in6addr_any our_in6addr_any
+static const struct in6_addr __compat_in6addr_any = IN6ADDR_ANY_INIT;
+#define in6addr_any __compat_in6addr_any
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) && !defined(ISOPENSUSE15)
@@ -318,6 +325,59 @@ static inline int wait_for_random_bytes(void)
return 0;
}
#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) && !defined(ISRHEL8)
+#include <linux/random.h>
+#include <linux/slab.h>
+struct rng_is_initialized_callback {
+ struct random_ready_callback cb;
+ atomic_t *rng_state;
+};
+static inline void rng_is_initialized_callback(struct random_ready_callback *cb)
+{
+ struct rng_is_initialized_callback *rdy = container_of(cb, struct rng_is_initialized_callback, cb);
+ atomic_set(rdy->rng_state, 2);
+ kfree(rdy);
+}
+static inline bool rng_is_initialized(void)
+{
+ static atomic_t rng_state = ATOMIC_INIT(0);
+
+ if (atomic_read(&rng_state) == 2)
+ return true;
+
+ if (atomic_cmpxchg(&rng_state, 0, 1) == 0) {
+ int ret;
+ struct rng_is_initialized_callback *rdy = kmalloc(sizeof(*rdy), GFP_ATOMIC);
+ if (!rdy) {
+ atomic_set(&rng_state, 0);
+ return false;
+ }
+ rdy->cb.owner = THIS_MODULE;
+ rdy->cb.func = rng_is_initialized_callback;
+ rdy->rng_state = &rng_state;
+ ret = add_random_ready_callback(&rdy->cb);
+ if (ret)
+ kfree(rdy);
+ if (ret == -EALREADY) {
+ atomic_set(&rng_state, 2);
+ return true;
+ } else if (ret)
+ atomic_set(&rng_state, 0);
+ return false;
+ }
+ return false;
+}
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)
+/* This is a disaster. Without this API, we really have no way of
+ * knowing if it's initialized. We just return that it has and hope
+ * for the best... */
+static inline bool rng_is_initialized(void)
+{
+ return true;
+}
+#endif
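
A minimal caller sketch (illustrative only, not part of this diff; the helper name example_seed_secret() and its use are invented) of how the backported rng_is_initialized() above is meant to be consumed: poll it before trusting get_random_bytes(), and have the caller retry later if the pool is not yet seeded.

	#include <linux/errno.h>
	#include <linux/random.h>
	#include <linux/types.h>

	static int example_seed_secret(u8 *secret, int len)
	{
		/* Either the native >= 4.19 helper or the compat shim above,
		 * which flips a private atomic once add_random_ready_callback()
		 * fires (or returns -EALREADY). */
		if (!rng_is_initialized())
			return -EAGAIN;	/* caller retries later */
		get_random_bytes(secret, len);
		return 0;
	}
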
+
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) && !defined(ISOPENSUSE15)
static inline int get_random_bytes_wait(void *buf, int nbytes)
{
@@ -333,17 +393,43 @@ static inline int get_random_bytes_wait(void *buf, int nbytes)
#define system_power_efficient_wq system_unbound_wq
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) && !defined(ISRHEL7)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)
#include <linux/ktime.h>
-static inline u64 ktime_get_ns(void)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+#include <linux/hrtimer.h>
+#ifndef ktime_get_real_ts64
+#define timespec64 timespec
+#define ktime_get_real_ts64 ktime_get_real_ts
+#endif
+#else
+#include <linux/timekeeping.h>
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+static inline u64 __compat_jiffies64_to_nsecs(u64 j)
{
- return ktime_to_ns(ktime_get());
+#if !(NSEC_PER_SEC % HZ)
+ return (NSEC_PER_SEC / HZ) * j;
+#else
+ return div_u64(j * HZ_TO_USEC_NUM, HZ_TO_USEC_DEN) * 1000;
+#endif
+}
+#define jiffies64_to_nsecs __compat_jiffies64_to_nsecs
+#endif
+static inline u64 ktime_get_coarse_boottime_ns(void)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+ return ktime_to_ns(ktime_get_boottime());
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 12) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 53)
+ return ktime_to_ns(ktime_mono_to_any(ns_to_ktime(jiffies64_to_nsecs(get_jiffies_64())), TK_OFFS_BOOT));
+#else
+ return ktime_to_ns(ktime_get_coarse_boottime());
+#endif
}
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
#include <linux/inetdevice.h>
-static inline __be32 our_confirm_addr_indev(struct in_device *in_dev, __be32 dst, __be32 local, int scope)
+static inline __be32 __compat_confirm_addr_indev(struct in_device *in_dev, __be32 dst, __be32 local, int scope)
{
int same = 0;
__be32 addr = 0;
@@ -370,17 +456,17 @@ static inline __be32 our_confirm_addr_indev(struct in_device *in_dev, __be32 dst
} endfor_ifa(in_dev);
return same ? addr : 0;
}
-static inline __be32 our_inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst, __be32 local, int scope)
+static inline __be32 __compat_inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst, __be32 local, int scope)
{
__be32 addr = 0;
struct net_device *dev;
if (in_dev)
- return our_confirm_addr_indev(in_dev, dst, local, scope);
+ return __compat_confirm_addr_indev(in_dev, dst, local, scope);
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
in_dev = __in_dev_get_rcu(dev);
if (in_dev) {
- addr = our_confirm_addr_indev(in_dev, dst, local, scope);
+ addr = __compat_confirm_addr_indev(in_dev, dst, local, scope);
if (addr)
break;
}
@@ -388,14 +474,14 @@ static inline __be32 our_inet_confirm_addr(struct net *net, struct in_device *in
rcu_read_unlock();
return addr;
}
-#define inet_confirm_addr our_inet_confirm_addr
+#define inet_confirm_addr __compat_inet_confirm_addr
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/slab.h>
-static inline void *kvmalloc_ours(size_t size, gfp_t flags)
+static inline void *__compat_kvmalloc(size_t size, gfp_t flags)
{
gfp_t kmalloc_flags = flags;
void *ret;
@@ -409,25 +495,25 @@ static inline void *kvmalloc_ours(size_t size, gfp_t flags)
return ret;
return __vmalloc(size, flags, PAGE_KERNEL);
}
-static inline void *kvzalloc_ours(size_t size, gfp_t flags)
+static inline void *__compat_kvzalloc(size_t size, gfp_t flags)
{
- return kvmalloc_ours(size, flags | __GFP_ZERO);
+ return __compat_kvmalloc(size, flags | __GFP_ZERO);
}
-#define kvmalloc kvmalloc_ours
-#define kvzalloc kvzalloc_ours
+#define kvmalloc __compat_kvmalloc
+#define kvzalloc __compat_kvzalloc
#endif
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 41)) && !defined(ISUBUNTU1404)
#include <linux/vmalloc.h>
#include <linux/mm.h>
-static inline void kvfree_ours(const void *addr)
+static inline void __compat_kvfree(const void *addr)
{
if (is_vmalloc_addr(addr))
vfree(addr);
else
kfree(addr);
}
-#define kvfree kvfree_ours
+#define kvfree __compat_kvfree
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 9)
@@ -436,7 +522,7 @@ static inline void kvfree_ours(const void *addr)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) && !defined(ISOPENSUSE15)
-#define newlink(a,b,c,d,e) newlink(a,b,c,d)
+#define wg_newlink(a,b,c,d,e) wg_newlink(a,b,c,d)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
@@ -481,40 +567,42 @@ static inline struct nlattr **genl_family_attrbuf(const struct genl_family *fami
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 2) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 16) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 65) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 101) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 84)
-#define ___COMPAT_NETLINK_DUMP_BLOCK { int ret; skb->end -= nlmsg_total_size(sizeof(int)); ret = get_device_dump_real(skb, cb); skb->end += nlmsg_total_size(sizeof(int)); return ret; }
-#define ___COMPAT_NETLINK_DUMP_OVERRIDE
+#define __COMPAT_NETLINK_DUMP_BLOCK { \
+ int ret; \
+ skb->end -= nlmsg_total_size(sizeof(int)); \
+ ret = wg_get_device_dump_real(skb, cb); \
+ skb->end += nlmsg_total_size(sizeof(int)); \
+ return ret; \
+}
+#define __COMPAT_NETLINK_DUMP_OVERRIDE
#else
-#define ___COMPAT_NETLINK_DUMP_BLOCK return get_device_dump_real(skb, cb);
+#define __COMPAT_NETLINK_DUMP_BLOCK return wg_get_device_dump_real(skb, cb);
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 8) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 25) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 87)
-#define get_device_dump(a, b) get_device_dump_real(a, b); \
-static int get_device_dump(a, b) { \
- struct wireguard_device *wg = (struct wireguard_device *)cb->args[0]; \
+#define wg_get_device_dump(a, b) wg_get_device_dump_real(a, b); \
+static int wg_get_device_dump(a, b) { \
+ struct wg_device *wg = (struct wg_device *)cb->args[0]; \
if (!wg) { \
- int ret = get_device_start(cb); \
+ int ret = wg_get_device_start(cb); \
if (ret) \
return ret; \
} \
- ___COMPAT_NETLINK_DUMP_BLOCK \
+ __COMPAT_NETLINK_DUMP_BLOCK \
} \
-static int get_device_dump_real(a, b)
+static int wg_get_device_dump_real(a, b)
#define COMPAT_CANNOT_USE_NETLINK_START
-#elif defined(___COMPAT_NETLINK_DUMP_OVERRIDE)
-#define get_device_dump(a, b) get_device_dump_real(a, b); \
-static int get_device_dump(a, b) { \
- ___COMPAT_NETLINK_DUMP_BLOCK \
+#elif defined(__COMPAT_NETLINK_DUMP_OVERRIDE)
+#define wg_get_device_dump(a, b) wg_get_device_dump_real(a, b); \
+static int wg_get_device_dump(a, b) { \
+ __COMPAT_NETLINK_DUMP_BLOCK \
} \
-static int get_device_dump_real(a, b)
+static int wg_get_device_dump_real(a, b)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
#define COMPAT_CANNOT_USE_IN6_DEV_GET
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
-#define COMPAT_CANNOT_USE_DEV_CNF
-#endif
-
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
#define COMPAT_CANNOT_USE_IFF_NO_QUEUE
#endif
@@ -526,7 +614,7 @@ static int get_device_dump_real(a, b)
#include <asm/xcr.h>
static inline int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
{
- return xgetbv(XCR_XFEATURE_ENABLED_MASK) & xfeatures_needed;
+ return boot_cpu_has(X86_FEATURE_XSAVE) && xgetbv(XCR_XFEATURE_ENABLED_MASK) & xfeatures_needed;
}
#endif
#ifndef XFEATURE_MASK_YMM
@@ -559,8 +647,8 @@ static inline int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_n
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)
-struct _____dummy_container { char dev; };
-#define netdev_notifier_info net_device *)data); __attribute((unused)) char _____dummy = ((struct _____dummy_container
+struct __compat_dummy_container { char dev; };
+#define netdev_notifier_info net_device *)data); __attribute((unused)) char __compat_dummy_variable = ((struct __compat_dummy_container
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
@@ -572,11 +660,6 @@ struct _____dummy_container { char dev; };
#define COMPAT_CANNOT_USE_AVX512
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
-#define timespec64 timespec
-#define getnstimeofday64 getnstimeofday
-#endif
-
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
#include <net/genetlink.h>
#define genl_dump_check_consistent(a, b) genl_dump_check_consistent(a, b, &genl_family)
@@ -591,6 +674,221 @@ static inline void *skb_put_data(struct sk_buff *skb, const void *data, unsigned
}
#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) && !defined(ISRHEL7)
+#define napi_complete_done(n, work_done) napi_complete(n)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
+#include <linux/netdevice.h>
+/* NAPI_STATE_SCHED gets set by netif_napi_add anyway, so this is safe.
+ * Also, kernels without NAPI_STATE_NO_BUSY_POLL don't have a call to
+ * napi_hash_add inside of netif_napi_add.
+ */
+#define NAPI_STATE_NO_BUSY_POLL NAPI_STATE_SCHED
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+#include <linux/atomic.h>
+#ifndef atomic_read_acquire
+#define atomic_read_acquire(v) ({ int __compat_p1 = atomic_read(v); smp_rmb(); __compat_p1; })
+#endif
+#ifndef atomic_set_release
+#define atomic_set_release(v, i) ({ smp_wmb(); atomic_set(v, i); })
+#endif
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+#include <linux/atomic.h>
+#ifndef atomic_read_acquire
+#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter)
+#endif
+#ifndef atomic_set_release
+#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i))
+#endif
+#endif
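
A hedged sketch (the names example_ready and example_payload are invented, not from this patch) of the publish/consume ordering these fallbacks preserve: the writer stores the data and then does a release store of the flag, the reader does an acquire load of the flag before touching the data.

	#include <linux/atomic.h>
	#include <linux/errno.h>

	static atomic_t example_ready = ATOMIC_INIT(0);
	static int example_payload;

	static void example_publish(int value)
	{
		example_payload = value;
		atomic_set_release(&example_ready, 1);	/* smp_wmb()+atomic_set() on < 3.14 */
	}

	static int example_consume(int *out)
	{
		if (!atomic_read_acquire(&example_ready))	/* atomic_read()+smp_rmb() on < 3.14 */
			return -EBUSY;
		*out = example_payload;	/* guaranteed to see the writer's store */
		return 0;
	}
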
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)
+static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
+{
+ while (words--) {
+ __le32_to_cpus(buf);
+ buf++;
+ }
+}
+static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
+{
+ while (words--) {
+ __cpu_to_le32s(buf);
+ buf++;
+ }
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
+#include <crypto/algapi.h>
+static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
+ unsigned int size)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+ __builtin_constant_p(size) &&
+ (size % sizeof(unsigned long)) == 0) {
+ unsigned long *d = (unsigned long *)dst;
+ unsigned long *s1 = (unsigned long *)src1;
+ unsigned long *s2 = (unsigned long *)src2;
+
+ while (size > 0) {
+ *d++ = *s1++ ^ *s2++;
+ size -= sizeof(unsigned long);
+ }
+ } else {
+ if (unlikely(dst != src1))
+ memmove(dst, src1, size);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+ crypto_xor(dst, src2, size);
+#else
+ __crypto_xor(dst, src2, size);
+#endif
+ }
+}
+#endif
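
For illustration only (example_apply_keystream() is a hypothetical name), the backported crypto_xor_cpy() above is a source-preserving XOR, out = in ^ keystream, with a word-at-a-time fast path when the length is a compile-time multiple of sizeof(unsigned long).

	#include <crypto/algapi.h>
	#include <linux/types.h>

	static void example_apply_keystream(u8 *out, const u8 *in,
					    const u8 *keystream, unsigned int len)
	{
		/* Falls back to memmove() plus crypto_xor()/__crypto_xor() internally. */
		crypto_xor_cpy(out, in, keystream, len);
	}
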
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+#define read_cpuid_part() read_cpuid_part_number()
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) && !defined(ISRHEL7)
+#define hlist_add_behind(a, b) hlist_add_after(b, a)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
+#define totalram_pages() totalram_pages
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
+struct __kernel_timespec {
+ int64_t tv_sec, tv_nsec;
+};
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)
+#include <linux/time64.h>
+#ifdef __kernel_timespec
+#undef __kernel_timespec
+struct __kernel_timespec {
+ int64_t tv_sec, tv_nsec;
+};
+#endif
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
+#include <linux/kernel.h>
+#ifndef ALIGN_DOWN
+#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
+#endif
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)
+#include <linux/skbuff.h>
+#define skb_probe_transport_header(a) skb_probe_transport_header(a, 0)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) && !defined(ISRHEL7)
+#define ignore_df local_df
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)
+/* Note that all intentional uses of the non-_bh variety need to explicitly
+ * undef these, conditionalized on COMPAT_CANNOT_DEPRECIATE_BH_RCU.
+ */
+#include <linux/rcupdate.h>
+static __always_inline void old_synchronize_rcu(void)
+{
+ synchronize_rcu();
+}
+static __always_inline void old_call_rcu(void *a, void *b)
+{
+ call_rcu(a, b);
+}
+static __always_inline void old_rcu_barrier(void)
+{
+ rcu_barrier();
+}
+#ifdef synchronize_rcu
+#undef synchronize_rcu
+#endif
+#ifdef call_rcu
+#undef call_rcu
+#endif
+#ifdef rcu_barrier
+#undef rcu_barrier
+#endif
+#define synchronize_rcu synchronize_rcu_bh
+#define call_rcu call_rcu_bh
+#define rcu_barrier rcu_barrier_bh
+#define COMPAT_CANNOT_DEPRECIATE_BH_RCU
+#endif
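
Per the comment above, any compilation unit that intentionally wants the plain (non-_bh) flavor has to undo this remap, conditioned on COMPAT_CANNOT_DEPRECIATE_BH_RCU; a hedged sketch of that opt-out, using the old_synchronize_rcu() escape hatch the block provides (example_flush() is an invented name):

	/* In a .c file that includes compat.h and needs the non-_bh grace period: */
	#ifdef COMPAT_CANNOT_DEPRECIATE_BH_RCU
	#undef synchronize_rcu
	#define synchronize_rcu old_synchronize_rcu
	#endif

	static void example_flush(void)
	{
		synchronize_rcu();	/* plain RCU on old kernels, unchanged on >= 5.1 */
	}
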
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 10) && !defined(ISRHEL8)
+static inline void skb_mark_not_on_list(struct sk_buff *skb)
+{
+ skb->next = NULL;
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) && !defined(ISRHEL8)
+#define NLA_EXACT_LEN NLA_UNSPEC
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)
+#define NLA_MIN_LEN NLA_UNSPEC
+#define COMPAT_CANNOT_INDIVIDUAL_NETLINK_OPS_POLICY
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) && defined(__aarch64__)
+#define cpu_have_named_feature(name) (elf_hwcap & (HWCAP_ ## name))
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)
+#include <linux/stddef.h>
+#ifndef offsetofend
+#define offsetofend(TYPE, MEMBER) (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
+#endif
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)
+#define genl_dumpit_info(cb) ({ \
+ struct { struct nlattr **attrs; } *a = (void *)((u8 *)cb->args + offsetofend(struct dump_ctx, next_allowedip)); \
+ BUILD_BUG_ON(sizeof(cb->args) < offsetofend(struct dump_ctx, next_allowedip) + sizeof(*a)); \
+ a->attrs = genl_family_attrbuf(&genl_family); \
+ if (nlmsg_parse(cb->nlh, GENL_HDRLEN + genl_family.hdrsize, a->attrs, genl_family.maxattr, device_policy, NULL) < 0) \
+ memset(a->attrs, 0, (genl_family.maxattr + 1) * sizeof(struct nlattr *)); \
+ a; \
+})
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 5) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 18)
+#define ipv6_dst_lookup_flow(a, b, c, d) ipv6_dst_lookup(a, b, &dst, c) + (void *)0 ?: dst
+#endif
+
+#if defined(ISUBUNTU1604)
+#include <linux/siphash.h>
+#ifndef _WG_LINUX_SIPHASH_H
+#define hsiphash_2u32 siphash_2u32
+#define hsiphash_3u32 siphash_3u32
+#define hsiphash_key_t siphash_key_t
+#endif
+#endif
+
+#ifdef CONFIG_VE
+#include <linux/netdev_features.h>
+#ifdef NETIF_F_VIRTUAL
+#undef NETIF_F_LLTX
+#define NETIF_F_LLTX (__NETIF_F(LLTX) | __NETIF_F(VIRTUAL))
+#endif
+#endif
+
+/* https://github.com/ClangBuiltLinux/linux/issues/7 */
+#if defined( __clang__) && (!defined(CONFIG_CLANG_VERSION) || CONFIG_CLANG_VERSION < 80000)
+#include <linux/bug.h>
+#undef BUILD_BUG_ON
+#define BUILD_BUG_ON(x)
+#endif
+
/* https://lkml.kernel.org/r/20170624021727.17835-1-Jason@zx2c4.com */
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/ip.h>
@@ -598,7 +896,9 @@ static inline void *skb_put_data(struct sk_buff *skb, const void *data, unsigned
#include <net/ipv6.h>
#include <net/icmp.h>
#include <net/netfilter/nf_conntrack.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)
#include <net/netfilter/nf_nat_core.h>
+#endif
static inline void new_icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
enum ip_conntrack_info ctinfo;
@@ -623,72 +923,19 @@ static inline void new_icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32
#define icmpv6_send(a,b,c,d) new_icmpv6_send(a,b,c,d)
#endif
-/* https://lkml.kernel.org/r/20180618234347.13282-1-Jason@zx2c4.com */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
-#include <linux/random.h>
-#include <linux/slab.h>
-struct rng_is_initialized_callback {
- struct random_ready_callback cb;
- atomic_t *rng_state;
-};
-static inline void rng_is_initialized_callback(struct random_ready_callback *cb)
-{
- struct rng_is_initialized_callback *rdy = container_of(cb, struct rng_is_initialized_callback, cb);
- atomic_set(rdy->rng_state, 2);
- kfree(rdy);
-}
-static inline bool rng_is_initialized(void)
-{
- static atomic_t rng_state = ATOMIC_INIT(0);
-
- if (atomic_read(&rng_state) == 2)
- return true;
-
- if (atomic_cmpxchg(&rng_state, 0, 1) == 0) {
- int ret;
- struct rng_is_initialized_callback *rdy = kmalloc(sizeof(*rdy), GFP_ATOMIC);
- if (!rdy) {
- atomic_set(&rng_state, 0);
- return false;
- }
- rdy->cb.owner = THIS_MODULE;
- rdy->cb.func = rng_is_initialized_callback;
- rdy->rng_state = &rng_state;
- ret = add_random_ready_callback(&rdy->cb);
- if (ret)
- kfree(rdy);
- if (ret == -EALREADY) {
- atomic_set(&rng_state, 2);
- return true;
- } else if (ret)
- atomic_set(&rng_state, 0);
- return false;
- }
- return false;
-}
-#else
-/* This is a disaster. Without this API, we really have no way of
- * knowing if it's initialized. We just return that it has and hope
- * for the best... */
-static inline bool rng_is_initialized(void)
-{
- return true;
-}
-#endif
-
/* PaX compatibility */
#ifdef CONSTIFY_PLUGIN
#include <linux/cache.h>
#undef __read_mostly
#define __read_mostly
#endif
-#if defined(RAP_PLUGIN) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
+#if (defined(RAP_PLUGIN) || defined(CONFIG_CFI_CLANG)) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
#include <linux/timer.h>
-#define expired_retransmit_handshake(a) expired_retransmit_handshake(unsigned long timer)
-#define expired_send_keepalive(a) expired_send_keepalive(unsigned long timer)
-#define expired_new_handshake(a) expired_new_handshake(unsigned long timer)
-#define expired_zero_key_material(a) expired_zero_key_material(unsigned long timer)
-#define expired_send_persistent_keepalive(a) expired_send_persistent_keepalive(unsigned long timer)
+#define wg_expired_retransmit_handshake(a) wg_expired_retransmit_handshake(unsigned long timer)
+#define wg_expired_send_keepalive(a) wg_expired_send_keepalive(unsigned long timer)
+#define wg_expired_new_handshake(a) wg_expired_new_handshake(unsigned long timer)
+#define wg_expired_zero_key_material(a) wg_expired_zero_key_material(unsigned long timer)
+#define wg_expired_send_persistent_keepalive(a) wg_expired_send_persistent_keepalive(unsigned long timer)
#undef timer_setup
#define timer_setup(a, b, c) setup_timer(a, ((void (*)(unsigned long))b), ((unsigned long)a))
#undef from_timer