-rw-r--r--  Documentation/netlink/specs/ovpn.yaml | 367
-rw-r--r--  Documentation/netlink/specs/rt-link.yaml | 16
-rw-r--r--  MAINTAINERS | 11
-rw-r--r--  drivers/net/Kconfig | 15
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/ovpn/Makefile | 22
-rw-r--r--  drivers/net/ovpn/bind.c | 55
-rw-r--r--  drivers/net/ovpn/bind.h | 101
-rw-r--r--  drivers/net/ovpn/crypto.c | 210
-rw-r--r--  drivers/net/ovpn/crypto.h | 145
-rw-r--r--  drivers/net/ovpn/crypto_aead.c | 383
-rw-r--r--  drivers/net/ovpn/crypto_aead.h | 29
-rw-r--r--  drivers/net/ovpn/io.c | 446
-rw-r--r--  drivers/net/ovpn/io.h | 34
-rw-r--r--  drivers/net/ovpn/main.c | 274
-rw-r--r--  drivers/net/ovpn/main.h | 14
-rw-r--r--  drivers/net/ovpn/netlink-gen.c | 213
-rw-r--r--  drivers/net/ovpn/netlink-gen.h | 41
-rw-r--r--  drivers/net/ovpn/netlink.c | 1258
-rw-r--r--  drivers/net/ovpn/netlink.h | 18
-rw-r--r--  drivers/net/ovpn/ovpnpriv.h | 55
-rw-r--r--  drivers/net/ovpn/peer.c | 1365
-rw-r--r--  drivers/net/ovpn/peer.h | 163
-rw-r--r--  drivers/net/ovpn/pktid.c | 129
-rw-r--r--  drivers/net/ovpn/pktid.h | 86
-rw-r--r--  drivers/net/ovpn/proto.h | 118
-rw-r--r--  drivers/net/ovpn/skb.h | 61
-rw-r--r--  drivers/net/ovpn/socket.c | 233
-rw-r--r--  drivers/net/ovpn/socket.h | 49
-rw-r--r--  drivers/net/ovpn/stats.c | 21
-rw-r--r--  drivers/net/ovpn/stats.h | 47
-rw-r--r--  drivers/net/ovpn/tcp.c | 598
-rw-r--r--  drivers/net/ovpn/tcp.h | 36
-rw-r--r--  drivers/net/ovpn/udp.c | 439
-rw-r--r--  drivers/net/ovpn/udp.h | 25
-rw-r--r--  include/linux/skbuff.h | 2
-rw-r--r--  include/uapi/linux/if_link.h | 15
-rw-r--r--  include/uapi/linux/ovpn.h | 109
-rw-r--r--  include/uapi/linux/udp.h | 1
-rw-r--r--  net/core/skbuff.c | 18
-rw-r--r--  net/ipv6/af_inet6.c | 1
-rw-r--r--  tools/testing/selftests/Makefile | 1
-rw-r--r--  tools/testing/selftests/net/ovpn/.gitignore | 2
-rw-r--r--  tools/testing/selftests/net/ovpn/Makefile | 31
-rw-r--r--  tools/testing/selftests/net/ovpn/common.sh | 92
-rw-r--r--  tools/testing/selftests/net/ovpn/config | 10
-rw-r--r--  tools/testing/selftests/net/ovpn/data64.key | 5
-rw-r--r--  tools/testing/selftests/net/ovpn/ovpn-cli.c | 2376
-rw-r--r--  tools/testing/selftests/net/ovpn/tcp_peers.txt | 5
-rwxr-xr-x  tools/testing/selftests/net/ovpn/test-chachapoly.sh | 9
-rwxr-xr-x  tools/testing/selftests/net/ovpn/test-close-socket-tcp.sh | 9
-rwxr-xr-x  tools/testing/selftests/net/ovpn/test-close-socket.sh | 45
-rwxr-xr-x  tools/testing/selftests/net/ovpn/test-float.sh | 9
-rwxr-xr-x  tools/testing/selftests/net/ovpn/test-tcp.sh | 9
-rwxr-xr-x  tools/testing/selftests/net/ovpn/test.sh | 113
-rw-r--r--  tools/testing/selftests/net/ovpn/udp_peers.txt | 5
56 files changed, 9940 insertions(+), 5 deletions(-)
diff --git a/Documentation/netlink/specs/ovpn.yaml b/Documentation/netlink/specs/ovpn.yaml
new file mode 100644
index 000000000000..096c51f0c69a
--- /dev/null
+++ b/Documentation/netlink/specs/ovpn.yaml
@@ -0,0 +1,367 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+#
+# Author: Antonio Quartulli <antonio@openvpn.net>
+#
+# Copyright (c) 2024-2025, OpenVPN Inc.
+#
+
+name: ovpn
+
+protocol: genetlink
+
+doc: Netlink protocol to control OpenVPN network devices
+
+definitions:
+ -
+ type: const
+ name: nonce-tail-size
+ value: 8
+ -
+ type: enum
+ name: cipher-alg
+ entries: [ none, aes-gcm, chacha20-poly1305 ]
+ -
+ type: enum
+ name: del-peer-reason
+ entries:
+ - teardown
+ - userspace
+ - expired
+ - transport-error
+ - transport-disconnect
+ -
+ type: enum
+ name: key-slot
+ entries: [ primary, secondary ]
+
+attribute-sets:
+ -
+ name: peer
+ attributes:
+ -
+ name: id
+ type: u32
+ doc: >-
+ The unique ID of the peer in the device context. To be used to identify
+ peers during operations for a specific device
+ checks:
+ max: 0xFFFFFF
+ -
+ name: remote-ipv4
+ type: u32
+ doc: The remote IPv4 address of the peer
+ byte-order: big-endian
+ display-hint: ipv4
+ -
+ name: remote-ipv6
+ type: binary
+ doc: The remote IPv6 address of the peer
+ display-hint: ipv6
+ checks:
+ exact-len: 16
+ -
+ name: remote-ipv6-scope-id
+ type: u32
+ doc: The scope id of the remote IPv6 address of the peer (RFC2553)
+ -
+ name: remote-port
+ type: u16
+ doc: The remote port of the peer
+ byte-order: big-endian
+ checks:
+ min: 1
+ -
+ name: socket
+ type: u32
+ doc: The socket to be used to communicate with the peer
+ -
+ name: socket-netnsid
+ type: s32
+ doc: The ID of the netns the socket assigned to this peer lives in
+ -
+ name: vpn-ipv4
+ type: u32
+ doc: The IPv4 address assigned to the peer by the server
+ byte-order: big-endian
+ display-hint: ipv4
+ -
+ name: vpn-ipv6
+ type: binary
+ doc: The IPv6 address assigned to the peer by the server
+ display-hint: ipv6
+ checks:
+ exact-len: 16
+ -
+ name: local-ipv4
+ type: u32
+ doc: The local IPv4 to be used to send packets to the peer (UDP only)
+ byte-order: big-endian
+ display-hint: ipv4
+ -
+ name: local-ipv6
+ type: binary
+ doc: The local IPv6 to be used to send packets to the peer (UDP only)
+ display-hint: ipv6
+ checks:
+ exact-len: 16
+ -
+ name: local-port
+ type: u16
+ doc: The local port to be used to send packets to the peer (UDP only)
+ byte-order: big-endian
+ checks:
+ min: 1
+ -
+ name: keepalive-interval
+ type: u32
+ doc: >-
+ The number of seconds after which a keep alive message is sent to the
+ peer
+ -
+ name: keepalive-timeout
+ type: u32
+ doc: >-
+ The number of seconds from the last activity after which the peer is
+ assumed dead
+ -
+ name: del-reason
+ type: u32
+ doc: The reason why a peer was deleted
+ enum: del-peer-reason
+ -
+ name: vpn-rx-bytes
+ type: uint
+ doc: Number of bytes received over the tunnel
+ -
+ name: vpn-tx-bytes
+ type: uint
+ doc: Number of bytes transmitted over the tunnel
+ -
+ name: vpn-rx-packets
+ type: uint
+ doc: Number of packets received over the tunnel
+ -
+ name: vpn-tx-packets
+ type: uint
+ doc: Number of packets transmitted over the tunnel
+ -
+ name: link-rx-bytes
+ type: uint
+ doc: Number of bytes received at the transport level
+ -
+ name: link-tx-bytes
+ type: uint
+ doc: Number of bytes transmitted at the transport level
+ -
+ name: link-rx-packets
+ type: uint
+ doc: Number of packets received at the transport level
+ -
+ name: link-tx-packets
+ type: uint
+ doc: Number of packets transmitted at the transport level
+ -
+ name: keyconf
+ attributes:
+ -
+ name: peer-id
+ type: u32
+ doc: >-
+ The unique ID of the peer in the device context. To be used to
+ identify peers during key operations
+ checks:
+ max: 0xFFFFFF
+ -
+ name: slot
+ type: u32
+ doc: The slot where the key should be stored
+ enum: key-slot
+ -
+ name: key-id
+ doc: >-
+ The unique ID of the key in the peer context. Used to fetch the
+ correct key upon decryption
+ type: u32
+ checks:
+ max: 7
+ -
+ name: cipher-alg
+ type: u32
+ doc: The cipher to be used when communicating with the peer
+ enum: cipher-alg
+ -
+ name: encrypt-dir
+ type: nest
+ doc: Key material for encrypt direction
+ nested-attributes: keydir
+ -
+ name: decrypt-dir
+ type: nest
+ doc: Key material for decrypt direction
+ nested-attributes: keydir
+ -
+ name: keydir
+ attributes:
+ -
+ name: cipher-key
+ type: binary
+ doc: The actual key to be used by the cipher
+ checks:
+ max-len: 256
+ -
+ name: nonce-tail
+ type: binary
+ doc: >-
+ Random nonce to be concatenated to the packet ID, in order to
+ obtain the actual cipher IV
+ checks:
+ exact-len: nonce-tail-size
+ -
+ name: ovpn
+ attributes:
+ -
+ name: ifindex
+ type: u32
+ doc: Index of the ovpn interface to operate on
+ -
+ name: peer
+ type: nest
+ doc: >-
+ The peer object containing the attributes of interest for the specific
+ operation
+ nested-attributes: peer
+ -
+ name: keyconf
+ type: nest
+ doc: Peer specific cipher configuration
+ nested-attributes: keyconf
+
+operations:
+ list:
+ -
+ name: peer-new
+ attribute-set: ovpn
+ flags: [ admin-perm ]
+ doc: Add a remote peer
+ do:
+ pre: ovpn-nl-pre-doit
+ post: ovpn-nl-post-doit
+ request:
+ attributes:
+ - ifindex
+ - peer
+ -
+ name: peer-set
+ attribute-set: ovpn
+ flags: [ admin-perm ]
+ doc: Modify a remote peer
+ do:
+ pre: ovpn-nl-pre-doit
+ post: ovpn-nl-post-doit
+ request:
+ attributes:
+ - ifindex
+ - peer
+ -
+ name: peer-get
+ attribute-set: ovpn
+ flags: [ admin-perm ]
+ doc: Retrieve data about existing remote peers (or a specific one)
+ do:
+ pre: ovpn-nl-pre-doit
+ post: ovpn-nl-post-doit
+ request:
+ attributes:
+ - ifindex
+ - peer
+ reply:
+ attributes:
+ - peer
+ dump:
+ request:
+ attributes:
+ - ifindex
+ reply:
+ attributes:
+ - peer
+ -
+ name: peer-del
+ attribute-set: ovpn
+ flags: [ admin-perm ]
+ doc: Delete existing remote peer
+ do:
+ pre: ovpn-nl-pre-doit
+ post: ovpn-nl-post-doit
+ request:
+ attributes:
+ - ifindex
+ - peer
+ -
+ name: peer-del-ntf
+ doc: Notification about a peer being deleted
+ notify: peer-get
+ mcgrp: peers
+
+ -
+ name: key-new
+ attribute-set: ovpn
+ flags: [ admin-perm ]
+ doc: Add a cipher key for a specific peer
+ do:
+ pre: ovpn-nl-pre-doit
+ post: ovpn-nl-post-doit
+ request:
+ attributes:
+ - ifindex
+ - keyconf
+ -
+ name: key-get
+ attribute-set: ovpn
+ flags: [ admin-perm ]
+ doc: Retrieve non-sensitive data about peer key and cipher
+ do:
+ pre: ovpn-nl-pre-doit
+ post: ovpn-nl-post-doit
+ request:
+ attributes:
+ - ifindex
+ - keyconf
+ reply:
+ attributes:
+ - keyconf
+ -
+ name: key-swap
+ attribute-set: ovpn
+ flags: [ admin-perm ]
+ doc: Swap primary and secondary session keys for a specific peer
+ do:
+ pre: ovpn-nl-pre-doit
+ post: ovpn-nl-post-doit
+ request:
+ attributes:
+ - ifindex
+ - keyconf
+ -
+ name: key-swap-ntf
+ notify: key-get
+ doc: >-
+ Notification about a key having exhausted its IV space and requiring
+ renegotiation
+ mcgrp: peers
+ -
+ name: key-del
+ attribute-set: ovpn
+ flags: [ admin-perm ]
+ doc: Delete cipher key for a specific peer
+ do:
+ pre: ovpn-nl-pre-doit
+ post: ovpn-nl-post-doit
+ request:
+ attributes:
+ - ifindex
+ - keyconf
+
+mcast-groups:
+ list:
+ -
+ name: peers
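
For reference, this spec is what ynl-gen uses to produce both include/uapi/linux/ovpn.h and the kernel-side policies in netlink-gen.c further down in this patch. A minimal sketch of how a userspace daemon could drive the family with plain libnl-genl follows; the OVPN_CMD_*/OVPN_A_* constant names are assumed to match the generated uapi header, and error handling is omitted.

#include <arpa/inet.h>
#include <net/if.h>
#include <stdint.h>
#include <netlink/genl/ctrl.h>
#include <netlink/genl/genl.h>
#include <netlink/netlink.h>
#include <linux/ovpn.h>

/* add a peer to an ovpn interface: peer-id + remote endpoint + UDP socket */
static int ovpn_peer_new(const char *ifname, int udp_fd, uint32_t peer_id,
                         const char *remote, uint16_t port)
{
        struct nl_sock *sk = nl_socket_alloc();
        struct nl_msg *msg = nlmsg_alloc();
        struct nlattr *peer;
        int family, ret;

        genl_connect(sk);
        family = genl_ctrl_resolve(sk, OVPN_FAMILY_NAME);

        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                    OVPN_CMD_PEER_NEW, 0);
        nla_put_u32(msg, OVPN_A_IFINDEX, if_nametoindex(ifname));

        /* the nested "peer" object described by the attribute-set above */
        peer = nla_nest_start(msg, OVPN_A_PEER);
        nla_put_u32(msg, OVPN_A_PEER_ID, peer_id);
        nla_put_u32(msg, OVPN_A_PEER_SOCKET, udp_fd);
        nla_put_u32(msg, OVPN_A_PEER_REMOTE_IPV4, inet_addr(remote));
        nla_put_u16(msg, OVPN_A_PEER_REMOTE_PORT, htons(port));
        nla_nest_end(msg, peer);

        ret = nl_send_auto(sk, msg);
        nlmsg_free(msg);
        nl_socket_free(sk);
        return ret < 0 ? ret : 0;
}

A matching OVPN_CMD_KEY_NEW request carrying an OVPN_A_KEYCONF nest would then install the data-channel keys negotiated over the control channel, after which traffic can flow.
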
diff --git a/Documentation/netlink/specs/rt-link.yaml b/Documentation/netlink/specs/rt-link.yaml
index 31238455f8e9..a50d9d7d882e 100644
--- a/Documentation/netlink/specs/rt-link.yaml
+++ b/Documentation/netlink/specs/rt-link.yaml
@@ -938,6 +938,12 @@ definitions:
entries:
- name: none
- name: default
+ -
+ name: ovpn-mode
+ type: enum
+ entries:
+ - p2p
+ - mp
attribute-sets:
-
@@ -2272,6 +2278,13 @@ attribute-sets:
-
name: tailroom
type: u16
+ -
+ name: linkinfo-ovpn-attrs
+ attributes:
+ -
+ name: mode
+ type: u8
+ enum: ovpn-mode
sub-messages:
-
@@ -2322,6 +2335,9 @@ sub-messages:
-
value: netkit
attribute-set: linkinfo-netkit-attrs
+ -
+ value: ovpn
+ attribute-set: linkinfo-ovpn-attrs
-
name: linkinfo-member-data-msg
formats:
diff --git a/MAINTAINERS b/MAINTAINERS
index 1248443035f4..350009769173 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -18125,6 +18125,17 @@ F: arch/openrisc/
F: drivers/irqchip/irq-ompic.c
F: drivers/irqchip/irq-or1k-*
+OPENVPN DATA CHANNEL OFFLOAD
+M: Antonio Quartulli <antonio@openvpn.net>
+L: openvpn-devel@lists.sourceforge.net (subscribers-only)
+L: netdev@vger.kernel.org
+S: Supported
+T: git https://github.com/OpenVPN/linux-kernel-ovpn.git
+F: Documentation/netlink/specs/ovpn.yaml
+F: drivers/net/ovpn/
+F: include/uapi/linux/ovpn.h
+F: tools/testing/selftests/net/ovpn/
+
OPENVSWITCH
M: Aaron Conole <aconole@redhat.com>
M: Eelco Chaudron <echaudro@redhat.com>
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 271520510b5f..b29628d46be9 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -115,6 +115,21 @@ config WIREGUARD_DEBUG
Say N here unless you know what you're doing.
+config OVPN
+ tristate "OpenVPN data channel offload"
+ depends on NET && INET
+ depends on IPV6 || !IPV6
+ select DST_CACHE
+ select NET_UDP_TUNNEL
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_GCM
+ select CRYPTO_CHACHA20POLY1305
+ select STREAM_PARSER
+ help
+ This module enhances the performance of the OpenVPN userspace software
+ by offloading the data channel processing to kernelspace.
+
config EQUALIZER
tristate "EQL (serial line load balancing) support"
help
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 75333251a01a..73bc63ecd65f 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_IPVLAN) += ipvlan/
obj-$(CONFIG_IPVTAP) += ipvlan/
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_WIREGUARD) += wireguard/
+obj-$(CONFIG_OVPN) += ovpn/
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACSEC) += macsec.o
diff --git a/drivers/net/ovpn/Makefile b/drivers/net/ovpn/Makefile
new file mode 100644
index 000000000000..229be66167e1
--- /dev/null
+++ b/drivers/net/ovpn/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# ovpn -- OpenVPN data channel offload in kernel space
+#
+# Copyright (C) 2020-2025 OpenVPN, Inc.
+#
+# Author: Antonio Quartulli <antonio@openvpn.net>
+
+obj-$(CONFIG_OVPN) := ovpn.o
+ovpn-y += bind.o
+ovpn-y += crypto.o
+ovpn-y += crypto_aead.o
+ovpn-y += main.o
+ovpn-y += io.o
+ovpn-y += netlink.o
+ovpn-y += netlink-gen.o
+ovpn-y += peer.o
+ovpn-y += pktid.o
+ovpn-y += socket.o
+ovpn-y += stats.o
+ovpn-y += tcp.o
+ovpn-y += udp.o
diff --git a/drivers/net/ovpn/bind.c b/drivers/net/ovpn/bind.c
new file mode 100644
index 000000000000..24d2788a277e
--- /dev/null
+++ b/drivers/net/ovpn/bind.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2012-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/socket.h>
+
+#include "ovpnpriv.h"
+#include "bind.h"
+#include "peer.h"
+
+/**
+ * ovpn_bind_from_sockaddr - allocate a new binding for the given sockaddr
+ * @ss: the sockaddr to use as the remote endpoint
+ *
+ * Return: the new bind on success, or an ERR_PTR (-EAFNOSUPPORT/-ENOMEM)
+ */
+struct ovpn_bind *ovpn_bind_from_sockaddr(const struct sockaddr_storage *ss)
+{
+ struct ovpn_bind *bind;
+ size_t sa_len;
+
+ if (ss->ss_family == AF_INET)
+ sa_len = sizeof(struct sockaddr_in);
+ else if (ss->ss_family == AF_INET6)
+ sa_len = sizeof(struct sockaddr_in6);
+ else
+ return ERR_PTR(-EAFNOSUPPORT);
+
+ bind = kzalloc(sizeof(*bind), GFP_ATOMIC);
+ if (unlikely(!bind))
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&bind->remote, ss, sa_len);
+
+ return bind;
+}
+
+/**
+ * ovpn_bind_reset - assign new binding to peer
+ * @peer: the peer whose binding has to be replaced
+ * @new: the new bind to assign
+ */
+void ovpn_bind_reset(struct ovpn_peer *peer, struct ovpn_bind *new)
+{
+ lockdep_assert_held(&peer->lock);
+
+ kfree_rcu(rcu_replace_pointer(peer->bind, new,
+ lockdep_is_held(&peer->lock)), rcu);
+}
diff --git a/drivers/net/ovpn/bind.h b/drivers/net/ovpn/bind.h
new file mode 100644
index 000000000000..4e0b8398bfd9
--- /dev/null
+++ b/drivers/net/ovpn/bind.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2012-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNBIND_H_
+#define _NET_OVPN_OVPNBIND_H_
+
+#include <net/ip.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+struct ovpn_peer;
+
+/**
+ * union ovpn_sockaddr - basic transport layer address
+ * @in4: IPv4 address
+ * @in6: IPv6 address
+ */
+union ovpn_sockaddr {
+ struct sockaddr_in in4;
+ struct sockaddr_in6 in6;
+};
+
+/**
+ * struct ovpn_bind - remote peer binding
+ * @remote: the remote peer socket address
+ * @local: local endpoint used to talk to the peer
+ * @local.ipv4: local IPv4 used to talk to the peer
+ * @local.ipv6: local IPv6 used to talk to the peer
+ * @rcu: used to schedule RCU cleanup job
+ */
+struct ovpn_bind {
+ union ovpn_sockaddr remote; /* remote sockaddr */
+
+ union {
+ struct in_addr ipv4;
+ struct in6_addr ipv6;
+ } local;
+
+ struct rcu_head rcu;
+};
+
+/**
+ * ovpn_bind_skb_src_match - match packet source with binding
+ * @bind: the binding to match
+ * @skb: the packet to match
+ *
+ * Return: true if the packet source matches the remote peer sockaddr
+ * in the binding
+ */
+static inline bool ovpn_bind_skb_src_match(const struct ovpn_bind *bind,
+ const struct sk_buff *skb)
+{
+ const union ovpn_sockaddr *remote;
+
+ if (unlikely(!bind))
+ return false;
+
+ remote = &bind->remote;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ if (unlikely(remote->in4.sin_family != AF_INET))
+ return false;
+
+ if (unlikely(remote->in4.sin_addr.s_addr != ip_hdr(skb)->saddr))
+ return false;
+
+ if (unlikely(remote->in4.sin_port != udp_hdr(skb)->source))
+ return false;
+ break;
+ case htons(ETH_P_IPV6):
+ if (unlikely(remote->in6.sin6_family != AF_INET6))
+ return false;
+
+ if (unlikely(!ipv6_addr_equal(&remote->in6.sin6_addr,
+ &ipv6_hdr(skb)->saddr)))
+ return false;
+
+ if (unlikely(remote->in6.sin6_port != udp_hdr(skb)->source))
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+struct ovpn_bind *ovpn_bind_from_sockaddr(const struct sockaddr_storage *sa);
+void ovpn_bind_reset(struct ovpn_peer *peer, struct ovpn_bind *bind);
+
+#endif /* _NET_OVPN_OVPNBIND_H_ */
diff --git a/drivers/net/ovpn/crypto.c b/drivers/net/ovpn/crypto.c
new file mode 100644
index 000000000000..90580e32052f
--- /dev/null
+++ b/drivers/net/ovpn/crypto.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <uapi/linux/ovpn.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "pktid.h"
+#include "crypto_aead.h"
+#include "crypto.h"
+
+static void ovpn_ks_destroy_rcu(struct rcu_head *head)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ ks = container_of(head, struct ovpn_crypto_key_slot, rcu);
+ ovpn_aead_crypto_key_slot_destroy(ks);
+}
+
+void ovpn_crypto_key_slot_release(struct kref *kref)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ ks = container_of(kref, struct ovpn_crypto_key_slot, refcount);
+ call_rcu(&ks->rcu, ovpn_ks_destroy_rcu);
+}
+
+/* can only be invoked when all peer references have been dropped (i.e. RCU
+ * release routine)
+ */
+void ovpn_crypto_state_release(struct ovpn_crypto_state *cs)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ ks = rcu_access_pointer(cs->slots[0]);
+ if (ks) {
+ RCU_INIT_POINTER(cs->slots[0], NULL);
+ ovpn_crypto_key_slot_put(ks);
+ }
+
+ ks = rcu_access_pointer(cs->slots[1]);
+ if (ks) {
+ RCU_INIT_POINTER(cs->slots[1], NULL);
+ ovpn_crypto_key_slot_put(ks);
+ }
+}
+
+/* removes the key matching the specified id from the crypto context */
+bool ovpn_crypto_kill_key(struct ovpn_crypto_state *cs, u8 key_id)
+{
+ struct ovpn_crypto_key_slot *ks = NULL;
+
+ spin_lock_bh(&cs->lock);
+ if (rcu_access_pointer(cs->slots[0])->key_id == key_id) {
+ ks = rcu_replace_pointer(cs->slots[0], NULL,
+ lockdep_is_held(&cs->lock));
+ } else if (rcu_access_pointer(cs->slots[1])->key_id == key_id) {
+ ks = rcu_replace_pointer(cs->slots[1], NULL,
+ lockdep_is_held(&cs->lock));
+ }
+ spin_unlock_bh(&cs->lock);
+
+ if (ks)
+ ovpn_crypto_key_slot_put(ks);
+
+ /* let the caller know if a key was actually killed */
+ return ks;
+}
+
+/* Reset the ovpn_crypto_state object in a way that is atomic
+ * to RCU readers.
+ */
+int ovpn_crypto_state_reset(struct ovpn_crypto_state *cs,
+ const struct ovpn_peer_key_reset *pkr)
+{
+ struct ovpn_crypto_key_slot *old = NULL, *new;
+ u8 idx;
+
+ if (pkr->slot != OVPN_KEY_SLOT_PRIMARY &&
+ pkr->slot != OVPN_KEY_SLOT_SECONDARY)
+ return -EINVAL;
+
+ new = ovpn_aead_crypto_key_slot_new(&pkr->key);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+
+ spin_lock_bh(&cs->lock);
+ idx = cs->primary_idx;
+ switch (pkr->slot) {
+ case OVPN_KEY_SLOT_PRIMARY:
+ old = rcu_replace_pointer(cs->slots[idx], new,
+ lockdep_is_held(&cs->lock));
+ break;
+ case OVPN_KEY_SLOT_SECONDARY:
+ old = rcu_replace_pointer(cs->slots[!idx], new,
+ lockdep_is_held(&cs->lock));
+ break;
+ }
+ spin_unlock_bh(&cs->lock);
+
+ if (old)
+ ovpn_crypto_key_slot_put(old);
+
+ return 0;
+}
+
+void ovpn_crypto_key_slot_delete(struct ovpn_crypto_state *cs,
+ enum ovpn_key_slot slot)
+{
+ struct ovpn_crypto_key_slot *ks = NULL;
+ u8 idx;
+
+ if (slot != OVPN_KEY_SLOT_PRIMARY &&
+ slot != OVPN_KEY_SLOT_SECONDARY) {
+ pr_warn("Invalid slot to release: %u\n", slot);
+ return;
+ }
+
+ spin_lock_bh(&cs->lock);
+ idx = cs->primary_idx;
+ switch (slot) {
+ case OVPN_KEY_SLOT_PRIMARY:
+ ks = rcu_replace_pointer(cs->slots[idx], NULL,
+ lockdep_is_held(&cs->lock));
+ break;
+ case OVPN_KEY_SLOT_SECONDARY:
+ ks = rcu_replace_pointer(cs->slots[!idx], NULL,
+ lockdep_is_held(&cs->lock));
+ break;
+ }
+ spin_unlock_bh(&cs->lock);
+
+ if (!ks) {
+ pr_debug("Key slot already released: %u\n", slot);
+ return;
+ }
+
+ pr_debug("deleting key slot %u, key_id=%u\n", slot, ks->key_id);
+ ovpn_crypto_key_slot_put(ks);
+}
+
+void ovpn_crypto_key_slots_swap(struct ovpn_crypto_state *cs)
+{
+ const struct ovpn_crypto_key_slot *old_primary, *old_secondary;
+ u8 idx;
+
+ spin_lock_bh(&cs->lock);
+ idx = cs->primary_idx;
+ old_primary = rcu_dereference_protected(cs->slots[idx],
+ lockdep_is_held(&cs->lock));
+ old_secondary = rcu_dereference_protected(cs->slots[!idx],
+ lockdep_is_held(&cs->lock));
+ /* perform real swap by switching the index of the primary key */
+ WRITE_ONCE(cs->primary_idx, !cs->primary_idx);
+
+ pr_debug("key swapped: (old primary) %d <-> (new primary) %d\n",
+ old_primary ? old_primary->key_id : -1,
+ old_secondary ? old_secondary->key_id : -1);
+
+ spin_unlock_bh(&cs->lock);
+}
+
+/**
+ * ovpn_crypto_config_get - populate keyconf object with non-sensitive key data
+ * @cs: the crypto state to extract the key data from
+ * @slot: the specific slot to inspect
+ * @keyconf: the output object to populate
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_crypto_config_get(struct ovpn_crypto_state *cs,
+ enum ovpn_key_slot slot,
+ struct ovpn_key_config *keyconf)
+{
+ struct ovpn_crypto_key_slot *ks;
+ int idx;
+
+ switch (slot) {
+ case OVPN_KEY_SLOT_PRIMARY:
+ idx = cs->primary_idx;
+ break;
+ case OVPN_KEY_SLOT_SECONDARY:
+ idx = !cs->primary_idx;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+ ks = rcu_dereference(cs->slots[idx]);
+ if (!ks) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ keyconf->cipher_alg = ovpn_aead_crypto_alg(ks);
+ keyconf->key_id = ks->key_id;
+ rcu_read_unlock();
+
+ return 0;
+}
diff --git a/drivers/net/ovpn/crypto.h b/drivers/net/ovpn/crypto.h
new file mode 100644
index 000000000000..0e284fec3a75
--- /dev/null
+++ b/drivers/net/ovpn/crypto.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNCRYPTO_H_
+#define _NET_OVPN_OVPNCRYPTO_H_
+
+#include "pktid.h"
+#include "proto.h"
+
+/* info needed for both encrypt and decrypt directions */
+struct ovpn_key_direction {
+ const u8 *cipher_key;
+ size_t cipher_key_size;
+ const u8 *nonce_tail; /* only needed for GCM modes */
+ size_t nonce_tail_size; /* only needed for GCM modes */
+};
+
+/* all info for a particular symmetric key (primary or secondary) */
+struct ovpn_key_config {
+ enum ovpn_cipher_alg cipher_alg;
+ u8 key_id;
+ struct ovpn_key_direction encrypt;
+ struct ovpn_key_direction decrypt;
+};
+
+/* used to pass settings from netlink to the crypto engine */
+struct ovpn_peer_key_reset {
+ enum ovpn_key_slot slot;
+ struct ovpn_key_config key;
+};
+
+struct ovpn_crypto_key_slot {
+ u8 key_id;
+
+ struct crypto_aead *encrypt;
+ struct crypto_aead *decrypt;
+ u8 nonce_tail_xmit[OVPN_NONCE_TAIL_SIZE];
+ u8 nonce_tail_recv[OVPN_NONCE_TAIL_SIZE];
+
+ struct ovpn_pktid_recv pid_recv ____cacheline_aligned_in_smp;
+ struct ovpn_pktid_xmit pid_xmit ____cacheline_aligned_in_smp;
+ struct kref refcount;
+ struct rcu_head rcu;
+};
+
+struct ovpn_crypto_state {
+ struct ovpn_crypto_key_slot __rcu *slots[2];
+ u8 primary_idx;
+
+ /* protects primary and secondary slots */
+ spinlock_t lock;
+};
+
+static inline bool ovpn_crypto_key_slot_hold(struct ovpn_crypto_key_slot *ks)
+{
+ return kref_get_unless_zero(&ks->refcount);
+}
+
+static inline void ovpn_crypto_state_init(struct ovpn_crypto_state *cs)
+{
+ RCU_INIT_POINTER(cs->slots[0], NULL);
+ RCU_INIT_POINTER(cs->slots[1], NULL);
+ cs->primary_idx = 0;
+ spin_lock_init(&cs->lock);
+}
+
+static inline struct ovpn_crypto_key_slot *
+ovpn_crypto_key_id_to_slot(const struct ovpn_crypto_state *cs, u8 key_id)
+{
+ struct ovpn_crypto_key_slot *ks;
+ u8 idx;
+
+ if (unlikely(!cs))
+ return NULL;
+
+ rcu_read_lock();
+ idx = READ_ONCE(cs->primary_idx);
+ ks = rcu_dereference(cs->slots[idx]);
+ if (ks && ks->key_id == key_id) {
+ if (unlikely(!ovpn_crypto_key_slot_hold(ks)))
+ ks = NULL;
+ goto out;
+ }
+
+ ks = rcu_dereference(cs->slots[!idx]);
+ if (ks && ks->key_id == key_id) {
+ if (unlikely(!ovpn_crypto_key_slot_hold(ks)))
+ ks = NULL;
+ goto out;
+ }
+
+ /* when both key slots are occupied but no matching key ID is found, ks
+ * has to be reset to NULL to avoid carrying a stale pointer
+ */
+ ks = NULL;
+out:
+ rcu_read_unlock();
+
+ return ks;
+}
+
+static inline struct ovpn_crypto_key_slot *
+ovpn_crypto_key_slot_primary(const struct ovpn_crypto_state *cs)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ rcu_read_lock();
+ ks = rcu_dereference(cs->slots[cs->primary_idx]);
+ if (unlikely(ks && !ovpn_crypto_key_slot_hold(ks)))
+ ks = NULL;
+ rcu_read_unlock();
+
+ return ks;
+}
+
+void ovpn_crypto_key_slot_release(struct kref *kref);
+
+static inline void ovpn_crypto_key_slot_put(struct ovpn_crypto_key_slot *ks)
+{
+ kref_put(&ks->refcount, ovpn_crypto_key_slot_release);
+}
+
+int ovpn_crypto_state_reset(struct ovpn_crypto_state *cs,
+ const struct ovpn_peer_key_reset *pkr);
+
+void ovpn_crypto_key_slot_delete(struct ovpn_crypto_state *cs,
+ enum ovpn_key_slot slot);
+
+void ovpn_crypto_state_release(struct ovpn_crypto_state *cs);
+
+void ovpn_crypto_key_slots_swap(struct ovpn_crypto_state *cs);
+
+int ovpn_crypto_config_get(struct ovpn_crypto_state *cs,
+ enum ovpn_key_slot slot,
+ struct ovpn_key_config *keyconf);
+
+bool ovpn_crypto_kill_key(struct ovpn_crypto_state *cs, u8 key_id);
+
+#endif /* _NET_OVPN_OVPNCRYPTO_H_ */
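
The two-slot layout above (slots[2] plus primary_idx) is what keeps rekeying cheap: a new key always lands in the secondary slot and a swap merely flips primary_idx, while readers holding the old primary keep using it until its refcount drops. A minimal userspace model of that bookkeeping, with hypothetical toy_* names and the RCU/refcount machinery deliberately left out:

#include <stddef.h>
#include <stdint.h>

struct toy_key {
        uint8_t key_id;
        /* ... key material ... */
};

struct toy_crypto_state {
        struct toy_key *slots[2];
        uint8_t primary_idx;
};

/* install a new key into the secondary slot (OVPN_KEY_SLOT_SECONDARY) */
static void toy_install_secondary(struct toy_crypto_state *cs,
                                  struct toy_key *new_key)
{
        cs->slots[!cs->primary_idx] = new_key;
}

/* key swap: no key material moves, only the primary index flips */
static void toy_swap(struct toy_crypto_state *cs)
{
        cs->primary_idx = !cs->primary_idx;
}

/* lookup mirrors ovpn_crypto_key_id_to_slot(): primary first, then secondary */
static struct toy_key *toy_lookup(struct toy_crypto_state *cs, uint8_t key_id)
{
        struct toy_key *ks = cs->slots[cs->primary_idx];

        if (ks && ks->key_id == key_id)
                return ks;

        ks = cs->slots[!cs->primary_idx];
        if (ks && ks->key_id == key_id)
                return ks;

        return NULL;
}
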
diff --git a/drivers/net/ovpn/crypto_aead.c b/drivers/net/ovpn/crypto_aead.c
new file mode 100644
index 000000000000..74ee639ac868
--- /dev/null
+++ b/drivers/net/ovpn/crypto_aead.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <crypto/aead.h>
+#include <linux/skbuff.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "io.h"
+#include "pktid.h"
+#include "crypto_aead.h"
+#include "crypto.h"
+#include "peer.h"
+#include "proto.h"
+#include "skb.h"
+
+#define OVPN_AUTH_TAG_SIZE 16
+#define OVPN_AAD_SIZE (OVPN_OPCODE_SIZE + OVPN_NONCE_WIRE_SIZE)
+
+#define ALG_NAME_AES "gcm(aes)"
+#define ALG_NAME_CHACHAPOLY "rfc7539(chacha20,poly1305)"
+
+static int ovpn_aead_encap_overhead(const struct ovpn_crypto_key_slot *ks)
+{
+ return OVPN_OPCODE_SIZE + /* OP header size */
+ sizeof(u32) + /* Packet ID */
+ crypto_aead_authsize(ks->encrypt); /* Auth Tag */
+}
+
+int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
+ struct sk_buff *skb)
+{
+ const unsigned int tag_size = crypto_aead_authsize(ks->encrypt);
+ struct aead_request *req;
+ struct sk_buff *trailer;
+ struct scatterlist *sg;
+ int nfrags, ret;
+ u32 pktid, op;
+ u8 *iv;
+
+ ovpn_skb_cb(skb)->peer = peer;
+ ovpn_skb_cb(skb)->ks = ks;
+
+ /* Sample AEAD header format:
+ * 48000001 00000005 7e7046bd 444a7e28 cc6387b1 64a4d6c1 380275a...
+ * [ OP32 ] [seq # ] [ auth tag ] [ payload ... ]
+ * [4-byte
+ * IV head]
+ */
+
+ /* check that there's enough headroom in the skb for packet
+ * encapsulation
+ */
+ if (unlikely(skb_cow_head(skb, OVPN_HEAD_ROOM)))
+ return -ENOBUFS;
+
+ /* get number of skb frags and ensure that packet data is writable */
+ nfrags = skb_cow_data(skb, 0, &trailer);
+ if (unlikely(nfrags < 0))
+ return nfrags;
+
+ if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2)))
+ return -ENOSPC;
+
+ /* sg may be required by async crypto */
+ ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) *
+ (nfrags + 2), GFP_ATOMIC);
+ if (unlikely(!ovpn_skb_cb(skb)->sg))
+ return -ENOMEM;
+
+ sg = ovpn_skb_cb(skb)->sg;
+
+ /* sg table:
+ * 0: op, wire nonce (AD, len=OVPN_OP_SIZE_V2+OVPN_NONCE_WIRE_SIZE),
+ * 1, 2, 3, ..., n: payload,
+ * n+1: auth_tag (len=tag_size)
+ */
+ sg_init_table(sg, nfrags + 2);
+
+ /* build scatterlist to encrypt packet payload */
+ ret = skb_to_sgvec_nomark(skb, sg + 1, 0, skb->len);
+ if (unlikely(nfrags != ret))
+ return -EINVAL;
+
+ /* append auth_tag onto scatterlist */
+ __skb_push(skb, tag_size);
+ sg_set_buf(sg + nfrags + 1, skb->data, tag_size);
+
+ /* obtain the packet ID, which is used both as the first
+ * 4 bytes of the nonce and as the last 4 bytes of the associated data.
+ */
+ ret = ovpn_pktid_xmit_next(&ks->pid_xmit, &pktid);
+ if (unlikely(ret < 0))
+ return ret;
+
+ /* iv may be required by async crypto */
+ ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC);
+ if (unlikely(!ovpn_skb_cb(skb)->iv))
+ return -ENOMEM;
+
+ iv = ovpn_skb_cb(skb)->iv;
+
+ /* concatenate the 4-byte packet ID and the 8-byte nonce tail into the
+ * 12-byte nonce
+ */
+ ovpn_pktid_aead_write(pktid, ks->nonce_tail_xmit, iv);
+
+ /* make space for packet id and push it to the front */
+ __skb_push(skb, OVPN_NONCE_WIRE_SIZE);
+ memcpy(skb->data, iv, OVPN_NONCE_WIRE_SIZE);
+
+ /* add packet op as head of additional data */
+ op = ovpn_opcode_compose(OVPN_DATA_V2, ks->key_id, peer->id);
+ __skb_push(skb, OVPN_OPCODE_SIZE);
+ BUILD_BUG_ON(sizeof(op) != OVPN_OPCODE_SIZE);
+ *((__force __be32 *)skb->data) = htonl(op);
+
+ /* AEAD Additional data */
+ sg_set_buf(sg, skb->data, OVPN_AAD_SIZE);
+
+ req = aead_request_alloc(ks->encrypt, GFP_ATOMIC);
+ if (unlikely(!req))
+ return -ENOMEM;
+
+ ovpn_skb_cb(skb)->req = req;
+
+ /* setup async crypto operation */
+ aead_request_set_tfm(req, ks->encrypt);
+ aead_request_set_callback(req, 0, ovpn_encrypt_post, skb);
+ aead_request_set_crypt(req, sg, sg,
+ skb->len - ovpn_aead_encap_overhead(ks), iv);
+ aead_request_set_ad(req, OVPN_AAD_SIZE);
+
+ /* encrypt it */
+ return crypto_aead_encrypt(req);
+}
+
+int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
+ struct sk_buff *skb)
+{
+ const unsigned int tag_size = crypto_aead_authsize(ks->decrypt);
+ int ret, payload_len, nfrags;
+ unsigned int payload_offset;
+ struct aead_request *req;
+ struct sk_buff *trailer;
+ struct scatterlist *sg;
+ u8 *iv;
+
+ payload_offset = OVPN_AAD_SIZE + tag_size;
+ payload_len = skb->len - payload_offset;
+
+ ovpn_skb_cb(skb)->payload_offset = payload_offset;
+ ovpn_skb_cb(skb)->peer = peer;
+ ovpn_skb_cb(skb)->ks = ks;
+
+ /* sanity check on packet size, payload size must be >= 0 */
+ if (unlikely(payload_len < 0))
+ return -EINVAL;
+
+ /* Prepare the skb data buffer to be accessed up until the auth tag.
+ * This is required because this area is directly mapped into the sg
+ * list.
+ */
+ if (unlikely(!pskb_may_pull(skb, payload_offset)))
+ return -ENODATA;
+
+ /* get number of skb frags and ensure that packet data is writable */
+ nfrags = skb_cow_data(skb, 0, &trailer);
+ if (unlikely(nfrags < 0))
+ return nfrags;
+
+ if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2)))
+ return -ENOSPC;
+
+ /* sg may be required by async crypto */
+ ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) *
+ (nfrags + 2), GFP_ATOMIC);
+ if (unlikely(!ovpn_skb_cb(skb)->sg))
+ return -ENOMEM;
+
+ sg = ovpn_skb_cb(skb)->sg;
+
+ /* sg table:
+ * 0: op, wire nonce (AD, len=OVPN_OPCODE_SIZE+OVPN_NONCE_WIRE_SIZE),
+ * 1, 2, 3, ..., n: payload,
+ * n+1: auth_tag (len=tag_size)
+ */
+ sg_init_table(sg, nfrags + 2);
+
+ /* packet op is head of additional data */
+ sg_set_buf(sg, skb->data, OVPN_AAD_SIZE);
+
+ /* build scatterlist to decrypt packet payload */
+ ret = skb_to_sgvec_nomark(skb, sg + 1, payload_offset, payload_len);
+ if (unlikely(nfrags != ret))
+ return -EINVAL;
+
+ /* append auth_tag onto scatterlist */
+ sg_set_buf(sg + nfrags + 1, skb->data + OVPN_AAD_SIZE, tag_size);
+
+ /* iv may be required by async crypto */
+ ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC);
+ if (unlikely(!ovpn_skb_cb(skb)->iv))
+ return -ENOMEM;
+
+ iv = ovpn_skb_cb(skb)->iv;
+
+ /* copy nonce into IV buffer */
+ memcpy(iv, skb->data + OVPN_OPCODE_SIZE, OVPN_NONCE_WIRE_SIZE);
+ memcpy(iv + OVPN_NONCE_WIRE_SIZE, ks->nonce_tail_recv,
+ OVPN_NONCE_TAIL_SIZE);
+
+ req = aead_request_alloc(ks->decrypt, GFP_ATOMIC);
+ if (unlikely(!req))
+ return -ENOMEM;
+
+ ovpn_skb_cb(skb)->req = req;
+
+ /* setup async crypto operation */
+ aead_request_set_tfm(req, ks->decrypt);
+ aead_request_set_callback(req, 0, ovpn_decrypt_post, skb);
+ aead_request_set_crypt(req, sg, sg, payload_len + tag_size, iv);
+
+ aead_request_set_ad(req, OVPN_AAD_SIZE);
+
+ /* decrypt it */
+ return crypto_aead_decrypt(req);
+}
+
+/* Initialize a struct crypto_aead object */
+static struct crypto_aead *ovpn_aead_init(const char *title,
+ const char *alg_name,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct crypto_aead *aead;
+ int ret;
+
+ aead = crypto_alloc_aead(alg_name, 0, 0);
+ if (IS_ERR(aead)) {
+ ret = PTR_ERR(aead);
+ pr_err("%s crypto_alloc_aead failed, err=%d\n", title, ret);
+ aead = NULL;
+ goto error;
+ }
+
+ ret = crypto_aead_setkey(aead, key, keylen);
+ if (ret) {
+ pr_err("%s crypto_aead_setkey size=%u failed, err=%d\n", title,
+ keylen, ret);
+ goto error;
+ }
+
+ ret = crypto_aead_setauthsize(aead, OVPN_AUTH_TAG_SIZE);
+ if (ret) {
+ pr_err("%s crypto_aead_setauthsize failed, err=%d\n", title,
+ ret);
+ goto error;
+ }
+
+ /* basic AEAD assumption */
+ if (crypto_aead_ivsize(aead) != OVPN_NONCE_SIZE) {
+ pr_err("%s IV size must be %d\n", title, OVPN_NONCE_SIZE);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ pr_debug("********* Cipher %s (%s)\n", alg_name, title);
+ pr_debug("*** IV size=%u\n", crypto_aead_ivsize(aead));
+ pr_debug("*** req size=%u\n", crypto_aead_reqsize(aead));
+ pr_debug("*** block size=%u\n", crypto_aead_blocksize(aead));
+ pr_debug("*** auth size=%u\n", crypto_aead_authsize(aead));
+ pr_debug("*** alignmask=0x%x\n", crypto_aead_alignmask(aead));
+
+ return aead;
+
+error:
+ crypto_free_aead(aead);
+ return ERR_PTR(ret);
+}
+
+void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks)
+{
+ if (!ks)
+ return;
+
+ crypto_free_aead(ks->encrypt);
+ crypto_free_aead(ks->decrypt);
+ kfree(ks);
+}
+
+struct ovpn_crypto_key_slot *
+ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc)
+{
+ struct ovpn_crypto_key_slot *ks = NULL;
+ const char *alg_name;
+ int ret;
+
+ /* validate crypto alg */
+ switch (kc->cipher_alg) {
+ case OVPN_CIPHER_ALG_AES_GCM:
+ alg_name = ALG_NAME_AES;
+ break;
+ case OVPN_CIPHER_ALG_CHACHA20_POLY1305:
+ alg_name = ALG_NAME_CHACHAPOLY;
+ break;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ if (kc->encrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE ||
+ kc->decrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ /* build the key slot */
+ ks = kmalloc(sizeof(*ks), GFP_KERNEL);
+ if (!ks)
+ return ERR_PTR(-ENOMEM);
+
+ ks->encrypt = NULL;
+ ks->decrypt = NULL;
+ kref_init(&ks->refcount);
+ ks->key_id = kc->key_id;
+
+ ks->encrypt = ovpn_aead_init("encrypt", alg_name,
+ kc->encrypt.cipher_key,
+ kc->encrypt.cipher_key_size);
+ if (IS_ERR(ks->encrypt)) {
+ ret = PTR_ERR(ks->encrypt);
+ ks->encrypt = NULL;
+ goto destroy_ks;
+ }
+
+ ks->decrypt = ovpn_aead_init("decrypt", alg_name,
+ kc->decrypt.cipher_key,
+ kc->decrypt.cipher_key_size);
+ if (IS_ERR(ks->decrypt)) {
+ ret = PTR_ERR(ks->decrypt);
+ ks->decrypt = NULL;
+ goto destroy_ks;
+ }
+
+ memcpy(ks->nonce_tail_xmit, kc->encrypt.nonce_tail,
+ OVPN_NONCE_TAIL_SIZE);
+ memcpy(ks->nonce_tail_recv, kc->decrypt.nonce_tail,
+ OVPN_NONCE_TAIL_SIZE);
+
+ /* init packet ID generation/validation */
+ ovpn_pktid_xmit_init(&ks->pid_xmit);
+ ovpn_pktid_recv_init(&ks->pid_recv);
+
+ return ks;
+
+destroy_ks:
+ ovpn_aead_crypto_key_slot_destroy(ks);
+ return ERR_PTR(ret);
+}
+
+enum ovpn_cipher_alg ovpn_aead_crypto_alg(struct ovpn_crypto_key_slot *ks)
+{
+ const char *alg_name;
+
+ if (!ks->encrypt)
+ return OVPN_CIPHER_ALG_NONE;
+
+ alg_name = crypto_tfm_alg_name(crypto_aead_tfm(ks->encrypt));
+
+ if (!strcmp(alg_name, ALG_NAME_AES))
+ return OVPN_CIPHER_ALG_AES_GCM;
+ else if (!strcmp(alg_name, ALG_NAME_CHACHAPOLY))
+ return OVPN_CIPHER_ALG_CHACHA20_POLY1305;
+ else
+ return OVPN_CIPHER_ALG_NONE;
+}
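
The wire layout assembled above is therefore: 4-byte DATA_V2 word, 4-byte packet ID (the wire part of the nonce), 16-byte AEAD tag, then the ciphertext, with the first 8 bytes doubling as the AEAD associated data. A standalone sketch of the header and nonce construction, assuming the proto.h helpers follow the layout implied by the 0x48000001 sample comment above (opcode 9 = DATA_V2, key-id 0, peer-id 1):

#include <stdint.h>
#include <string.h>

#define TOY_DATA_V2             9       /* DATA_V2 opcode value */
#define TOY_NONCE_WIRE_SIZE     4       /* packet ID sent on the wire */
#define TOY_NONCE_TAIL_SIZE     8       /* per-key secret nonce tail */
#define TOY_NONCE_SIZE          12      /* full AEAD IV */

/* 32-bit header word: 5-bit opcode, 3-bit key id, 24-bit peer id */
static uint32_t toy_opcode_compose(uint8_t opcode, uint8_t key_id,
                                   uint32_t peer_id)
{
        return (uint32_t)opcode << 27 | (uint32_t)key_id << 24 |
               (peer_id & 0x00ffffff);
}

/* AEAD IV = 4-byte big-endian packet ID (also sent in clear) + nonce tail */
static void toy_build_iv(uint8_t iv[TOY_NONCE_SIZE], uint32_t pktid_be,
                         const uint8_t tail[TOY_NONCE_TAIL_SIZE])
{
        memcpy(iv, &pktid_be, TOY_NONCE_WIRE_SIZE);
        memcpy(iv + TOY_NONCE_WIRE_SIZE, tail, TOY_NONCE_TAIL_SIZE);
}

The receive side rebuilds the same 12-byte IV in ovpn_aead_decrypt() from the 4 bytes following the opcode plus its local nonce_tail_recv copy, so the 8-byte tail never travels on the wire.
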
diff --git a/drivers/net/ovpn/crypto_aead.h b/drivers/net/ovpn/crypto_aead.h
new file mode 100644
index 000000000000..65a2ff307898
--- /dev/null
+++ b/drivers/net/ovpn/crypto_aead.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNAEAD_H_
+#define _NET_OVPN_OVPNAEAD_H_
+
+#include "crypto.h"
+
+#include <asm/types.h>
+#include <linux/skbuff.h>
+
+int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
+ struct sk_buff *skb);
+int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
+ struct sk_buff *skb);
+
+struct ovpn_crypto_key_slot *
+ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc);
+void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks);
+
+enum ovpn_cipher_alg ovpn_aead_crypto_alg(struct ovpn_crypto_key_slot *ks);
+
+#endif /* _NET_OVPN_OVPNAEAD_H_ */
diff --git a/drivers/net/ovpn/io.c b/drivers/net/ovpn/io.c
new file mode 100644
index 000000000000..dd8a8055d967
--- /dev/null
+++ b/drivers/net/ovpn/io.c
@@ -0,0 +1,446 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <crypto/aead.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <net/gro_cells.h>
+#include <net/gso.h>
+#include <net/ip.h>
+
+#include "ovpnpriv.h"
+#include "peer.h"
+#include "io.h"
+#include "bind.h"
+#include "crypto.h"
+#include "crypto_aead.h"
+#include "netlink.h"
+#include "proto.h"
+#include "tcp.h"
+#include "udp.h"
+#include "skb.h"
+#include "socket.h"
+
+const unsigned char ovpn_keepalive_message[OVPN_KEEPALIVE_SIZE] = {
+ 0x2a, 0x18, 0x7b, 0xf3, 0x64, 0x1e, 0xb4, 0xcb,
+ 0x07, 0xed, 0x2d, 0x0a, 0x98, 0x1f, 0xc7, 0x48
+};
+
+/**
+ * ovpn_is_keepalive - check if skb contains a keepalive message
+ * @skb: packet to check
+ *
+ * Assumes that the first byte of skb->data is defined.
+ *
+ * Return: true if skb contains a keepalive or false otherwise
+ */
+static bool ovpn_is_keepalive(struct sk_buff *skb)
+{
+ if (*skb->data != ovpn_keepalive_message[0])
+ return false;
+
+ if (skb->len != OVPN_KEEPALIVE_SIZE)
+ return false;
+
+ if (!pskb_may_pull(skb, OVPN_KEEPALIVE_SIZE))
+ return false;
+
+ return !memcmp(skb->data, ovpn_keepalive_message, OVPN_KEEPALIVE_SIZE);
+}
+
+/* Called after decrypt to write the IP packet to the device.
+ * This method is expected to manage/free the skb.
+ */
+static void ovpn_netdev_write(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+ unsigned int pkt_len;
+ int ret;
+
+ /* we can't guarantee the packet wasn't corrupted before entering the
+ * VPN, therefore we give other layers a chance to check that
+ */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* skb hash for transport packet no longer valid after decapsulation */
+ skb_clear_hash(skb);
+
+ /* post-decrypt scrub -- prepare to inject encapsulated packet onto the
+ * interface, based on __skb_tunnel_rx() in dst.h
+ */
+ skb->dev = peer->ovpn->dev;
+ skb_set_queue_mapping(skb, 0);
+ skb_scrub_packet(skb, true);
+
+ /* network header reset in ovpn_decrypt_post() */
+ skb_reset_transport_header(skb);
+ skb_reset_inner_headers(skb);
+
+ /* cause packet to be "received" by the interface */
+ pkt_len = skb->len;
+ ret = gro_cells_receive(&peer->ovpn->gro_cells, skb);
+ if (likely(ret == NET_RX_SUCCESS)) {
+ /* update RX stats with the size of decrypted packet */
+ ovpn_peer_stats_increment_rx(&peer->vpn_stats, pkt_len);
+ dev_dstats_rx_add(peer->ovpn->dev, pkt_len);
+ }
+}
+
+void ovpn_decrypt_post(void *data, int ret)
+{
+ struct ovpn_crypto_key_slot *ks;
+ unsigned int payload_offset = 0;
+ struct sk_buff *skb = data;
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ __be16 proto;
+ __be32 *pid;
+
+ /* crypto is happening asynchronously. This function will be called
+ * again later by the crypto callback with a proper return code
+ */
+ if (unlikely(ret == -EINPROGRESS))
+ return;
+
+ payload_offset = ovpn_skb_cb(skb)->payload_offset;
+ ks = ovpn_skb_cb(skb)->ks;
+ peer = ovpn_skb_cb(skb)->peer;
+
+ /* crypto is done, cleanup skb CB and its members */
+ kfree(ovpn_skb_cb(skb)->iv);
+ kfree(ovpn_skb_cb(skb)->sg);
+ aead_request_free(ovpn_skb_cb(skb)->req);
+
+ if (unlikely(ret < 0))
+ goto drop;
+
+ /* PID sits after the op */
+ pid = (__force __be32 *)(skb->data + OVPN_OPCODE_SIZE);
+ ret = ovpn_pktid_recv(&ks->pid_recv, ntohl(*pid), 0);
+ if (unlikely(ret < 0)) {
+ net_err_ratelimited("%s: PKT ID RX error for peer %u: %d\n",
+ netdev_name(peer->ovpn->dev), peer->id,
+ ret);
+ goto drop;
+ }
+
+ /* keep track of last received authenticated packet for keepalive */
+ WRITE_ONCE(peer->last_recv, ktime_get_real_seconds());
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (sock && sock->sock->sk->sk_protocol == IPPROTO_UDP)
+ /* check if this peer changed local or remote endpoint */
+ ovpn_peer_endpoints_update(peer, skb);
+ rcu_read_unlock();
+
+ /* point to encapsulated IP packet */
+ __skb_pull(skb, payload_offset);
+
+ /* check if this is a valid data packet that has to be delivered to the
+ * ovpn interface
+ */
+ skb_reset_network_header(skb);
+ proto = ovpn_ip_check_protocol(skb);
+ if (unlikely(!proto)) {
+ /* check if null packet */
+ if (unlikely(!pskb_may_pull(skb, 1))) {
+ net_info_ratelimited("%s: NULL packet received from peer %u\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id);
+ goto drop;
+ }
+
+ if (ovpn_is_keepalive(skb)) {
+ net_dbg_ratelimited("%s: ping received from peer %u\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id);
+ /* we drop the packet, but this is not a failure */
+ consume_skb(skb);
+ goto drop_nocount;
+ }
+
+ net_info_ratelimited("%s: unsupported protocol received from peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto drop;
+ }
+ skb->protocol = proto;
+
+ /* perform Reverse Path Filtering (RPF) */
+ if (unlikely(!ovpn_peer_check_by_src(peer->ovpn, skb, peer))) {
+ if (skb->protocol == htons(ETH_P_IPV6))
+ net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI6c\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, &ipv6_hdr(skb)->saddr);
+ else
+ net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI4\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, &ip_hdr(skb)->saddr);
+ goto drop;
+ }
+
+ ovpn_netdev_write(peer, skb);
+ /* skb is passed to upper layer - don't free it */
+ skb = NULL;
+drop:
+ if (unlikely(skb))
+ dev_dstats_rx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+drop_nocount:
+ if (likely(peer))
+ ovpn_peer_put(peer);
+ if (likely(ks))
+ ovpn_crypto_key_slot_put(ks);
+}
+
+/* RX path entry point: decrypt packet and forward it to the device */
+void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+ struct ovpn_crypto_key_slot *ks;
+ u8 key_id;
+
+ ovpn_peer_stats_increment_rx(&peer->link_stats, skb->len);
+
+ /* get the key slot matching the key ID in the received packet */
+ key_id = ovpn_key_id_from_skb(skb);
+ ks = ovpn_crypto_key_id_to_slot(&peer->crypto, key_id);
+ if (unlikely(!ks)) {
+ net_info_ratelimited("%s: no available key for peer %u, key-id: %u\n",
+ netdev_name(peer->ovpn->dev), peer->id,
+ key_id);
+ dev_dstats_rx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+ ovpn_peer_put(peer);
+ return;
+ }
+
+ memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb));
+ ovpn_decrypt_post(skb, ovpn_aead_decrypt(peer, ks, skb));
+}
+
+void ovpn_encrypt_post(void *data, int ret)
+{
+ struct ovpn_crypto_key_slot *ks;
+ struct sk_buff *skb = data;
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ unsigned int orig_len;
+
+ /* encryption is happening asynchronously. This function will be
+ * called later by the crypto callback with a proper return value
+ */
+ if (unlikely(ret == -EINPROGRESS))
+ return;
+
+ ks = ovpn_skb_cb(skb)->ks;
+ peer = ovpn_skb_cb(skb)->peer;
+
+ /* crypto is done, cleanup skb CB and its members */
+ kfree(ovpn_skb_cb(skb)->iv);
+ kfree(ovpn_skb_cb(skb)->sg);
+ aead_request_free(ovpn_skb_cb(skb)->req);
+
+ if (unlikely(ret == -ERANGE)) {
+ /* we ran out of IVs and we must kill the key as it can't be
+ * used anymore
+ */
+ netdev_warn(peer->ovpn->dev,
+ "killing key %u for peer %u\n", ks->key_id,
+ peer->id);
+ if (ovpn_crypto_kill_key(&peer->crypto, ks->key_id))
+ /* let userspace know so that a new key can be negotiated */
+ ovpn_nl_key_swap_notify(peer, ks->key_id);
+
+ goto err;
+ }
+
+ if (unlikely(ret < 0))
+ goto err;
+
+ skb_mark_not_on_list(skb);
+ orig_len = skb->len;
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (unlikely(!sock))
+ goto err_unlock;
+
+ switch (sock->sock->sk->sk_protocol) {
+ case IPPROTO_UDP:
+ ovpn_udp_send_skb(peer, sock->sock, skb);
+ break;
+ case IPPROTO_TCP:
+ ovpn_tcp_send_skb(peer, sock->sock, skb);
+ break;
+ default:
+ /* no transport configured yet */
+ goto err_unlock;
+ }
+
+ ovpn_peer_stats_increment_tx(&peer->link_stats, orig_len);
+ /* keep track of last sent packet for keepalive */
+ WRITE_ONCE(peer->last_sent, ktime_get_real_seconds());
+ /* skb passed down the stack - don't free it */
+ skb = NULL;
+err_unlock:
+ rcu_read_unlock();
+err:
+ if (unlikely(skb))
+ dev_dstats_tx_dropped(peer->ovpn->dev);
+ if (likely(peer))
+ ovpn_peer_put(peer);
+ if (likely(ks))
+ ovpn_crypto_key_slot_put(ks);
+ kfree_skb(skb);
+}
+
+static bool ovpn_encrypt_one(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ /* get primary key to be used for encrypting data */
+ ks = ovpn_crypto_key_slot_primary(&peer->crypto);
+ if (unlikely(!ks))
+ return false;
+
+ /* take a reference to the peer because the crypto code may run async.
+ * ovpn_encrypt_post() will release it upon completion
+ */
+ if (unlikely(!ovpn_peer_hold(peer))) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ ovpn_crypto_key_slot_put(ks);
+ return false;
+ }
+
+ memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb));
+ ovpn_encrypt_post(skb, ovpn_aead_encrypt(peer, ks, skb));
+ return true;
+}
+
+/* send skb to connected peer, if any */
+static void ovpn_send(struct ovpn_priv *ovpn, struct sk_buff *skb,
+ struct ovpn_peer *peer)
+{
+ struct sk_buff *curr, *next;
+
+ /* this might be a GSO-segmented skb list: process each skb
+ * independently
+ */
+ skb_list_walk_safe(skb, curr, next) {
+ if (unlikely(!ovpn_encrypt_one(peer, curr))) {
+ dev_dstats_tx_dropped(ovpn->dev);
+ kfree_skb(curr);
+ }
+ }
+
+ ovpn_peer_put(peer);
+}
+
+/* Send user data to the network
+ */
+netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+ struct sk_buff *segments, *curr, *next;
+ struct sk_buff_head skb_list;
+ struct ovpn_peer *peer;
+ __be16 proto;
+ int ret;
+
+ /* reset netfilter state */
+ nf_reset_ct(skb);
+
+ /* verify IP header size in network packet */
+ proto = ovpn_ip_check_protocol(skb);
+ if (unlikely(!proto || skb->protocol != proto))
+ goto drop;
+
+ if (skb_is_gso(skb)) {
+ segments = skb_gso_segment(skb, 0);
+ if (IS_ERR(segments)) {
+ ret = PTR_ERR(segments);
+ net_err_ratelimited("%s: cannot segment payload packet: %d\n",
+ netdev_name(dev), ret);
+ goto drop;
+ }
+
+ consume_skb(skb);
+ skb = segments;
+ }
+
+ /* from this moment on, "skb" might be a list */
+
+ __skb_queue_head_init(&skb_list);
+ skb_list_walk_safe(skb, curr, next) {
+ skb_mark_not_on_list(curr);
+
+ curr = skb_share_check(curr, GFP_ATOMIC);
+ if (unlikely(!curr)) {
+ net_err_ratelimited("%s: skb_share_check failed for payload packet\n",
+ netdev_name(dev));
+ dev_dstats_tx_dropped(ovpn->dev);
+ continue;
+ }
+
+ __skb_queue_tail(&skb_list, curr);
+ }
+ skb_list.prev->next = NULL;
+
+ /* retrieve peer serving the destination IP of this packet */
+ peer = ovpn_peer_get_by_dst(ovpn, skb);
+ if (unlikely(!peer)) {
+ net_dbg_ratelimited("%s: no peer to send data to\n",
+ netdev_name(ovpn->dev));
+ goto drop;
+ }
+
+ ovpn_peer_stats_increment_tx(&peer->vpn_stats, skb->len);
+ ovpn_send(ovpn, skb_list.next, peer);
+
+ return NETDEV_TX_OK;
+
+drop:
+ dev_dstats_tx_dropped(ovpn->dev);
+ skb_tx_error(skb);
+ kfree_skb_list(skb);
+ return NET_XMIT_DROP;
+}
+
+/**
+ * ovpn_xmit_special - encrypt and transmit an out-of-band message to peer
+ * @peer: peer to send the message to
+ * @data: message content
+ * @len: message length
+ *
+ * Assumes that caller holds a reference to peer, which will be
+ * passed to ovpn_send()
+ */
+void ovpn_xmit_special(struct ovpn_peer *peer, const void *data,
+ const unsigned int len)
+{
+ struct ovpn_priv *ovpn;
+ struct sk_buff *skb;
+
+ ovpn = peer->ovpn;
+ if (unlikely(!ovpn)) {
+ ovpn_peer_put(peer);
+ return;
+ }
+
+ skb = alloc_skb(256 + len, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ ovpn_peer_put(peer);
+ return;
+ }
+
+ skb_reserve(skb, 128);
+ skb->priority = TC_PRIO_BESTEFFORT;
+ __skb_put_data(skb, data, len);
+
+ ovpn_send(ovpn, skb, peer);
+}
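
When ovpn_encrypt_post() hits -ERANGE it kills the exhausted key and calls ovpn_nl_key_swap_notify(), so userspace has to renegotiate before traffic can resume on that slot. A daemon would typically watch the "peers" multicast group for this; a minimal libnl sketch, with the command and group constant names assumed to match the generated uapi header and all error handling omitted:

#include <netlink/genl/ctrl.h>
#include <netlink/genl/genl.h>
#include <netlink/netlink.h>
#include <linux/ovpn.h>

static int on_notification(struct nl_msg *msg, void *arg)
{
        struct genlmsghdr *gnlh = genlmsg_hdr(nlmsg_hdr(msg));

        if (gnlh->cmd == OVPN_CMD_KEY_SWAP_NTF) {
                /* parse the OVPN_A_KEYCONF nest here and kick off a rekey
                 * for the reported peer-id/key-id on the control channel
                 */
        }
        return NL_OK;
}

static void listen_for_key_swap(void)
{
        struct nl_sock *sk = nl_socket_alloc();
        int grp;

        genl_connect(sk);
        grp = genl_ctrl_resolve_grp(sk, OVPN_FAMILY_NAME, OVPN_MCGRP_PEERS);
        nl_socket_add_membership(sk, grp);
        nl_socket_disable_seq_check(sk);
        nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM,
                            on_notification, NULL);

        for (;;)
                nl_recvmsgs_default(sk);
}
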
diff --git a/drivers/net/ovpn/io.h b/drivers/net/ovpn/io.h
new file mode 100644
index 000000000000..db9e10f9077c
--- /dev/null
+++ b/drivers/net/ovpn/io.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPN_H_
+#define _NET_OVPN_OVPN_H_
+
+/* DATA_V2 header size with AEAD encryption */
+#define OVPN_HEAD_ROOM (OVPN_OPCODE_SIZE + OVPN_NONCE_WIRE_SIZE + \
+ 16 /* AEAD TAG length */ + \
+ max(sizeof(struct udphdr), sizeof(struct tcphdr)) +\
+ max(sizeof(struct ipv6hdr), sizeof(struct iphdr)))
+
+/* max padding required by encryption */
+#define OVPN_MAX_PADDING 16
+
+#define OVPN_KEEPALIVE_SIZE 16
+extern const unsigned char ovpn_keepalive_message[OVPN_KEEPALIVE_SIZE];
+
+netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev);
+
+void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb);
+void ovpn_xmit_special(struct ovpn_peer *peer, const void *data,
+ const unsigned int len);
+
+void ovpn_encrypt_post(void *data, int ret);
+void ovpn_decrypt_post(void *data, int ret);
+
+#endif /* _NET_OVPN_OVPN_H_ */
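
Plugging in the usual header sizes makes the OVPN_HEAD_ROOM math concrete, assuming the proto.h constants OVPN_OPCODE_SIZE and OVPN_NONCE_WIRE_SIZE are both 4 bytes (as the DATA_V2 sample in crypto_aead.c suggests): 4 + 4 + 16 + max(8, 20) + max(20, 40) = 84 bytes, which is also what ovpn_setup() in main.c subtracts from ETH_DATA_LEN to arrive at a default MTU of 1416. A quick standalone check:

#include <stdio.h>

int main(void)
{
        int head_room = 4       /* opcode */
                      + 4       /* wire nonce (packet ID) */
                      + 16      /* AEAD tag */
                      + 20      /* max(udphdr = 8, tcphdr = 20) */
                      + 40;     /* max(iphdr = 20, ipv6hdr = 40) */

        /* mirrors dev->mtu = ETH_DATA_LEN - OVPN_HEAD_ROOM in ovpn_setup() */
        printf("head room = %d, default MTU = %d\n", head_room,
               1500 - head_room);
        return 0;
}
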
diff --git a/drivers/net/ovpn/main.c b/drivers/net/ovpn/main.c
new file mode 100644
index 000000000000..0acb0934c1be
--- /dev/null
+++ b/drivers/net/ovpn/main.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#include <linux/ethtool.h>
+#include <linux/genetlink.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <net/gro_cells.h>
+#include <net/ip.h>
+#include <net/rtnetlink.h>
+#include <uapi/linux/if_arp.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "netlink.h"
+#include "io.h"
+#include "peer.h"
+#include "proto.h"
+#include "tcp.h"
+#include "udp.h"
+
+static void ovpn_priv_free(struct net_device *net)
+{
+ struct ovpn_priv *ovpn = netdev_priv(net);
+
+ kfree(ovpn->peers);
+}
+
+static int ovpn_mp_alloc(struct ovpn_priv *ovpn)
+{
+ struct in_device *dev_v4;
+ int i;
+
+ if (ovpn->mode != OVPN_MODE_MP)
+ return 0;
+
+ dev_v4 = __in_dev_get_rtnl(ovpn->dev);
+ if (dev_v4) {
+ /* disable redirects as Linux gets confused by ovpn
+ * handling same-LAN routing.
+ * This happens because a multipeer interface is used as
+ * relay point between hosts in the same subnet, while
+ * in a classic LAN this would not be needed because the
+ * two hosts would be able to talk directly.
+ */
+ IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
+ IPV4_DEVCONF_ALL(dev_net(ovpn->dev), SEND_REDIRECTS) = false;
+ }
+
+ /* the peer container is fairly large, therefore we allocate it only in
+ * MP mode
+ */
+ ovpn->peers = kzalloc(sizeof(*ovpn->peers), GFP_KERNEL);
+ if (!ovpn->peers)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(ovpn->peers->by_id); i++) {
+ INIT_HLIST_HEAD(&ovpn->peers->by_id[i]);
+ INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_vpn_addr4[i], i);
+ INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_vpn_addr6[i], i);
+ INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_transp_addr[i], i);
+ }
+
+ return 0;
+}
+
+static int ovpn_net_init(struct net_device *dev)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+ int err = gro_cells_init(&ovpn->gro_cells, dev);
+
+ if (err < 0)
+ return err;
+
+ err = ovpn_mp_alloc(ovpn);
+ if (err < 0) {
+ gro_cells_destroy(&ovpn->gro_cells);
+ return err;
+ }
+
+ return 0;
+}
+
+static void ovpn_net_uninit(struct net_device *dev)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+
+ gro_cells_destroy(&ovpn->gro_cells);
+}
+
+static const struct net_device_ops ovpn_netdev_ops = {
+ .ndo_init = ovpn_net_init,
+ .ndo_uninit = ovpn_net_uninit,
+ .ndo_start_xmit = ovpn_net_xmit,
+};
+
+static const struct device_type ovpn_type = {
+ .name = OVPN_FAMILY_NAME,
+};
+
+static const struct nla_policy ovpn_policy[IFLA_OVPN_MAX + 1] = {
+ [IFLA_OVPN_MODE] = NLA_POLICY_RANGE(NLA_U8, OVPN_MODE_P2P,
+ OVPN_MODE_MP),
+};
+
+/**
+ * ovpn_dev_is_valid - check if the netdevice is of type 'ovpn'
+ * @dev: the interface to check
+ *
+ * Return: whether the netdevice is of type 'ovpn'
+ */
+bool ovpn_dev_is_valid(const struct net_device *dev)
+{
+ return dev->netdev_ops == &ovpn_netdev_ops;
+}
+
+static void ovpn_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, "ovpn", sizeof(info->driver));
+ strscpy(info->bus_info, "ovpn", sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops ovpn_ethtool_ops = {
+ .get_drvinfo = ovpn_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static void ovpn_setup(struct net_device *dev)
+{
+ netdev_features_t feat = NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA;
+
+ dev->needs_free_netdev = true;
+
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
+
+ dev->ethtool_ops = &ovpn_ethtool_ops;
+ dev->netdev_ops = &ovpn_netdev_ops;
+
+ dev->priv_destructor = ovpn_priv_free;
+
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->mtu = ETH_DATA_LEN - OVPN_HEAD_ROOM;
+ dev->min_mtu = IPV4_MIN_MTU;
+ dev->max_mtu = IP_MAX_MTU - OVPN_HEAD_ROOM;
+
+ dev->type = ARPHRD_NONE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->priv_flags |= IFF_NO_QUEUE;
+
+ dev->lltx = true;
+ dev->features |= feat;
+ dev->hw_features |= feat;
+ dev->hw_enc_features |= feat;
+
+ dev->needed_headroom = ALIGN(OVPN_HEAD_ROOM, 4);
+ dev->needed_tailroom = OVPN_MAX_PADDING;
+
+ SET_NETDEV_DEVTYPE(dev, &ovpn_type);
+}
+
+static int ovpn_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+ struct nlattr **data = params->data;
+ enum ovpn_mode mode = OVPN_MODE_P2P;
+
+ if (data && data[IFLA_OVPN_MODE]) {
+ mode = nla_get_u8(data[IFLA_OVPN_MODE]);
+ netdev_dbg(dev, "setting device mode: %u\n", mode);
+ }
+
+ ovpn->dev = dev;
+ ovpn->mode = mode;
+ spin_lock_init(&ovpn->lock);
+ INIT_DELAYED_WORK(&ovpn->keepalive_work, ovpn_peer_keepalive_work);
+
+ /* Set carrier explicitly after registration, this way state is
+ * clearly defined.
+ *
+ * In case of MP interfaces we keep the carrier always on.
+ *
+ * Carrier for P2P interfaces is initially off and it is then
+ * switched on and off when the remote peer is added or deleted.
+ */
+ if (ovpn->mode == OVPN_MODE_MP)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ return register_netdevice(dev);
+}
+
+static void ovpn_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+
+ cancel_delayed_work_sync(&ovpn->keepalive_work);
+ ovpn_peers_free(ovpn, NULL, OVPN_DEL_PEER_REASON_TEARDOWN);
+ unregister_netdevice_queue(dev, head);
+}
+
+static int ovpn_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+
+ if (nla_put_u8(skb, IFLA_OVPN_MODE, ovpn->mode))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static struct rtnl_link_ops ovpn_link_ops = {
+ .kind = "ovpn",
+ .netns_refund = false,
+ .priv_size = sizeof(struct ovpn_priv),
+ .setup = ovpn_setup,
+ .policy = ovpn_policy,
+ .maxtype = IFLA_OVPN_MAX,
+ .newlink = ovpn_newlink,
+ .dellink = ovpn_dellink,
+ .fill_info = ovpn_fill_info,
+};
+
+static int __init ovpn_init(void)
+{
+ int err = rtnl_link_register(&ovpn_link_ops);
+
+ if (err) {
+ pr_err("ovpn: can't register rtnl link ops: %d\n", err);
+ return err;
+ }
+
+ err = ovpn_nl_register();
+ if (err) {
+ pr_err("ovpn: can't register netlink family: %d\n", err);
+ goto unreg_rtnl;
+ }
+
+ ovpn_tcp_init();
+
+ return 0;
+
+unreg_rtnl:
+ rtnl_link_unregister(&ovpn_link_ops);
+ return err;
+}
+
+static __exit void ovpn_cleanup(void)
+{
+ ovpn_nl_unregister();
+ rtnl_link_unregister(&ovpn_link_ops);
+
+ rcu_barrier();
+}
+
+module_init(ovpn_init);
+module_exit(ovpn_cleanup);
+
+MODULE_DESCRIPTION("OpenVPN data channel offload (ovpn)");
+MODULE_AUTHOR("Antonio Quartulli <antonio@openvpn.net>");
+MODULE_LICENSE("GPL");
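
For reference, ovpn_newlink() above is reached through the regular rtnetlink link-creation path, with IFLA_OVPN_MODE nested under IFLA_LINKINFO/IFLA_INFO_DATA. The sketch below is not part of this patch: it is one plausible way to create an MP-mode interface from userspace with libmnl, assuming the uapi headers added by this series are installed; create_ovpn_mp() is a hypothetical helper and error handling is kept to a minimum.

/* Hypothetical userspace sketch (not from this series): create an ovpn
 * interface in MP mode via RTM_NEWLINK, nesting IFLA_OVPN_MODE inside
 * IFLA_LINKINFO/IFLA_INFO_DATA. Assumes libmnl plus the installed
 * <linux/if_link.h> and <linux/ovpn.h> uapi headers.
 */
#include <time.h>
#include <libmnl/libmnl.h>
#include <linux/if_link.h>
#include <linux/rtnetlink.h>
#include <linux/ovpn.h>

static int create_ovpn_mp(const char *ifname)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct nlattr *linkinfo, *data;
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	int ret;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = RTM_NEWLINK;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;
	nlh->nlmsg_seq = time(NULL);
	mnl_nlmsg_put_extra_header(nlh, sizeof(struct ifinfomsg));

	mnl_attr_put_strz(nlh, IFLA_IFNAME, ifname);
	linkinfo = mnl_attr_nest_start(nlh, IFLA_LINKINFO);
	mnl_attr_put_strz(nlh, IFLA_INFO_KIND, "ovpn");
	data = mnl_attr_nest_start(nlh, IFLA_INFO_DATA);
	mnl_attr_put_u8(nlh, IFLA_OVPN_MODE, OVPN_MODE_MP);
	mnl_attr_nest_end(nlh, data);
	mnl_attr_nest_end(nlh, linkinfo);

	nl = mnl_socket_open(NETLINK_ROUTE);
	if (!nl)
		return -1;
	mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID);

	ret = mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
	if (ret >= 0)
		ret = mnl_socket_recvfrom(nl, buf, sizeof(buf)); /* ACK/error */

	mnl_socket_close(nl);
	return ret < 0 ? -1 : 0;
}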
diff --git a/drivers/net/ovpn/main.h b/drivers/net/ovpn/main.h
new file mode 100644
index 000000000000..017cd0100765
--- /dev/null
+++ b/drivers/net/ovpn/main.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_MAIN_H_
+#define _NET_OVPN_MAIN_H_
+
+bool ovpn_dev_is_valid(const struct net_device *dev);
+
+#endif /* _NET_OVPN_MAIN_H_ */
diff --git a/drivers/net/ovpn/netlink-gen.c b/drivers/net/ovpn/netlink-gen.c
new file mode 100644
index 000000000000..58e1a4342378
--- /dev/null
+++ b/drivers/net/ovpn/netlink-gen.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/ovpn.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "netlink-gen.h"
+
+#include <uapi/linux/ovpn.h>
+
+/* Integer value ranges */
+static const struct netlink_range_validation ovpn_a_peer_id_range = {
+ .max = 16777215ULL,
+};
+
+static const struct netlink_range_validation ovpn_a_keyconf_peer_id_range = {
+ .max = 16777215ULL,
+};
+
+/* Common nested types */
+const struct nla_policy ovpn_keyconf_nl_policy[OVPN_A_KEYCONF_DECRYPT_DIR + 1] = {
+ [OVPN_A_KEYCONF_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_keyconf_peer_id_range),
+ [OVPN_A_KEYCONF_SLOT] = NLA_POLICY_MAX(NLA_U32, 1),
+ [OVPN_A_KEYCONF_KEY_ID] = NLA_POLICY_MAX(NLA_U32, 7),
+ [OVPN_A_KEYCONF_CIPHER_ALG] = NLA_POLICY_MAX(NLA_U32, 2),
+ [OVPN_A_KEYCONF_ENCRYPT_DIR] = NLA_POLICY_NESTED(ovpn_keydir_nl_policy),
+ [OVPN_A_KEYCONF_DECRYPT_DIR] = NLA_POLICY_NESTED(ovpn_keydir_nl_policy),
+};
+
+const struct nla_policy ovpn_keydir_nl_policy[OVPN_A_KEYDIR_NONCE_TAIL + 1] = {
+ [OVPN_A_KEYDIR_CIPHER_KEY] = NLA_POLICY_MAX_LEN(256),
+ [OVPN_A_KEYDIR_NONCE_TAIL] = NLA_POLICY_EXACT_LEN(OVPN_NONCE_TAIL_SIZE),
+};
+
+const struct nla_policy ovpn_peer_nl_policy[OVPN_A_PEER_LINK_TX_PACKETS + 1] = {
+ [OVPN_A_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_peer_id_range),
+ [OVPN_A_PEER_REMOTE_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_REMOTE_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID] = { .type = NLA_U32, },
+ [OVPN_A_PEER_REMOTE_PORT] = NLA_POLICY_MIN(NLA_BE16, 1),
+ [OVPN_A_PEER_SOCKET] = { .type = NLA_U32, },
+ [OVPN_A_PEER_SOCKET_NETNSID] = { .type = NLA_S32, },
+ [OVPN_A_PEER_VPN_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_VPN_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_LOCAL_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_LOCAL_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_LOCAL_PORT] = NLA_POLICY_MIN(NLA_BE16, 1),
+ [OVPN_A_PEER_KEEPALIVE_INTERVAL] = { .type = NLA_U32, },
+ [OVPN_A_PEER_KEEPALIVE_TIMEOUT] = { .type = NLA_U32, },
+ [OVPN_A_PEER_DEL_REASON] = NLA_POLICY_MAX(NLA_U32, 4),
+ [OVPN_A_PEER_VPN_RX_BYTES] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_VPN_TX_BYTES] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_VPN_RX_PACKETS] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_VPN_TX_PACKETS] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_LINK_RX_BYTES] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_LINK_TX_BYTES] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_LINK_RX_PACKETS] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_LINK_TX_PACKETS] = { .type = NLA_UINT, },
+};
+
+/* OVPN_CMD_PEER_NEW - do */
+static const struct nla_policy ovpn_peer_new_nl_policy[OVPN_A_PEER + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy),
+};
+
+/* OVPN_CMD_PEER_SET - do */
+static const struct nla_policy ovpn_peer_set_nl_policy[OVPN_A_PEER + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy),
+};
+
+/* OVPN_CMD_PEER_GET - do */
+static const struct nla_policy ovpn_peer_get_do_nl_policy[OVPN_A_PEER + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy),
+};
+
+/* OVPN_CMD_PEER_GET - dump */
+static const struct nla_policy ovpn_peer_get_dump_nl_policy[OVPN_A_IFINDEX + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+};
+
+/* OVPN_CMD_PEER_DEL - do */
+static const struct nla_policy ovpn_peer_del_nl_policy[OVPN_A_PEER + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy),
+};
+
+/* OVPN_CMD_KEY_NEW - do */
+static const struct nla_policy ovpn_key_new_nl_policy[OVPN_A_KEYCONF + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy),
+};
+
+/* OVPN_CMD_KEY_GET - do */
+static const struct nla_policy ovpn_key_get_nl_policy[OVPN_A_KEYCONF + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy),
+};
+
+/* OVPN_CMD_KEY_SWAP - do */
+static const struct nla_policy ovpn_key_swap_nl_policy[OVPN_A_KEYCONF + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy),
+};
+
+/* OVPN_CMD_KEY_DEL - do */
+static const struct nla_policy ovpn_key_del_nl_policy[OVPN_A_KEYCONF + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy),
+};
+
+/* Ops table for ovpn */
+static const struct genl_split_ops ovpn_nl_ops[] = {
+ {
+ .cmd = OVPN_CMD_PEER_NEW,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_peer_new_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_peer_new_nl_policy,
+ .maxattr = OVPN_A_PEER,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_PEER_SET,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_peer_set_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_peer_set_nl_policy,
+ .maxattr = OVPN_A_PEER,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_PEER_GET,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_peer_get_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_peer_get_do_nl_policy,
+ .maxattr = OVPN_A_PEER,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_PEER_GET,
+ .dumpit = ovpn_nl_peer_get_dumpit,
+ .policy = ovpn_peer_get_dump_nl_policy,
+ .maxattr = OVPN_A_IFINDEX,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = OVPN_CMD_PEER_DEL,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_peer_del_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_peer_del_nl_policy,
+ .maxattr = OVPN_A_PEER,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_KEY_NEW,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_key_new_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_key_new_nl_policy,
+ .maxattr = OVPN_A_KEYCONF,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_KEY_GET,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_key_get_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_key_get_nl_policy,
+ .maxattr = OVPN_A_KEYCONF,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_KEY_SWAP,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_key_swap_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_key_swap_nl_policy,
+ .maxattr = OVPN_A_KEYCONF,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_KEY_DEL,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_key_del_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_key_del_nl_policy,
+ .maxattr = OVPN_A_KEYCONF,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+};
+
+static const struct genl_multicast_group ovpn_nl_mcgrps[] = {
+ [OVPN_NLGRP_PEERS] = { "peers", },
+};
+
+struct genl_family ovpn_nl_family __ro_after_init = {
+ .name = OVPN_FAMILY_NAME,
+ .version = OVPN_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = ovpn_nl_ops,
+ .n_split_ops = ARRAY_SIZE(ovpn_nl_ops),
+ .mcgrps = ovpn_nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(ovpn_nl_mcgrps),
+};
diff --git a/drivers/net/ovpn/netlink-gen.h b/drivers/net/ovpn/netlink-gen.h
new file mode 100644
index 000000000000..66a4e4a0a055
--- /dev/null
+++ b/drivers/net/ovpn/netlink-gen.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/ovpn.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_OVPN_GEN_H
+#define _LINUX_OVPN_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/ovpn.h>
+
+/* Common nested types */
+extern const struct nla_policy ovpn_keyconf_nl_policy[OVPN_A_KEYCONF_DECRYPT_DIR + 1];
+extern const struct nla_policy ovpn_keydir_nl_policy[OVPN_A_KEYDIR_NONCE_TAIL + 1];
+extern const struct nla_policy ovpn_peer_nl_policy[OVPN_A_PEER_LINK_TX_PACKETS + 1];
+
+int ovpn_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+void
+ovpn_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+
+int ovpn_nl_peer_new_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_peer_set_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_peer_get_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_peer_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int ovpn_nl_peer_del_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_key_new_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_key_get_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_key_swap_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_key_del_doit(struct sk_buff *skb, struct genl_info *info);
+
+enum {
+ OVPN_NLGRP_PEERS,
+};
+
+extern struct genl_family ovpn_nl_family;
+
+#endif /* _LINUX_OVPN_GEN_H */
diff --git a/drivers/net/ovpn/netlink.c b/drivers/net/ovpn/netlink.c
new file mode 100644
index 000000000000..bea03913bfb1
--- /dev/null
+++ b/drivers/net/ovpn/netlink.c
@@ -0,0 +1,1258 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/ovpn.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "netlink.h"
+#include "netlink-gen.h"
+#include "bind.h"
+#include "crypto.h"
+#include "peer.h"
+#include "socket.h"
+
+MODULE_ALIAS_GENL_FAMILY(OVPN_FAMILY_NAME);
+
+/**
+ * ovpn_get_dev_from_attrs - retrieve the ovpn private data from the netdevice
+ * a netlink message is targeting
+ * @net: network namespace where to look for the interface
+ * @info: generic netlink info from the user request
+ * @tracker: tracker object to be used for the netdev reference acquisition
+ *
+ * Return: the ovpn private data, if found, or an error otherwise
+ */
+static struct ovpn_priv *
+ovpn_get_dev_from_attrs(struct net *net, const struct genl_info *info,
+ netdevice_tracker *tracker)
+{
+ struct ovpn_priv *ovpn;
+ struct net_device *dev;
+ int ifindex;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_IFINDEX))
+ return ERR_PTR(-EINVAL);
+
+ ifindex = nla_get_u32(info->attrs[OVPN_A_IFINDEX]);
+
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(net, ifindex);
+ if (!dev) {
+ rcu_read_unlock();
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "ifindex does not match any interface");
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (!ovpn_dev_is_valid(dev)) {
+ rcu_read_unlock();
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "specified interface is not ovpn");
+ NL_SET_BAD_ATTR(info->extack, info->attrs[OVPN_A_IFINDEX]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ovpn = netdev_priv(dev);
+ netdev_hold(dev, tracker, GFP_ATOMIC);
+ rcu_read_unlock();
+
+ return ovpn;
+}
+
+int ovpn_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ netdevice_tracker *tracker = (netdevice_tracker *)&info->user_ptr[1];
+ struct ovpn_priv *ovpn = ovpn_get_dev_from_attrs(genl_info_net(info),
+ info, tracker);
+
+ if (IS_ERR(ovpn))
+ return PTR_ERR(ovpn);
+
+ info->user_ptr[0] = ovpn;
+
+ return 0;
+}
+
+void ovpn_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ netdevice_tracker *tracker = (netdevice_tracker *)&info->user_ptr[1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+
+ if (ovpn)
+ netdev_put(ovpn->dev, tracker);
+}
+
+static bool ovpn_nl_attr_sockaddr_remote(struct nlattr **attrs,
+ struct sockaddr_storage *ss)
+{
+ struct sockaddr_in6 *sin6;
+ struct sockaddr_in *sin;
+ struct in6_addr *in6;
+ __be16 port = 0;
+ __be32 *in;
+
+ ss->ss_family = AF_UNSPEC;
+
+ if (attrs[OVPN_A_PEER_REMOTE_PORT])
+ port = nla_get_be16(attrs[OVPN_A_PEER_REMOTE_PORT]);
+
+ if (attrs[OVPN_A_PEER_REMOTE_IPV4]) {
+ ss->ss_family = AF_INET;
+ in = nla_data(attrs[OVPN_A_PEER_REMOTE_IPV4]);
+ } else if (attrs[OVPN_A_PEER_REMOTE_IPV6]) {
+ ss->ss_family = AF_INET6;
+ in6 = nla_data(attrs[OVPN_A_PEER_REMOTE_IPV6]);
+ } else {
+ return false;
+ }
+
+ switch (ss->ss_family) {
+ case AF_INET6:
+ /* If this is a regular IPv6 just break and move on,
+ * otherwise switch to AF_INET and extract the IPv4 accordingly
+ */
+ if (!ipv6_addr_v4mapped(in6)) {
+ sin6 = (struct sockaddr_in6 *)ss;
+ sin6->sin6_port = port;
+ memcpy(&sin6->sin6_addr, in6, sizeof(*in6));
+ break;
+ }
+
+ /* v4-mapped-v6 address */
+ ss->ss_family = AF_INET;
+ in = &in6->s6_addr32[3];
+ fallthrough;
+ case AF_INET:
+ sin = (struct sockaddr_in *)ss;
+ sin->sin_port = port;
+ sin->sin_addr.s_addr = *in;
+ break;
+ }
+
+ return true;
+}
+
+static u8 *ovpn_nl_attr_local_ip(struct nlattr **attrs)
+{
+ u8 *addr6;
+
+ if (!attrs[OVPN_A_PEER_LOCAL_IPV4] && !attrs[OVPN_A_PEER_LOCAL_IPV6])
+ return NULL;
+
+ if (attrs[OVPN_A_PEER_LOCAL_IPV4])
+ return nla_data(attrs[OVPN_A_PEER_LOCAL_IPV4]);
+
+ addr6 = nla_data(attrs[OVPN_A_PEER_LOCAL_IPV6]);
+ /* this is an IPv4-mapped IPv6 address, therefore extract the actual
+	/* if this is an IPv4-mapped IPv6 address, extract the actual
+	 * v4 address from the last 4 bytes
+ if (ipv6_addr_v4mapped((struct in6_addr *)addr6))
+ return addr6 + 12;
+
+ return addr6;
+}
+
+static sa_family_t ovpn_nl_family_get(struct nlattr *addr4,
+ struct nlattr *addr6)
+{
+ if (addr4)
+ return AF_INET;
+
+ if (addr6) {
+ if (ipv6_addr_v4mapped((struct in6_addr *)nla_data(addr6)))
+ return AF_INET;
+ return AF_INET6;
+ }
+
+ return AF_UNSPEC;
+}
+
+static int ovpn_nl_peer_precheck(struct ovpn_priv *ovpn,
+ struct genl_info *info,
+ struct nlattr **attrs)
+{
+ sa_family_t local_fam, remote_fam;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
+ OVPN_A_PEER_ID))
+ return -EINVAL;
+
+ if (attrs[OVPN_A_PEER_REMOTE_IPV4] && attrs[OVPN_A_PEER_REMOTE_IPV6]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+				   "cannot specify both remote IPv4 and IPv6 addresses");
+ return -EINVAL;
+ }
+
+ if (!attrs[OVPN_A_PEER_REMOTE_IPV4] &&
+ !attrs[OVPN_A_PEER_REMOTE_IPV6] && attrs[OVPN_A_PEER_REMOTE_PORT]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify remote port without IP address");
+ return -EINVAL;
+ }
+
+ if ((attrs[OVPN_A_PEER_REMOTE_IPV4] ||
+ attrs[OVPN_A_PEER_REMOTE_IPV6]) &&
+ !attrs[OVPN_A_PEER_REMOTE_PORT]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify remote IP address without port");
+ return -EINVAL;
+ }
+
+ if (!attrs[OVPN_A_PEER_REMOTE_IPV4] &&
+ attrs[OVPN_A_PEER_LOCAL_IPV4]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify local IPv4 address without remote");
+ return -EINVAL;
+ }
+
+ if (!attrs[OVPN_A_PEER_REMOTE_IPV6] &&
+ attrs[OVPN_A_PEER_LOCAL_IPV6]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+				   "cannot specify local IPv6 address without remote");
+ return -EINVAL;
+ }
+
+ /* check that local and remote address families are the same even
+ * after parsing v4mapped IPv6 addresses.
+ * (if addresses are not provided, family will be AF_UNSPEC and
+ * the check is skipped)
+ */
+ local_fam = ovpn_nl_family_get(attrs[OVPN_A_PEER_LOCAL_IPV4],
+ attrs[OVPN_A_PEER_LOCAL_IPV6]);
+ remote_fam = ovpn_nl_family_get(attrs[OVPN_A_PEER_REMOTE_IPV4],
+ attrs[OVPN_A_PEER_REMOTE_IPV6]);
+ if (local_fam != AF_UNSPEC && remote_fam != AF_UNSPEC &&
+ local_fam != remote_fam) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "mismatching local and remote address families");
+ return -EINVAL;
+ }
+
+ if (remote_fam != AF_INET6 && attrs[OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify scope id without remote IPv6 address");
+ return -EINVAL;
+ }
+
+ /* VPN IPs are needed only in MP mode for selecting the right peer */
+ if (ovpn->mode == OVPN_MODE_P2P && (attrs[OVPN_A_PEER_VPN_IPV4] ||
+ attrs[OVPN_A_PEER_VPN_IPV6])) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "unexpected VPN IP in P2P mode");
+ return -EINVAL;
+ }
+
+ if ((attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] &&
+ !attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]) ||
+ (!attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] &&
+ attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT])) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "keepalive interval and timeout are required together");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ovpn_nl_peer_modify - modify the peer attributes according to the incoming msg
+ * @peer: the peer to modify
+ * @info: generic netlink info from the user request
+ * @attrs: the attributes from the user request
+ *
+ * Return: a negative error code in case of failure, 0 on success or 1 on
+ * success and the VPN IPs have been modified (requires rehashing in MP
+ * mode)
+ */
+static int ovpn_nl_peer_modify(struct ovpn_peer *peer, struct genl_info *info,
+ struct nlattr **attrs)
+{
+ struct sockaddr_storage ss = {};
+ void *local_ip = NULL;
+ u32 interv, timeout;
+ bool rehash = false;
+ int ret;
+
+ spin_lock_bh(&peer->lock);
+
+ if (ovpn_nl_attr_sockaddr_remote(attrs, &ss)) {
+ /* we carry the local IP in a generic container.
+ * ovpn_peer_reset_sockaddr() will properly interpret it
+ * based on ss.ss_family
+ */
+ local_ip = ovpn_nl_attr_local_ip(attrs);
+
+ /* set peer sockaddr */
+ ret = ovpn_peer_reset_sockaddr(peer, &ss, local_ip);
+ if (ret < 0) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot set peer sockaddr: %d",
+ ret);
+ goto err_unlock;
+ }
+ dst_cache_reset(&peer->dst_cache);
+ }
+
+ if (attrs[OVPN_A_PEER_VPN_IPV4]) {
+ rehash = true;
+ peer->vpn_addrs.ipv4.s_addr =
+ nla_get_in_addr(attrs[OVPN_A_PEER_VPN_IPV4]);
+ }
+
+ if (attrs[OVPN_A_PEER_VPN_IPV6]) {
+ rehash = true;
+ peer->vpn_addrs.ipv6 =
+ nla_get_in6_addr(attrs[OVPN_A_PEER_VPN_IPV6]);
+ }
+
+ /* when setting the keepalive, both parameters have to be configured */
+ if (attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] &&
+ attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]) {
+ interv = nla_get_u32(attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL]);
+ timeout = nla_get_u32(attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]);
+ ovpn_peer_keepalive_set(peer, interv, timeout);
+ }
+
+ netdev_dbg(peer->ovpn->dev,
+ "modify peer id=%u endpoint=%pIScp VPN-IPv4=%pI4 VPN-IPv6=%pI6c\n",
+ peer->id, &ss,
+ &peer->vpn_addrs.ipv4.s_addr, &peer->vpn_addrs.ipv6);
+
+ spin_unlock_bh(&peer->lock);
+
+ return rehash ? 1 : 0;
+err_unlock:
+ spin_unlock_bh(&peer->lock);
+ return ret;
+}
+
+int ovpn_nl_peer_new_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_socket *ovpn_sock;
+ struct socket *sock = NULL;
+ struct ovpn_peer *peer;
+ u32 sockfd, peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
+ ovpn_peer_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ ret = ovpn_nl_peer_precheck(ovpn, info, attrs);
+ if (ret < 0)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
+ OVPN_A_PEER_SOCKET))
+ return -EINVAL;
+
+ /* in MP mode VPN IPs are required for selecting the right peer */
+ if (ovpn->mode == OVPN_MODE_MP && !attrs[OVPN_A_PEER_VPN_IPV4] &&
+ !attrs[OVPN_A_PEER_VPN_IPV6]) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "VPN IP must be provided in MP mode");
+ return -EINVAL;
+ }
+
+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
+ peer = ovpn_peer_new(ovpn, peer_id);
+ if (IS_ERR(peer)) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot create new peer object for peer %u: %ld",
+ peer_id, PTR_ERR(peer));
+ return PTR_ERR(peer);
+ }
+
+ /* lookup the fd in the kernel table and extract the socket object */
+ sockfd = nla_get_u32(attrs[OVPN_A_PEER_SOCKET]);
+ /* sockfd_lookup() increases sock's refcounter */
+ sock = sockfd_lookup(sockfd, &ret);
+ if (!sock) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot lookup peer socket (fd=%u): %d",
+ sockfd, ret);
+ ret = -ENOTSOCK;
+ goto peer_release;
+ }
+
+	/* The remote endpoint can be configured only when UDP is used as
+	 * the transport protocol, so that ovpn knows where to send packets.
+	 */
+ if (sock->sk->sk_protocol == IPPROTO_UDP &&
+ !attrs[OVPN_A_PEER_REMOTE_IPV4] &&
+ !attrs[OVPN_A_PEER_REMOTE_IPV6]) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "missing remote IP address for UDP socket");
+ sockfd_put(sock);
+ ret = -EINVAL;
+ goto peer_release;
+ }
+
+ /* In case of TCP, the socket is connected to the peer and ovpn
+ * will just send bytes over it, without the need to specify a
+ * destination.
+ */
+ if (sock->sk->sk_protocol == IPPROTO_TCP &&
+ (attrs[OVPN_A_PEER_REMOTE_IPV4] ||
+ attrs[OVPN_A_PEER_REMOTE_IPV6])) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "unexpected remote IP address with TCP socket");
+ sockfd_put(sock);
+ ret = -EINVAL;
+ goto peer_release;
+ }
+
+ ovpn_sock = ovpn_socket_new(sock, peer);
+ /* at this point we unconditionally drop the reference to the socket:
+ * - in case of error, the socket has to be dropped
+	 * - in case of success, the socket is configured and we let
+	 *   userspace own the reference, so that the latter can
+	 *   trigger the final close()
+ */
+ sockfd_put(sock);
+ if (IS_ERR(ovpn_sock)) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot encapsulate socket: %ld",
+ PTR_ERR(ovpn_sock));
+ ret = -ENOTSOCK;
+ goto peer_release;
+ }
+
+ rcu_assign_pointer(peer->sock, ovpn_sock);
+
+ ret = ovpn_nl_peer_modify(peer, info, attrs);
+ if (ret < 0)
+ goto sock_release;
+
+ ret = ovpn_peer_add(ovpn, peer);
+ if (ret < 0) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot add new peer (id=%u) to hashtable: %d",
+ peer->id, ret);
+ goto sock_release;
+ }
+
+ return 0;
+
+sock_release:
+ ovpn_socket_release(peer);
+peer_release:
+ /* release right away because peer was not yet hashed, thus it is not
+ * used in any context
+ */
+ ovpn_peer_release(peer);
+
+ return ret;
+}
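
The handler above also documents the transport rules enforced at peer creation: a remote endpoint is mandatory with UDP and rejected with TCP, and VPN IPs are only needed in MP mode. As a rough userspace counterpart (not taken from this series), attaching a peer over an already-connected UDP socket to a P2P interface could look like the libnl-genl sketch below; add_udp_peer() is a hypothetical helper, and in MP mode OVPN_A_PEER_VPN_IPV4/IPV6 would have to be added as well.

/* Hypothetical sketch: attach a peer to a P2P ovpn interface over an
 * existing UDP socket. Assumes libnl-3/libnl-genl-3 and the installed
 * <linux/ovpn.h> uapi header; error handling is reduced to a minimum.
 */
#include <stdint.h>
#include <netinet/in.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/ovpn.h>

static int add_udp_peer(uint32_t ifindex, uint32_t peer_id, int udp_fd,
			const struct sockaddr_in *remote)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nlattr *peer;
	struct nl_msg *msg;
	int family, ret;

	if (!sk || genl_connect(sk))
		return -1;

	family = genl_ctrl_resolve(sk, OVPN_FAMILY_NAME);
	if (family < 0)
		return family;

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    OVPN_CMD_PEER_NEW, OVPN_FAMILY_VERSION);
	nla_put_u32(msg, OVPN_A_IFINDEX, ifindex);

	peer = nla_nest_start(msg, OVPN_A_PEER);
	nla_put_u32(msg, OVPN_A_PEER_ID, peer_id);
	nla_put_u32(msg, OVPN_A_PEER_SOCKET, udp_fd);
	/* BE32/BE16 attributes: sockaddr fields are already in network order */
	nla_put_u32(msg, OVPN_A_PEER_REMOTE_IPV4, remote->sin_addr.s_addr);
	nla_put_u16(msg, OVPN_A_PEER_REMOTE_PORT, remote->sin_port);
	nla_nest_end(msg, peer);

	ret = nl_send_auto(sk, msg);
	if (ret >= 0)
		ret = nl_wait_for_ack(sk);	/* doit replies with an ACK */

	nlmsg_free(msg);
	nl_socket_free(sk);
	return ret < 0 ? ret : 0;
}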
+
+int ovpn_nl_peer_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
+ ovpn_peer_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ ret = ovpn_nl_peer_precheck(ovpn, info, attrs);
+ if (ret < 0)
+ return ret;
+
+ if (attrs[OVPN_A_PEER_SOCKET]) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "socket cannot be modified");
+ return -EINVAL;
+ }
+
+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot find peer with id %u", peer_id);
+ return -ENOENT;
+ }
+
+ /* when using a TCP socket the remote IP is not expected */
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (sock && sock->sock->sk->sk_protocol == IPPROTO_TCP &&
+ (attrs[OVPN_A_PEER_REMOTE_IPV4] ||
+ attrs[OVPN_A_PEER_REMOTE_IPV6])) {
+ rcu_read_unlock();
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "unexpected remote IP address with TCP socket");
+ ovpn_peer_put(peer);
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ spin_lock_bh(&ovpn->lock);
+ ret = ovpn_nl_peer_modify(peer, info, attrs);
+ if (ret < 0) {
+ spin_unlock_bh(&ovpn->lock);
+ ovpn_peer_put(peer);
+ return ret;
+ }
+
+ /* ret == 1 means that VPN IPv4/6 has been modified and rehashing
+ * is required
+ */
+ if (ret > 0)
+ ovpn_peer_hash_vpn_ip(peer);
+ spin_unlock_bh(&ovpn->lock);
+ ovpn_peer_put(peer);
+
+ return 0;
+}
+
+static int ovpn_nl_send_peer(struct sk_buff *skb, const struct genl_info *info,
+ const struct ovpn_peer *peer, u32 portid, u32 seq,
+ int flags)
+{
+ const struct ovpn_bind *bind;
+ struct ovpn_socket *sock;
+ int ret = -EMSGSIZE;
+ struct nlattr *attr;
+ __be16 local_port;
+ void *hdr;
+ int id;
+
+ hdr = genlmsg_put(skb, portid, seq, &ovpn_nl_family, flags,
+ OVPN_CMD_PEER_GET);
+ if (!hdr)
+ return -ENOBUFS;
+
+ attr = nla_nest_start(skb, OVPN_A_PEER);
+ if (!attr)
+ goto err;
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (!sock) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ if (!net_eq(genl_info_net(info), sock_net(sock->sock->sk))) {
+ id = peernet2id_alloc(genl_info_net(info),
+ sock_net(sock->sock->sk),
+ GFP_ATOMIC);
+ if (nla_put_s32(skb, OVPN_A_PEER_SOCKET_NETNSID, id))
+ goto err_unlock;
+ }
+ local_port = inet_sk(sock->sock->sk)->inet_sport;
+ rcu_read_unlock();
+
+ if (nla_put_u32(skb, OVPN_A_PEER_ID, peer->id))
+ goto err;
+
+ if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY))
+ if (nla_put_in_addr(skb, OVPN_A_PEER_VPN_IPV4,
+ peer->vpn_addrs.ipv4.s_addr))
+ goto err;
+
+ if (!ipv6_addr_equal(&peer->vpn_addrs.ipv6, &in6addr_any))
+ if (nla_put_in6_addr(skb, OVPN_A_PEER_VPN_IPV6,
+ &peer->vpn_addrs.ipv6))
+ goto err;
+
+ if (nla_put_u32(skb, OVPN_A_PEER_KEEPALIVE_INTERVAL,
+ peer->keepalive_interval) ||
+ nla_put_u32(skb, OVPN_A_PEER_KEEPALIVE_TIMEOUT,
+ peer->keepalive_timeout))
+ goto err;
+
+ rcu_read_lock();
+ bind = rcu_dereference(peer->bind);
+ if (bind) {
+ if (bind->remote.in4.sin_family == AF_INET) {
+ if (nla_put_in_addr(skb, OVPN_A_PEER_REMOTE_IPV4,
+ bind->remote.in4.sin_addr.s_addr) ||
+ nla_put_net16(skb, OVPN_A_PEER_REMOTE_PORT,
+ bind->remote.in4.sin_port) ||
+ nla_put_in_addr(skb, OVPN_A_PEER_LOCAL_IPV4,
+ bind->local.ipv4.s_addr))
+ goto err_unlock;
+ } else if (bind->remote.in4.sin_family == AF_INET6) {
+ if (nla_put_in6_addr(skb, OVPN_A_PEER_REMOTE_IPV6,
+ &bind->remote.in6.sin6_addr) ||
+ nla_put_u32(skb, OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID,
+ bind->remote.in6.sin6_scope_id) ||
+ nla_put_net16(skb, OVPN_A_PEER_REMOTE_PORT,
+ bind->remote.in6.sin6_port) ||
+ nla_put_in6_addr(skb, OVPN_A_PEER_LOCAL_IPV6,
+ &bind->local.ipv6))
+ goto err_unlock;
+ }
+ }
+ rcu_read_unlock();
+
+ if (nla_put_net16(skb, OVPN_A_PEER_LOCAL_PORT, local_port) ||
+ /* VPN RX stats */
+ nla_put_uint(skb, OVPN_A_PEER_VPN_RX_BYTES,
+ atomic64_read(&peer->vpn_stats.rx.bytes)) ||
+ nla_put_uint(skb, OVPN_A_PEER_VPN_RX_PACKETS,
+ atomic64_read(&peer->vpn_stats.rx.packets)) ||
+ /* VPN TX stats */
+ nla_put_uint(skb, OVPN_A_PEER_VPN_TX_BYTES,
+ atomic64_read(&peer->vpn_stats.tx.bytes)) ||
+ nla_put_uint(skb, OVPN_A_PEER_VPN_TX_PACKETS,
+ atomic64_read(&peer->vpn_stats.tx.packets)) ||
+ /* link RX stats */
+ nla_put_uint(skb, OVPN_A_PEER_LINK_RX_BYTES,
+ atomic64_read(&peer->link_stats.rx.bytes)) ||
+ nla_put_uint(skb, OVPN_A_PEER_LINK_RX_PACKETS,
+ atomic64_read(&peer->link_stats.rx.packets)) ||
+ /* link TX stats */
+ nla_put_uint(skb, OVPN_A_PEER_LINK_TX_BYTES,
+ atomic64_read(&peer->link_stats.tx.bytes)) ||
+ nla_put_uint(skb, OVPN_A_PEER_LINK_TX_PACKETS,
+ atomic64_read(&peer->link_stats.tx.packets)))
+ goto err;
+
+ nla_nest_end(skb, attr);
+ genlmsg_end(skb, hdr);
+
+ return 0;
+err_unlock:
+ rcu_read_unlock();
+err:
+ genlmsg_cancel(skb, hdr);
+ return ret;
+}
+
+int ovpn_nl_peer_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_peer *peer;
+ struct sk_buff *msg;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
+ ovpn_peer_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
+ OVPN_A_PEER_ID))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot find peer with id %u", peer_id);
+ return -ENOENT;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = ovpn_nl_send_peer(msg, info, peer, info->snd_portid,
+ info->snd_seq, 0);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ goto err;
+ }
+
+ ret = genlmsg_reply(msg, info);
+err:
+ ovpn_peer_put(peer);
+ return ret;
+}
+
+int ovpn_nl_peer_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct genl_info *info = genl_info_dump(cb);
+ int bkt, last_idx = cb->args[1], dumped = 0;
+ netdevice_tracker tracker;
+ struct ovpn_priv *ovpn;
+ struct ovpn_peer *peer;
+
+ ovpn = ovpn_get_dev_from_attrs(sock_net(cb->skb->sk), info, &tracker);
+ if (IS_ERR(ovpn))
+ return PTR_ERR(ovpn);
+
+ if (ovpn->mode == OVPN_MODE_P2P) {
+ /* if we already dumped a peer it means we are done */
+ if (last_idx)
+ goto out;
+
+ rcu_read_lock();
+ peer = rcu_dereference(ovpn->peer);
+ if (peer) {
+ if (ovpn_nl_send_peer(skb, info, peer,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI) == 0)
+ dumped++;
+ }
+ rcu_read_unlock();
+ } else {
+ rcu_read_lock();
+ hash_for_each_rcu(ovpn->peers->by_id, bkt, peer,
+ hash_entry_id) {
+			/* skip peers that were already dumped by previous
+			 * invocations
+			 */
+ if (last_idx > 0) {
+ last_idx--;
+ continue;
+ }
+
+ if (ovpn_nl_send_peer(skb, info, peer,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI) < 0)
+ break;
+
+ /* count peers being dumped during this invocation */
+ dumped++;
+ }
+ rcu_read_unlock();
+ }
+
+out:
+ netdev_put(ovpn->dev, &tracker);
+
+	/* sum up peers dumped in this message, so that at the next invocation
+	 * we can continue from where we left off
+	 */
+ cb->args[1] += dumped;
+ return skb->len;
+}
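
The dump handler above emits one OVPN_CMD_PEER_GET message per peer and resumes from cb->args[1] across invocations, so userspace simply walks a multipart dump. A hedged libnl-genl counterpart could look like the sketch below; count_peers() and count_cb() are illustrative names, attribute parsing is deliberately omitted, and the same uapi-header assumption as in the earlier example applies.

/* Hypothetical sketch: count the peers attached to one ovpn interface by
 * issuing an OVPN_CMD_PEER_GET dump. Assumes libnl-3/libnl-genl-3 and the
 * installed <linux/ovpn.h> uapi header.
 */
#include <net/if.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/ovpn.h>

static int count_cb(struct nl_msg *msg, void *arg)
{
	int *count = arg;

	(*count)++;	/* one OVPN_CMD_PEER_GET reply per peer */
	return NL_OK;
}

static int count_peers(const char *ifname)
{
	struct nl_sock *sk = nl_socket_alloc();
	int family, count = 0;
	struct nl_msg *msg;

	if (!sk || genl_connect(sk))
		return -1;

	family = genl_ctrl_resolve(sk, OVPN_FAMILY_NAME);
	if (family < 0)
		return family;

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, NLM_F_DUMP,
		    OVPN_CMD_PEER_GET, OVPN_FAMILY_VERSION);
	nla_put_u32(msg, OVPN_A_IFINDEX, if_nametoindex(ifname));

	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, count_cb, &count);
	nl_send_auto(sk, msg);
	nl_recvmsgs_default(sk);	/* walks the multipart dump until DONE */

	nlmsg_free(msg);
	nl_socket_free(sk);
	return count;
}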
+
+int ovpn_nl_peer_del_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
+ ovpn_peer_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
+ OVPN_A_PEER_ID))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot find peer with id %u", peer_id);
+ return -ENOENT;
+ }
+
+ netdev_dbg(ovpn->dev, "del peer %u\n", peer->id);
+ ret = ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_USERSPACE);
+ ovpn_peer_put(peer);
+
+ return ret;
+}
+
+static int ovpn_nl_get_key_dir(struct genl_info *info, struct nlattr *key,
+ enum ovpn_cipher_alg cipher,
+ struct ovpn_key_direction *dir)
+{
+ struct nlattr *attrs[OVPN_A_KEYDIR_MAX + 1];
+ int ret;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYDIR_MAX, key,
+ ovpn_keydir_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ switch (cipher) {
+ case OVPN_CIPHER_ALG_AES_GCM:
+ case OVPN_CIPHER_ALG_CHACHA20_POLY1305:
+ if (NL_REQ_ATTR_CHECK(info->extack, key, attrs,
+ OVPN_A_KEYDIR_CIPHER_KEY) ||
+ NL_REQ_ATTR_CHECK(info->extack, key, attrs,
+ OVPN_A_KEYDIR_NONCE_TAIL))
+ return -EINVAL;
+
+ dir->cipher_key = nla_data(attrs[OVPN_A_KEYDIR_CIPHER_KEY]);
+ dir->cipher_key_size = nla_len(attrs[OVPN_A_KEYDIR_CIPHER_KEY]);
+
+		/* These algorithms require a 96-bit nonce, constructed by
+		 * combining the 4-byte packet ID with the 8-byte nonce tail
+		 * provided by userspace
+		 */
+ dir->nonce_tail = nla_data(attrs[OVPN_A_KEYDIR_NONCE_TAIL]);
+ dir->nonce_tail_size = nla_len(attrs[OVPN_A_KEYDIR_NONCE_TAIL]);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(info->extack, "unsupported cipher");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ovpn_nl_key_new_doit - configure a new key for the specified peer
+ * @skb: incoming netlink message
+ * @info: genetlink metadata
+ *
+ * This function allows the user to install a new key in the peer crypto
+ * state.
+ * Each peer has two 'slots', namely 'primary' and 'secondary', where
+ * keys can be installed. The key in the 'primary' slot is used for
+ * encryption, while both keys can be used for decryption by matching the
+ * key ID carried in the incoming packet.
+ *
+ * The user is responsible for rotating keys when necessary. The user
+ * may fetch peer traffic statistics via netlink in order to better
+ * identify the right time to rotate keys.
+ * The renegotiation follows these steps:
+ * 1. a new key is computed by the user and is installed in the 'secondary'
+ * slot
+ * 2. at user discretion (usually after a predetermined time) 'primary' and
+ * 'secondary' contents are swapped and the new key starts being used for
+ * encryption, while the old key is kept around for decryption of late
+ * packets.
+ *
+ * Return: 0 on success or a negative error code otherwise.
+ */
+int ovpn_nl_key_new_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_peer_key_reset pkr;
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+ info->attrs[OVPN_A_KEYCONF],
+ ovpn_keyconf_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_PEER_ID))
+ return -EINVAL;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_SLOT) ||
+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_KEY_ID) ||
+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_CIPHER_ALG) ||
+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_ENCRYPT_DIR) ||
+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_DECRYPT_DIR))
+ return -EINVAL;
+
+ pkr.slot = nla_get_u32(attrs[OVPN_A_KEYCONF_SLOT]);
+ pkr.key.key_id = nla_get_u32(attrs[OVPN_A_KEYCONF_KEY_ID]);
+ pkr.key.cipher_alg = nla_get_u32(attrs[OVPN_A_KEYCONF_CIPHER_ALG]);
+
+ ret = ovpn_nl_get_key_dir(info, attrs[OVPN_A_KEYCONF_ENCRYPT_DIR],
+ pkr.key.cipher_alg, &pkr.key.encrypt);
+ if (ret < 0)
+ return ret;
+
+ ret = ovpn_nl_get_key_dir(info, attrs[OVPN_A_KEYCONF_DECRYPT_DIR],
+ pkr.key.cipher_alg, &pkr.key.decrypt);
+ if (ret < 0)
+ return ret;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "no peer with id %u to set key for",
+ peer_id);
+ return -ENOENT;
+ }
+
+ ret = ovpn_crypto_state_reset(&peer->crypto, &pkr);
+ if (ret < 0) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot install new key for peer %u",
+ peer_id);
+ goto out;
+ }
+
+ netdev_dbg(ovpn->dev, "new key installed (id=%u) for peer %u\n",
+ pkr.key.key_id, peer_id);
+out:
+ ovpn_peer_put(peer);
+ return ret;
+}
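
Step 2 of the rotation described in the kernel-doc above is driven entirely from userspace via OVPN_CMD_KEY_SWAP, once the new key has been installed in the secondary slot. As a rough illustration (not taken from this series), the swap request could be built with libnl-genl as follows; swap_keys() is a hypothetical helper and error handling is omitted.

/* Hypothetical sketch: ask the kernel to swap the primary and secondary
 * key slots for one peer (step 2 of the rotation documented above).
 * Assumes libnl-3/libnl-genl-3 and the installed <linux/ovpn.h> header.
 */
#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/ovpn.h>

static int swap_keys(uint32_t ifindex, uint32_t peer_id)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nlattr *keyconf;
	struct nl_msg *msg;
	int family, ret;

	if (!sk || genl_connect(sk))
		return -1;

	family = genl_ctrl_resolve(sk, OVPN_FAMILY_NAME);
	if (family < 0)
		return family;

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    OVPN_CMD_KEY_SWAP, OVPN_FAMILY_VERSION);
	nla_put_u32(msg, OVPN_A_IFINDEX, ifindex);

	keyconf = nla_nest_start(msg, OVPN_A_KEYCONF);
	nla_put_u32(msg, OVPN_A_KEYCONF_PEER_ID, peer_id);
	nla_nest_end(msg, keyconf);

	ret = nl_send_auto(sk, msg);
	if (ret >= 0)
		ret = nl_wait_for_ack(sk);	/* doit replies with an ACK */

	nlmsg_free(msg);
	nl_socket_free(sk);
	return ret < 0 ? ret : 0;
}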
+
+static int ovpn_nl_send_key(struct sk_buff *skb, const struct genl_info *info,
+ u32 peer_id, enum ovpn_key_slot slot,
+ const struct ovpn_key_config *keyconf)
+{
+ struct nlattr *attr;
+ void *hdr;
+
+ hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, &ovpn_nl_family,
+ 0, OVPN_CMD_KEY_GET);
+ if (!hdr)
+ return -ENOBUFS;
+
+ attr = nla_nest_start(skb, OVPN_A_KEYCONF);
+ if (!attr)
+ goto err;
+
+ if (nla_put_u32(skb, OVPN_A_KEYCONF_PEER_ID, peer_id))
+ goto err;
+
+ if (nla_put_u32(skb, OVPN_A_KEYCONF_SLOT, slot) ||
+ nla_put_u32(skb, OVPN_A_KEYCONF_KEY_ID, keyconf->key_id) ||
+ nla_put_u32(skb, OVPN_A_KEYCONF_CIPHER_ALG, keyconf->cipher_alg))
+ goto err;
+
+ nla_nest_end(skb, attr);
+ genlmsg_end(skb, hdr);
+
+ return 0;
+err:
+ genlmsg_cancel(skb, hdr);
+ return -EMSGSIZE;
+}
+
+int ovpn_nl_key_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_key_config keyconf = { 0 };
+ enum ovpn_key_slot slot;
+ struct ovpn_peer *peer;
+ struct sk_buff *msg;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+ info->attrs[OVPN_A_KEYCONF],
+ ovpn_keyconf_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_PEER_ID))
+ return -EINVAL;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_SLOT))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot find peer with id %u", peer_id);
+ return -ENOENT;
+ }
+
+ slot = nla_get_u32(attrs[OVPN_A_KEYCONF_SLOT]);
+
+ ret = ovpn_crypto_config_get(&peer->crypto, slot, &keyconf);
+ if (ret < 0) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot extract key from slot %u for peer %u",
+ slot, peer_id);
+ goto err;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = ovpn_nl_send_key(msg, info, peer->id, slot, &keyconf);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ goto err;
+ }
+
+ ret = genlmsg_reply(msg, info);
+err:
+ ovpn_peer_put(peer);
+ return ret;
+}
+
+int ovpn_nl_key_swap_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+	struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+ info->attrs[OVPN_A_KEYCONF],
+ ovpn_keyconf_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_PEER_ID))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "no peer with id %u to swap keys for",
+ peer_id);
+ return -ENOENT;
+ }
+
+ ovpn_crypto_key_slots_swap(&peer->crypto);
+ ovpn_peer_put(peer);
+
+ return 0;
+}
+
+int ovpn_nl_key_del_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ enum ovpn_key_slot slot;
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+ info->attrs[OVPN_A_KEYCONF],
+ ovpn_keyconf_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_PEER_ID))
+ return -EINVAL;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_SLOT))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+ slot = nla_get_u32(attrs[OVPN_A_KEYCONF_SLOT]);
+
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "no peer with id %u to delete key for",
+ peer_id);
+ return -ENOENT;
+ }
+
+ ovpn_crypto_key_slot_delete(&peer->crypto, slot);
+ ovpn_peer_put(peer);
+
+ return 0;
+}
+
+/**
+ * ovpn_nl_peer_del_notify - notify userspace about peer being deleted
+ * @peer: the peer being deleted
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_nl_peer_del_notify(struct ovpn_peer *peer)
+{
+ struct ovpn_socket *sock;
+ struct sk_buff *msg;
+ struct nlattr *attr;
+ int ret = -EMSGSIZE;
+ void *hdr;
+
+ netdev_info(peer->ovpn->dev, "deleting peer with id %u, reason %d\n",
+ peer->id, peer->delete_reason);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &ovpn_nl_family, 0, OVPN_CMD_PEER_DEL_NTF);
+ if (!hdr) {
+ ret = -ENOBUFS;
+ goto err_free_msg;
+ }
+
+ if (nla_put_u32(msg, OVPN_A_IFINDEX, peer->ovpn->dev->ifindex))
+ goto err_cancel_msg;
+
+ attr = nla_nest_start(msg, OVPN_A_PEER);
+ if (!attr)
+ goto err_cancel_msg;
+
+ if (nla_put_u32(msg, OVPN_A_PEER_DEL_REASON, peer->delete_reason))
+ goto err_cancel_msg;
+
+ if (nla_put_u32(msg, OVPN_A_PEER_ID, peer->id))
+ goto err_cancel_msg;
+
+ nla_nest_end(msg, attr);
+
+ genlmsg_end(msg, hdr);
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (!sock) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sock->sk),
+ msg, 0, OVPN_NLGRP_PEERS, GFP_ATOMIC);
+ rcu_read_unlock();
+
+ return 0;
+
+err_unlock:
+ rcu_read_unlock();
+err_cancel_msg:
+ genlmsg_cancel(msg, hdr);
+err_free_msg:
+ nlmsg_free(msg);
+ return ret;
+}
+
+/**
+ * ovpn_nl_key_swap_notify - notify userspace that a peer's key must be renewed
+ * @peer: the peer whose key needs to be renewed
+ * @key_id: the ID of the key that needs to be renewed
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_nl_key_swap_notify(struct ovpn_peer *peer, u8 key_id)
+{
+ struct ovpn_socket *sock;
+ struct nlattr *k_attr;
+ struct sk_buff *msg;
+ int ret = -EMSGSIZE;
+ void *hdr;
+
+ netdev_info(peer->ovpn->dev, "peer with id %u must rekey - primary key unusable.\n",
+ peer->id);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &ovpn_nl_family, 0, OVPN_CMD_KEY_SWAP_NTF);
+ if (!hdr) {
+ ret = -ENOBUFS;
+ goto err_free_msg;
+ }
+
+ if (nla_put_u32(msg, OVPN_A_IFINDEX, peer->ovpn->dev->ifindex))
+ goto err_cancel_msg;
+
+ k_attr = nla_nest_start(msg, OVPN_A_KEYCONF);
+ if (!k_attr)
+ goto err_cancel_msg;
+
+ if (nla_put_u32(msg, OVPN_A_KEYCONF_PEER_ID, peer->id))
+ goto err_cancel_msg;
+
+	if (nla_put_u32(msg, OVPN_A_KEYCONF_KEY_ID, key_id))
+ goto err_cancel_msg;
+
+ nla_nest_end(msg, k_attr);
+ genlmsg_end(msg, hdr);
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (!sock) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sock->sk),
+ msg, 0, OVPN_NLGRP_PEERS, GFP_ATOMIC);
+ rcu_read_unlock();
+
+ return 0;
+err_unlock:
+ rcu_read_unlock();
+err_cancel_msg:
+ genlmsg_cancel(msg, hdr);
+err_free_msg:
+ nlmsg_free(msg);
+ return ret;
+}
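
Both notifications above are multicast to the "peers" group registered in netlink-gen.c, so a control daemon only has to join that group to learn about deleted peers and pending rekeys. One possible libnl-genl listener is sketched below under the same assumptions as the earlier examples; event_cb() and listen_peers_group() are hypothetical names and nested-attribute parsing is left out.

/* Hypothetical sketch: subscribe to the ovpn "peers" multicast group and
 * receive OVPN_CMD_PEER_DEL_NTF / OVPN_CMD_KEY_SWAP_NTF events.
 * Assumes libnl-3/libnl-genl-3 and the installed <linux/ovpn.h> header.
 */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/ovpn.h>

static int event_cb(struct nl_msg *msg, void *arg)
{
	struct genlmsghdr *ghdr = nlmsg_data(nlmsg_hdr(msg));

	/* OVPN_CMD_PEER_DEL_NTF or OVPN_CMD_KEY_SWAP_NTF */
	printf("ovpn notification: cmd=%u\n", ghdr->cmd);
	return NL_OK;
}

static int listen_peers_group(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	if (!sk || genl_connect(sk))
		return -1;

	grp = genl_ctrl_resolve_grp(sk, OVPN_FAMILY_NAME, "peers");
	if (grp < 0)
		return grp;

	/* notifications are unsolicited: no sequence numbers to check */
	nl_socket_disable_seq_check(sk);
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, event_cb, NULL);
	nl_socket_add_membership(sk, grp);

	for (;;)
		nl_recvmsgs_default(sk);	/* blocks, dispatching event_cb */

	return 0;	/* not reached */
}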
+
+/**
+ * ovpn_nl_register - perform any needed registration in the NL subsystem
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+int __init ovpn_nl_register(void)
+{
+ int ret = genl_register_family(&ovpn_nl_family);
+
+ if (ret) {
+ pr_err("ovpn: genl_register_family failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ovpn_nl_unregister - undo any module wide netlink registration
+ */
+void ovpn_nl_unregister(void)
+{
+ genl_unregister_family(&ovpn_nl_family);
+}
diff --git a/drivers/net/ovpn/netlink.h b/drivers/net/ovpn/netlink.h
new file mode 100644
index 000000000000..8615dfc3c472
--- /dev/null
+++ b/drivers/net/ovpn/netlink.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_NETLINK_H_
+#define _NET_OVPN_NETLINK_H_
+
+int ovpn_nl_register(void);
+void ovpn_nl_unregister(void);
+
+int ovpn_nl_peer_del_notify(struct ovpn_peer *peer);
+int ovpn_nl_key_swap_notify(struct ovpn_peer *peer, u8 key_id);
+
+#endif /* _NET_OVPN_NETLINK_H_ */
diff --git a/drivers/net/ovpn/ovpnpriv.h b/drivers/net/ovpn/ovpnpriv.h
new file mode 100644
index 000000000000..5898f6adada7
--- /dev/null
+++ b/drivers/net/ovpn/ovpnpriv.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNSTRUCT_H_
+#define _NET_OVPN_OVPNSTRUCT_H_
+
+#include <linux/workqueue.h>
+#include <net/gro_cells.h>
+#include <uapi/linux/if_link.h>
+#include <uapi/linux/ovpn.h>
+
+/**
+ * struct ovpn_peer_collection - container of peers for MultiPeer mode
+ * @by_id: table of peers indexed by ID
+ * @by_vpn_addr4: table of peers indexed by VPN IPv4 address (items can be
+ * rehashed on the fly due to peer IP change)
+ * @by_vpn_addr6: table of peers indexed by VPN IPv6 address (items can be
+ * rehashed on the fly due to peer IP change)
+ * @by_transp_addr: table of peers indexed by transport address (items can be
+ * rehashed on the fly due to peer IP change)
+ */
+struct ovpn_peer_collection {
+ DECLARE_HASHTABLE(by_id, 12);
+ struct hlist_nulls_head by_vpn_addr4[1 << 12];
+ struct hlist_nulls_head by_vpn_addr6[1 << 12];
+ struct hlist_nulls_head by_transp_addr[1 << 12];
+};
+
+/**
+ * struct ovpn_priv - per ovpn interface state
+ * @dev: the actual netdev representing the tunnel
+ * @mode: device operation mode (i.e. P2P or MP)
+ * @lock: protect this object
+ * @peers: data structures holding multi-peer references
+ * @peer: in P2P mode, this is the only remote peer
+ * @gro_cells: Generic Receive Offload state used on the RX path
+ * @keepalive_work: struct used to schedule keepalive periodic job
+ */
+struct ovpn_priv {
+ struct net_device *dev;
+ enum ovpn_mode mode;
+ spinlock_t lock; /* protect writing to the ovpn_priv object */
+ struct ovpn_peer_collection *peers;
+ struct ovpn_peer __rcu *peer;
+ struct gro_cells gro_cells;
+ struct delayed_work keepalive_work;
+};
+
+#endif /* _NET_OVPN_OVPNSTRUCT_H_ */
diff --git a/drivers/net/ovpn/peer.c b/drivers/net/ovpn/peer.c
new file mode 100644
index 000000000000..a37f89fffb02
--- /dev/null
+++ b/drivers/net/ovpn/peer.c
@@ -0,0 +1,1365 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/hashtable.h>
+#include <net/ip6_route.h>
+
+#include "ovpnpriv.h"
+#include "bind.h"
+#include "pktid.h"
+#include "crypto.h"
+#include "io.h"
+#include "main.h"
+#include "netlink.h"
+#include "peer.h"
+#include "socket.h"
+
+static void unlock_ovpn(struct ovpn_priv *ovpn,
+ struct llist_head *release_list)
+ __releases(&ovpn->lock)
+{
+ struct ovpn_peer *peer;
+
+ spin_unlock_bh(&ovpn->lock);
+
+ llist_for_each_entry(peer, release_list->first, release_entry) {
+ ovpn_socket_release(peer);
+ ovpn_peer_put(peer);
+ }
+}
+
+/**
+ * ovpn_peer_keepalive_set - configure keepalive values for peer
+ * @peer: the peer to configure
+ * @interval: outgoing keepalive interval
+ * @timeout: incoming keepalive timeout
+ */
+void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout)
+{
+ time64_t now = ktime_get_real_seconds();
+
+ netdev_dbg(peer->ovpn->dev,
+ "scheduling keepalive for peer %u: interval=%u timeout=%u\n",
+ peer->id, interval, timeout);
+
+ peer->keepalive_interval = interval;
+ WRITE_ONCE(peer->last_sent, now);
+ peer->keepalive_xmit_exp = now + interval;
+
+ peer->keepalive_timeout = timeout;
+ WRITE_ONCE(peer->last_recv, now);
+ peer->keepalive_recv_exp = now + timeout;
+
+ /* now that interval and timeout have been changed, kick
+ * off the worker so that the next delay can be recomputed
+ */
+ mod_delayed_work(system_wq, &peer->ovpn->keepalive_work, 0);
+}
+
+/**
+ * ovpn_peer_keepalive_send - periodic worker sending keepalive packets
+ * @work: pointer to the work member of the related peer object
+ *
+ * NOTE: the reference to peer is not dropped because it gets inherited
+ * by ovpn_xmit_special()
+ */
+static void ovpn_peer_keepalive_send(struct work_struct *work)
+{
+ struct ovpn_peer *peer = container_of(work, struct ovpn_peer,
+ keepalive_work);
+
+ local_bh_disable();
+ ovpn_xmit_special(peer, ovpn_keepalive_message,
+ sizeof(ovpn_keepalive_message));
+ local_bh_enable();
+}
+
+/**
+ * ovpn_peer_new - allocate and initialize a new peer object
+ * @ovpn: the openvpn instance inside which the peer should be created
+ * @id: the ID assigned to this peer
+ *
+ * Return: a pointer to the new peer on success or an error code otherwise
+ */
+struct ovpn_peer *ovpn_peer_new(struct ovpn_priv *ovpn, u32 id)
+{
+ struct ovpn_peer *peer;
+ int ret;
+
+ /* alloc and init peer object */
+ peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+ if (!peer)
+ return ERR_PTR(-ENOMEM);
+
+ peer->id = id;
+ peer->ovpn = ovpn;
+
+ peer->vpn_addrs.ipv4.s_addr = htonl(INADDR_ANY);
+ peer->vpn_addrs.ipv6 = in6addr_any;
+
+ RCU_INIT_POINTER(peer->bind, NULL);
+ ovpn_crypto_state_init(&peer->crypto);
+ spin_lock_init(&peer->lock);
+ kref_init(&peer->refcount);
+ ovpn_peer_stats_init(&peer->vpn_stats);
+ ovpn_peer_stats_init(&peer->link_stats);
+ INIT_WORK(&peer->keepalive_work, ovpn_peer_keepalive_send);
+
+ ret = dst_cache_init(&peer->dst_cache, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(ovpn->dev,
+ "cannot initialize dst cache for peer %u\n",
+ peer->id);
+ kfree(peer);
+ return ERR_PTR(ret);
+ }
+
+ netdev_hold(ovpn->dev, &peer->dev_tracker, GFP_KERNEL);
+
+ return peer;
+}
+
+/**
+ * ovpn_peer_reset_sockaddr - recreate binding for peer
+ * @peer: peer to recreate the binding for
+ * @ss: sockaddr to use as remote endpoint for the binding
+ * @local_ip: local IP for the binding
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_peer_reset_sockaddr(struct ovpn_peer *peer,
+ const struct sockaddr_storage *ss,
+ const void *local_ip)
+{
+ struct ovpn_bind *bind;
+ size_t ip_len;
+
+ lockdep_assert_held(&peer->lock);
+
+ /* create new ovpn_bind object */
+ bind = ovpn_bind_from_sockaddr(ss);
+ if (IS_ERR(bind))
+ return PTR_ERR(bind);
+
+ if (local_ip) {
+ if (ss->ss_family == AF_INET) {
+ ip_len = sizeof(struct in_addr);
+ } else if (ss->ss_family == AF_INET6) {
+ ip_len = sizeof(struct in6_addr);
+ } else {
+ net_dbg_ratelimited("%s: invalid family %u for remote endpoint for peer %u\n",
+ netdev_name(peer->ovpn->dev),
+ ss->ss_family, peer->id);
+ kfree(bind);
+ return -EINVAL;
+ }
+
+ memcpy(&bind->local, local_ip, ip_len);
+ }
+
+ /* set binding */
+ ovpn_bind_reset(peer, bind);
+
+ return 0;
+}
+
+/* variable name __tbl2 needs to be different from __tbl1
+ * in the macro below to avoid confusing clang
+ */
+#define ovpn_get_hash_slot(_tbl, _key, _key_len) ({ \
+ typeof(_tbl) *__tbl2 = &(_tbl); \
+ jhash(_key, _key_len, 0) % HASH_SIZE(*__tbl2); \
+})
+
+#define ovpn_get_hash_head(_tbl, _key, _key_len) ({ \
+ typeof(_tbl) *__tbl1 = &(_tbl); \
+ &(*__tbl1)[ovpn_get_hash_slot(*__tbl1, _key, _key_len)];\
+})
+
+/**
+ * ovpn_peer_endpoints_update - update remote or local endpoint for peer
+ * @peer: peer to update the remote endpoint for
+ * @skb: incoming packet to retrieve the source/destination address from
+ */
+void ovpn_peer_endpoints_update(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+ struct hlist_nulls_head *nhead;
+ struct sockaddr_storage ss;
+ struct sockaddr_in6 *sa6;
+ bool reset_cache = false;
+ struct sockaddr_in *sa;
+ struct ovpn_bind *bind;
+ const void *local_ip;
+ size_t salen = 0;
+
+ spin_lock_bh(&peer->lock);
+ bind = rcu_dereference_protected(peer->bind,
+ lockdep_is_held(&peer->lock));
+ if (unlikely(!bind))
+ goto unlock;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ /* float check */
+ if (unlikely(!ovpn_bind_skb_src_match(bind, skb))) {
+ /* unconditionally save local endpoint in case
+ * of float, as it may have changed as well
+ */
+ local_ip = &ip_hdr(skb)->daddr;
+ sa = (struct sockaddr_in *)&ss;
+ sa->sin_family = AF_INET;
+ sa->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sa->sin_port = udp_hdr(skb)->source;
+ salen = sizeof(*sa);
+ reset_cache = true;
+ break;
+ }
+
+ /* if no float happened, let's double check if the local endpoint
+ * has changed
+ */
+ if (unlikely(bind->local.ipv4.s_addr != ip_hdr(skb)->daddr)) {
+ net_dbg_ratelimited("%s: learning local IPv4 for peer %d (%pI4 -> %pI4)\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, &bind->local.ipv4.s_addr,
+ &ip_hdr(skb)->daddr);
+ bind->local.ipv4.s_addr = ip_hdr(skb)->daddr;
+ reset_cache = true;
+ }
+ break;
+ case htons(ETH_P_IPV6):
+ /* float check */
+ if (unlikely(!ovpn_bind_skb_src_match(bind, skb))) {
+ /* unconditionally save local endpoint in case
+ * of float, as it may have changed as well
+ */
+ local_ip = &ipv6_hdr(skb)->daddr;
+ sa6 = (struct sockaddr_in6 *)&ss;
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_addr = ipv6_hdr(skb)->saddr;
+ sa6->sin6_port = udp_hdr(skb)->source;
+ sa6->sin6_scope_id = ipv6_iface_scope_id(&ipv6_hdr(skb)->saddr,
+ skb->skb_iif);
+ salen = sizeof(*sa6);
+ reset_cache = true;
+ break;
+ }
+
+ /* if no float happened, let's double check if the local endpoint
+ * has changed
+ */
+ if (unlikely(!ipv6_addr_equal(&bind->local.ipv6,
+ &ipv6_hdr(skb)->daddr))) {
+			net_dbg_ratelimited("%s: learning local IPv6 for peer %d (%pI6c -> %pI6c)\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, &bind->local.ipv6,
+ &ipv6_hdr(skb)->daddr);
+ bind->local.ipv6 = ipv6_hdr(skb)->daddr;
+ reset_cache = true;
+ }
+ break;
+ default:
+ goto unlock;
+ }
+
+ if (unlikely(reset_cache))
+ dst_cache_reset(&peer->dst_cache);
+
+ /* if the peer did not float, we can bail out now */
+ if (likely(!salen))
+ goto unlock;
+
+ if (unlikely(ovpn_peer_reset_sockaddr(peer,
+ (struct sockaddr_storage *)&ss,
+ local_ip) < 0))
+ goto unlock;
+
+ net_dbg_ratelimited("%s: peer %d floated to %pIScp",
+ netdev_name(peer->ovpn->dev), peer->id, &ss);
+
+ spin_unlock_bh(&peer->lock);
+
+ /* rehashing is required only in MP mode as P2P has one peer
+ * only and thus there is no hashtable
+ */
+ if (peer->ovpn->mode == OVPN_MODE_MP) {
+ spin_lock_bh(&peer->ovpn->lock);
+ spin_lock_bh(&peer->lock);
+ bind = rcu_dereference_protected(peer->bind,
+ lockdep_is_held(&peer->lock));
+ if (unlikely(!bind)) {
+ spin_unlock_bh(&peer->lock);
+ spin_unlock_bh(&peer->ovpn->lock);
+ return;
+ }
+
+ /* This function may be invoked concurrently, therefore another
+ * float may have happened in parallel: perform rehashing
+ * using the peer->bind->remote directly as key
+ */
+
+ switch (bind->remote.in4.sin_family) {
+ case AF_INET:
+ salen = sizeof(*sa);
+ break;
+ case AF_INET6:
+ salen = sizeof(*sa6);
+ break;
+ }
+
+ /* remove old hashing */
+ hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr);
+ /* re-add with new transport address */
+ nhead = ovpn_get_hash_head(peer->ovpn->peers->by_transp_addr,
+ &bind->remote, salen);
+ hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead);
+ spin_unlock_bh(&peer->lock);
+ spin_unlock_bh(&peer->ovpn->lock);
+ }
+ return;
+unlock:
+ spin_unlock_bh(&peer->lock);
+}
+
+/**
+ * ovpn_peer_release_rcu - RCU callback performing last peer release steps
+ * @head: RCU member of the ovpn_peer
+ */
+static void ovpn_peer_release_rcu(struct rcu_head *head)
+{
+ struct ovpn_peer *peer = container_of(head, struct ovpn_peer, rcu);
+
+ /* this call will immediately free the dst_cache, therefore we
+ * perform it in the RCU callback, when all contexts are done
+ */
+ dst_cache_destroy(&peer->dst_cache);
+ kfree(peer);
+}
+
+/**
+ * ovpn_peer_release - release peer private members
+ * @peer: the peer to release
+ */
+void ovpn_peer_release(struct ovpn_peer *peer)
+{
+ ovpn_crypto_state_release(&peer->crypto);
+ spin_lock_bh(&peer->lock);
+ ovpn_bind_reset(peer, NULL);
+ spin_unlock_bh(&peer->lock);
+ call_rcu(&peer->rcu, ovpn_peer_release_rcu);
+ netdev_put(peer->ovpn->dev, &peer->dev_tracker);
+}
+
+/**
+ * ovpn_peer_release_kref - callback for kref_put
+ * @kref: the kref object belonging to the peer
+ */
+void ovpn_peer_release_kref(struct kref *kref)
+{
+ struct ovpn_peer *peer = container_of(kref, struct ovpn_peer, refcount);
+
+ ovpn_peer_release(peer);
+}
+
+/**
+ * ovpn_peer_skb_to_sockaddr - fill sockaddr with skb source address
+ * @skb: the packet to extract data from
+ * @ss: the sockaddr to fill
+ *
+ * Return: sockaddr length on success or -1 otherwise
+ */
+static int ovpn_peer_skb_to_sockaddr(struct sk_buff *skb,
+ struct sockaddr_storage *ss)
+{
+ struct sockaddr_in6 *sa6;
+ struct sockaddr_in *sa4;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ sa4 = (struct sockaddr_in *)ss;
+ sa4->sin_family = AF_INET;
+ sa4->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sa4->sin_port = udp_hdr(skb)->source;
+ return sizeof(*sa4);
+ case htons(ETH_P_IPV6):
+ sa6 = (struct sockaddr_in6 *)ss;
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_addr = ipv6_hdr(skb)->saddr;
+ sa6->sin6_port = udp_hdr(skb)->source;
+ return sizeof(*sa6);
+ }
+
+ return -1;
+}
+
+/**
+ * ovpn_nexthop_from_skb4 - retrieve IPv4 nexthop for outgoing skb
+ * @skb: the outgoing packet
+ *
+ * Return: the IPv4 address of the nexthop
+ */
+static __be32 ovpn_nexthop_from_skb4(struct sk_buff *skb)
+{
+ const struct rtable *rt = skb_rtable(skb);
+
+ if (rt && rt->rt_uses_gateway)
+ return rt->rt_gw4;
+
+ return ip_hdr(skb)->daddr;
+}
+
+/**
+ * ovpn_nexthop_from_skb6 - retrieve IPv6 nexthop for outgoing skb
+ * @skb: the outgoing packet
+ *
+ * Return: the IPv6 address of the nexthop
+ */
+static struct in6_addr ovpn_nexthop_from_skb6(struct sk_buff *skb)
+{
+ const struct rt6_info *rt = skb_rt6_info(skb);
+
+ if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
+ return ipv6_hdr(skb)->daddr;
+
+ return rt->rt6i_gateway;
+}
+
+/**
+ * ovpn_peer_get_by_vpn_addr4 - retrieve peer by its VPN IPv4 address
+ * @ovpn: the openvpn instance to search
+ * @addr: VPN IPv4 to use as search key
+ *
+ * Refcounter is not increased for the returned peer.
+ *
+ * Return: the peer if found or NULL otherwise
+ */
+static struct ovpn_peer *ovpn_peer_get_by_vpn_addr4(struct ovpn_priv *ovpn,
+ __be32 addr)
+{
+ struct hlist_nulls_head *nhead;
+ struct hlist_nulls_node *ntmp;
+ struct ovpn_peer *tmp;
+ unsigned int slot;
+
+begin:
+ slot = ovpn_get_hash_slot(ovpn->peers->by_vpn_addr4, &addr,
+ sizeof(addr));
+ nhead = &ovpn->peers->by_vpn_addr4[slot];
+
+ hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr4)
+ if (addr == tmp->vpn_addrs.ipv4.s_addr)
+ return tmp;
+
+ /* item may have moved during lookup - check nulls and restart
+ * if that's the case
+ */
+ if (get_nulls_value(ntmp) != slot)
+ goto begin;
+
+ return NULL;
+}
+
+/**
+ * ovpn_peer_get_by_vpn_addr6 - retrieve peer by its VPN IPv6 address
+ * @ovpn: the openvpn instance to search
+ * @addr: VPN IPv6 to use as search key
+ *
+ * Refcounter is not increased for the returned peer.
+ *
+ * Return: the peer if found or NULL otherwise
+ */
+static struct ovpn_peer *ovpn_peer_get_by_vpn_addr6(struct ovpn_priv *ovpn,
+ struct in6_addr *addr)
+{
+ struct hlist_nulls_head *nhead;
+ struct hlist_nulls_node *ntmp;
+ struct ovpn_peer *tmp;
+ unsigned int slot;
+
+begin:
+ slot = ovpn_get_hash_slot(ovpn->peers->by_vpn_addr6, addr,
+ sizeof(*addr));
+ nhead = &ovpn->peers->by_vpn_addr6[slot];
+
+ hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr6)
+ if (ipv6_addr_equal(addr, &tmp->vpn_addrs.ipv6))
+ return tmp;
+
+ /* item may have moved during lookup - check nulls and restart
+ * if that's the case
+ */
+ if (get_nulls_value(ntmp) != slot)
+ goto begin;
+
+ return NULL;
+}
+
+/**
+ * ovpn_peer_transp_match - check if sockaddr and peer binding match
+ * @peer: the peer to get the binding from
+ * @ss: the sockaddr to match
+ *
+ * Return: true if sockaddr and binding match or false otherwise
+ */
+static bool ovpn_peer_transp_match(const struct ovpn_peer *peer,
+ const struct sockaddr_storage *ss)
+{
+ struct ovpn_bind *bind = rcu_dereference(peer->bind);
+ struct sockaddr_in6 *sa6;
+ struct sockaddr_in *sa4;
+
+ if (unlikely(!bind))
+ return false;
+
+ if (ss->ss_family != bind->remote.in4.sin_family)
+ return false;
+
+ switch (ss->ss_family) {
+ case AF_INET:
+ sa4 = (struct sockaddr_in *)ss;
+ if (sa4->sin_addr.s_addr != bind->remote.in4.sin_addr.s_addr)
+ return false;
+ if (sa4->sin_port != bind->remote.in4.sin_port)
+ return false;
+ break;
+ case AF_INET6:
+ sa6 = (struct sockaddr_in6 *)ss;
+ if (!ipv6_addr_equal(&sa6->sin6_addr,
+ &bind->remote.in6.sin6_addr))
+ return false;
+ if (sa6->sin6_port != bind->remote.in6.sin6_port)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ovpn_peer_get_by_transp_addr_p2p - get peer by transport address in a P2P
+ * instance
+ * @ovpn: the openvpn instance to search
+ * @ss: the transport socket address
+ *
+ * Return: the peer if found or NULL otherwise
+ */
+static struct ovpn_peer *
+ovpn_peer_get_by_transp_addr_p2p(struct ovpn_priv *ovpn,
+ struct sockaddr_storage *ss)
+{
+ struct ovpn_peer *tmp, *peer = NULL;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(ovpn->peer);
+ if (likely(tmp && ovpn_peer_transp_match(tmp, ss) &&
+ ovpn_peer_hold(tmp)))
+ peer = tmp;
+ rcu_read_unlock();
+
+ return peer;
+}
+
+/**
+ * ovpn_peer_get_by_transp_addr - retrieve peer by transport address
+ * @ovpn: the openvpn instance to search
+ * @skb: the skb to retrieve the source transport address from
+ *
+ * Return: a pointer to the peer if found or NULL otherwise
+ */
+struct ovpn_peer *ovpn_peer_get_by_transp_addr(struct ovpn_priv *ovpn,
+ struct sk_buff *skb)
+{
+ struct ovpn_peer *tmp, *peer = NULL;
+ struct sockaddr_storage ss = { 0 };
+ struct hlist_nulls_head *nhead;
+ struct hlist_nulls_node *ntmp;
+ unsigned int slot;
+ ssize_t sa_len;
+
+ sa_len = ovpn_peer_skb_to_sockaddr(skb, &ss);
+ if (unlikely(sa_len < 0))
+ return NULL;
+
+ if (ovpn->mode == OVPN_MODE_P2P)
+ return ovpn_peer_get_by_transp_addr_p2p(ovpn, &ss);
+
+ rcu_read_lock();
+begin:
+ slot = ovpn_get_hash_slot(ovpn->peers->by_transp_addr, &ss, sa_len);
+ nhead = &ovpn->peers->by_transp_addr[slot];
+
+ hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead,
+ hash_entry_transp_addr) {
+ if (!ovpn_peer_transp_match(tmp, &ss))
+ continue;
+
+ if (!ovpn_peer_hold(tmp))
+ continue;
+
+ peer = tmp;
+ break;
+ }
+
+ /* item may have moved during lookup - check nulls and restart
+ * if that's the case
+ */
+ if (!peer && get_nulls_value(ntmp) != slot)
+ goto begin;
+ rcu_read_unlock();
+
+ return peer;
+}
+
+/**
+ * ovpn_peer_get_by_id_p2p - get peer by ID in a P2P instance
+ * @ovpn: the openvpn instance to search
+ * @peer_id: the ID of the peer to find
+ *
+ * Return: the peer if found or NULL otherwise
+ */
+static struct ovpn_peer *ovpn_peer_get_by_id_p2p(struct ovpn_priv *ovpn,
+ u32 peer_id)
+{
+ struct ovpn_peer *tmp, *peer = NULL;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(ovpn->peer);
+ if (likely(tmp && tmp->id == peer_id && ovpn_peer_hold(tmp)))
+ peer = tmp;
+ rcu_read_unlock();
+
+ return peer;
+}
+
+/**
+ * ovpn_peer_get_by_id - retrieve peer by ID
+ * @ovpn: the openvpn instance to search
+ * @peer_id: the unique peer identifier to match
+ *
+ * Return: a pointer to the peer if found or NULL otherwise
+ */
+struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_priv *ovpn, u32 peer_id)
+{
+ struct ovpn_peer *tmp, *peer = NULL;
+ struct hlist_head *head;
+
+ if (ovpn->mode == OVPN_MODE_P2P)
+ return ovpn_peer_get_by_id_p2p(ovpn, peer_id);
+
+ head = ovpn_get_hash_head(ovpn->peers->by_id, &peer_id,
+ sizeof(peer_id));
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp, head, hash_entry_id) {
+ if (tmp->id != peer_id)
+ continue;
+
+ if (!ovpn_peer_hold(tmp))
+ continue;
+
+ peer = tmp;
+ break;
+ }
+ rcu_read_unlock();
+
+ return peer;
+}
+
+static void ovpn_peer_remove(struct ovpn_peer *peer,
+ enum ovpn_del_peer_reason reason,
+ struct llist_head *release_list)
+{
+ lockdep_assert_held(&peer->ovpn->lock);
+
+ switch (peer->ovpn->mode) {
+ case OVPN_MODE_MP:
+ /* prevent double remove */
+ if (hlist_unhashed(&peer->hash_entry_id))
+ return;
+
+ hlist_del_init_rcu(&peer->hash_entry_id);
+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr4);
+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr6);
+ hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr);
+ break;
+ case OVPN_MODE_P2P:
+ /* prevent double remove */
+ if (peer != rcu_access_pointer(peer->ovpn->peer))
+ return;
+
+ RCU_INIT_POINTER(peer->ovpn->peer, NULL);
+ /* in P2P mode the carrier is switched off when the peer is
+ * deleted so that third party protocols can react accordingly
+ */
+ netif_carrier_off(peer->ovpn->dev);
+ break;
+ }
+
+ peer->delete_reason = reason;
+ ovpn_nl_peer_del_notify(peer);
+
+ /* append to provided list for later socket release and ref drop */
+ llist_add(&peer->release_entry, release_list);
+}
+
+/**
+ * ovpn_peer_get_by_dst - Lookup peer to send skb to
+ * @ovpn: the private data representing the current VPN session
+ * @skb: the skb to extract the destination address from
+ *
+ * This function takes a tunnel packet and looks up the peer to send it to
+ * after encapsulation. The skb is expected to be the in-tunnel packet, without
+ * any OpenVPN related header.
+ *
+ * Assume that the IP header is accessible in the skb data.
+ *
+ * Return: the peer if found or NULL otherwise.
+ */
+struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn,
+ struct sk_buff *skb)
+{
+ struct ovpn_peer *peer = NULL;
+ struct in6_addr addr6;
+ __be32 addr4;
+
+ /* in P2P mode, no matter the destination, packets are always sent to
+ * the single peer listening on the other side
+ */
+ if (ovpn->mode == OVPN_MODE_P2P) {
+ rcu_read_lock();
+ peer = rcu_dereference(ovpn->peer);
+ if (unlikely(peer && !ovpn_peer_hold(peer)))
+ peer = NULL;
+ rcu_read_unlock();
+ return peer;
+ }
+
+ rcu_read_lock();
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ addr4 = ovpn_nexthop_from_skb4(skb);
+ peer = ovpn_peer_get_by_vpn_addr4(ovpn, addr4);
+ break;
+ case htons(ETH_P_IPV6):
+ addr6 = ovpn_nexthop_from_skb6(skb);
+ peer = ovpn_peer_get_by_vpn_addr6(ovpn, &addr6);
+ break;
+ }
+
+ if (unlikely(peer && !ovpn_peer_hold(peer)))
+ peer = NULL;
+ rcu_read_unlock();
+
+ return peer;
+}
+
+/**
+ * ovpn_nexthop_from_rt4 - look up the IPv4 nexthop for the given destination
+ * @ovpn: the private data representing the current VPN session
+ * @dest: the destination to be looked up
+ *
+ * Queries the IPv4 system routing table for the IP of the nexthop to be used
+ * to reach the destination passed as argument. If no nexthop can be found, the
+ * destination itself is returned, as it probably has to be used as nexthop.
+ *
+ * Return: the IP of the next hop if found or dest itself otherwise
+ */
+static __be32 ovpn_nexthop_from_rt4(struct ovpn_priv *ovpn, __be32 dest)
+{
+ struct rtable *rt;
+ struct flowi4 fl = {
+ .daddr = dest
+ };
+
+ rt = ip_route_output_flow(dev_net(ovpn->dev), &fl, NULL);
+ if (IS_ERR(rt)) {
+ net_dbg_ratelimited("%s: no route to host %pI4\n",
+ netdev_name(ovpn->dev), &dest);
+ /* if we end up here this packet is probably going to be
+ * thrown away later
+ */
+ return dest;
+ }
+
+ if (!rt->rt_uses_gateway)
+ goto out;
+
+ dest = rt->rt_gw4;
+out:
+ ip_rt_put(rt);
+ return dest;
+}
+
+/**
+ * ovpn_nexthop_from_rt6 - look up the IPv6 nexthop for the given destination
+ * @ovpn: the private data representing the current VPN session
+ * @dest: the destination to be looked up
+ *
+ * Queries the IPv6 system routing table for the IP of the nexthop to be used
+ * to reach the destination passed as argument. If no nexthop can be found, the
+ * destination itself is returned, as it probably has to be used as nexthop.
+ *
+ * Return: the IP of the next hop if found or dest itself otherwise
+ */
+static struct in6_addr ovpn_nexthop_from_rt6(struct ovpn_priv *ovpn,
+ struct in6_addr dest)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct dst_entry *entry;
+ struct rt6_info *rt;
+ struct flowi6 fl = {
+ .daddr = dest,
+ };
+
+ entry = ipv6_stub->ipv6_dst_lookup_flow(dev_net(ovpn->dev), NULL, &fl,
+ NULL);
+ if (IS_ERR(entry)) {
+ net_dbg_ratelimited("%s: no route to host %pI6c\n",
+ netdev_name(ovpn->dev), &dest);
+ /* if we end up here this packet is probably going to be
+ * thrown away later
+ */
+ return dest;
+ }
+
+ rt = dst_rt6_info(entry);
+
+ if (!(rt->rt6i_flags & RTF_GATEWAY))
+ goto out;
+
+ dest = rt->rt6i_gateway;
+out:
+ dst_release((struct dst_entry *)rt);
+#endif
+ return dest;
+}
+
+/**
+ * ovpn_peer_check_by_src - check that skb source is routed via peer
+ * @ovpn: the openvpn instance to search
+ * @skb: the packet to extract source address from
+ * @peer: the peer to check against the source address
+ *
+ * Return: true if the peer is matching or false otherwise
+ */
+bool ovpn_peer_check_by_src(struct ovpn_priv *ovpn, struct sk_buff *skb,
+ struct ovpn_peer *peer)
+{
+ bool match = false;
+ struct in6_addr addr6;
+ __be32 addr4;
+
+ if (ovpn->mode == OVPN_MODE_P2P) {
+ /* in P2P mode, no matter the destination, packets are always
+ * sent to the single peer listening on the other side
+ */
+ return peer == rcu_access_pointer(ovpn->peer);
+ }
+
+ /* This function performs a reverse path check, therefore we now
+	 * look up the nexthop we would use if we wanted to route a packet
+ * to the source IP. If the nexthop matches the sender we know the
+ * latter is valid and we allow the packet to come in
+ */
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ addr4 = ovpn_nexthop_from_rt4(ovpn, ip_hdr(skb)->saddr);
+ rcu_read_lock();
+ match = (peer == ovpn_peer_get_by_vpn_addr4(ovpn, addr4));
+ rcu_read_unlock();
+ break;
+ case htons(ETH_P_IPV6):
+ addr6 = ovpn_nexthop_from_rt6(ovpn, ipv6_hdr(skb)->saddr);
+ rcu_read_lock();
+ match = (peer == ovpn_peer_get_by_vpn_addr6(ovpn, &addr6));
+ rcu_read_unlock();
+ break;
+ }
+
+ return match;
+}
+
+void ovpn_peer_hash_vpn_ip(struct ovpn_peer *peer)
+{
+ struct hlist_nulls_head *nhead;
+
+ lockdep_assert_held(&peer->ovpn->lock);
+
+ /* rehashing makes sense only in multipeer mode */
+ if (peer->ovpn->mode != OVPN_MODE_MP)
+ return;
+
+ if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) {
+ /* remove potential old hashing */
+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr4);
+
+ nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr4,
+ &peer->vpn_addrs.ipv4,
+ sizeof(peer->vpn_addrs.ipv4));
+ hlist_nulls_add_head_rcu(&peer->hash_entry_addr4, nhead);
+ }
+
+ if (!ipv6_addr_any(&peer->vpn_addrs.ipv6)) {
+ /* remove potential old hashing */
+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr6);
+
+ nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr6,
+ &peer->vpn_addrs.ipv6,
+ sizeof(peer->vpn_addrs.ipv6));
+ hlist_nulls_add_head_rcu(&peer->hash_entry_addr6, nhead);
+ }
+}
+
+/**
+ * ovpn_peer_add_mp - add peer to related tables in a MP instance
+ * @ovpn: the instance to add the peer to
+ * @peer: the peer to add
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_add_mp(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
+{
+ struct sockaddr_storage sa = { 0 };
+ struct hlist_nulls_head *nhead;
+ struct sockaddr_in6 *sa6;
+ struct sockaddr_in *sa4;
+ struct ovpn_bind *bind;
+ struct ovpn_peer *tmp;
+ size_t salen;
+ int ret = 0;
+
+ spin_lock_bh(&ovpn->lock);
+ /* do not add duplicates */
+ tmp = ovpn_peer_get_by_id(ovpn, peer->id);
+ if (tmp) {
+ ovpn_peer_put(tmp);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ bind = rcu_dereference_protected(peer->bind, true);
+ /* peers connected via TCP have bind == NULL */
+ if (bind) {
+ switch (bind->remote.in4.sin_family) {
+ case AF_INET:
+ sa4 = (struct sockaddr_in *)&sa;
+
+ sa4->sin_family = AF_INET;
+ sa4->sin_addr.s_addr = bind->remote.in4.sin_addr.s_addr;
+ sa4->sin_port = bind->remote.in4.sin_port;
+ salen = sizeof(*sa4);
+ break;
+ case AF_INET6:
+ sa6 = (struct sockaddr_in6 *)&sa;
+
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_addr = bind->remote.in6.sin6_addr;
+ sa6->sin6_port = bind->remote.in6.sin6_port;
+ salen = sizeof(*sa6);
+ break;
+ default:
+ ret = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ nhead = ovpn_get_hash_head(ovpn->peers->by_transp_addr, &sa,
+ salen);
+ hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead);
+ }
+
+ hlist_add_head_rcu(&peer->hash_entry_id,
+ ovpn_get_hash_head(ovpn->peers->by_id, &peer->id,
+ sizeof(peer->id)));
+
+ ovpn_peer_hash_vpn_ip(peer);
+out:
+ spin_unlock_bh(&ovpn->lock);
+ return ret;
+}
+
+/**
+ * ovpn_peer_add_p2p - add peer to related tables in a P2P instance
+ * @ovpn: the instance to add the peer to
+ * @peer: the peer to add
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_add_p2p(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
+{
+ LLIST_HEAD(release_list);
+ struct ovpn_peer *tmp;
+
+ spin_lock_bh(&ovpn->lock);
+	/* in P2P mode only a single peer is allowed, therefore the old one is
+	 * released and replaced by the new one
+ */
+ tmp = rcu_dereference_protected(ovpn->peer,
+ lockdep_is_held(&ovpn->lock));
+ if (tmp)
+ ovpn_peer_remove(tmp, OVPN_DEL_PEER_REASON_TEARDOWN,
+ &release_list);
+
+ rcu_assign_pointer(ovpn->peer, peer);
+ /* in P2P mode the carrier is switched on when the peer is added */
+ netif_carrier_on(ovpn->dev);
+ unlock_ovpn(ovpn, &release_list);
+
+ return 0;
+}
+
+/**
+ * ovpn_peer_add - add peer to the related tables
+ * @ovpn: the openvpn instance the peer belongs to
+ * @peer: the peer object to add
+ *
+ * Assume refcounter was increased by caller
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_peer_add(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
+{
+ switch (ovpn->mode) {
+ case OVPN_MODE_MP:
+ return ovpn_peer_add_mp(ovpn, peer);
+ case OVPN_MODE_P2P:
+ return ovpn_peer_add_p2p(ovpn, peer);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ovpn_peer_del_mp - delete peer from related tables in a MP instance
+ * @peer: the peer to delete
+ * @reason: reason why the peer was deleted (sent to userspace)
+ * @release_list: list where delete peer should be appended
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_del_mp(struct ovpn_peer *peer,
+ enum ovpn_del_peer_reason reason,
+ struct llist_head *release_list)
+{
+ struct ovpn_peer *tmp;
+ int ret = -ENOENT;
+
+ lockdep_assert_held(&peer->ovpn->lock);
+
+ tmp = ovpn_peer_get_by_id(peer->ovpn, peer->id);
+ if (tmp == peer) {
+ ovpn_peer_remove(peer, reason, release_list);
+ ret = 0;
+ }
+
+ if (tmp)
+ ovpn_peer_put(tmp);
+
+ return ret;
+}
+
+/**
+ * ovpn_peer_del_p2p - delete peer from related tables in a P2P instance
+ * @peer: the peer to delete
+ * @reason: reason why the peer was deleted (sent to userspace)
+ * @release_list: list where delete peer should be appended
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_del_p2p(struct ovpn_peer *peer,
+ enum ovpn_del_peer_reason reason,
+ struct llist_head *release_list)
+{
+ struct ovpn_peer *tmp;
+
+ lockdep_assert_held(&peer->ovpn->lock);
+
+ tmp = rcu_dereference_protected(peer->ovpn->peer,
+ lockdep_is_held(&peer->ovpn->lock));
+ if (tmp != peer)
+ return -ENOENT;
+
+ ovpn_peer_remove(peer, reason, release_list);
+
+ return 0;
+}
+
+/**
+ * ovpn_peer_del - delete peer from related tables
+ * @peer: the peer object to delete
+ * @reason: reason for deleting peer (will be sent to userspace)
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason)
+{
+ LLIST_HEAD(release_list);
+ int ret = -EOPNOTSUPP;
+
+ spin_lock_bh(&peer->ovpn->lock);
+ switch (peer->ovpn->mode) {
+ case OVPN_MODE_MP:
+ ret = ovpn_peer_del_mp(peer, reason, &release_list);
+ break;
+ case OVPN_MODE_P2P:
+ ret = ovpn_peer_del_p2p(peer, reason, &release_list);
+ break;
+ default:
+ break;
+ }
+ unlock_ovpn(peer->ovpn, &release_list);
+
+ return ret;
+}
+
+/**
+ * ovpn_peer_release_p2p - release peer upon P2P device teardown
+ * @ovpn: the instance being torn down
+ * @sk: if not NULL, release peer only if it's using this specific socket
+ * @reason: the reason for releasing the peer
+ */
+static void ovpn_peer_release_p2p(struct ovpn_priv *ovpn, struct sock *sk,
+ enum ovpn_del_peer_reason reason)
+{
+ struct ovpn_socket *ovpn_sock;
+ LLIST_HEAD(release_list);
+ struct ovpn_peer *peer;
+
+ spin_lock_bh(&ovpn->lock);
+ peer = rcu_dereference_protected(ovpn->peer,
+ lockdep_is_held(&ovpn->lock));
+ if (!peer) {
+ spin_unlock_bh(&ovpn->lock);
+ return;
+ }
+
+ if (sk) {
+ ovpn_sock = rcu_access_pointer(peer->sock);
+ if (!ovpn_sock || ovpn_sock->sock->sk != sk) {
+ spin_unlock_bh(&ovpn->lock);
+ ovpn_peer_put(peer);
+ return;
+ }
+ }
+
+ ovpn_peer_remove(peer, reason, &release_list);
+ unlock_ovpn(ovpn, &release_list);
+}
+
+static void ovpn_peers_release_mp(struct ovpn_priv *ovpn, struct sock *sk,
+ enum ovpn_del_peer_reason reason)
+{
+ struct ovpn_socket *ovpn_sock;
+ LLIST_HEAD(release_list);
+ struct ovpn_peer *peer;
+ struct hlist_node *tmp;
+ int bkt;
+
+ spin_lock_bh(&ovpn->lock);
+ hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) {
+ bool remove = true;
+
+ /* if a socket was passed as argument, skip all peers except
+ * those using it
+ */
+ if (sk) {
+ rcu_read_lock();
+ ovpn_sock = rcu_dereference(peer->sock);
+ remove = ovpn_sock && ovpn_sock->sock->sk == sk;
+ rcu_read_unlock();
+ }
+
+ if (remove)
+ ovpn_peer_remove(peer, reason, &release_list);
+ }
+ unlock_ovpn(ovpn, &release_list);
+}
+
+/**
+ * ovpn_peers_free - free all peers in the instance
+ * @ovpn: the instance whose peers should be released
+ * @sk: if not NULL, only peers using this socket are removed and the socket
+ * is released immediately
+ * @reason: the reason for releasing all peers
+ */
+void ovpn_peers_free(struct ovpn_priv *ovpn, struct sock *sk,
+ enum ovpn_del_peer_reason reason)
+{
+ switch (ovpn->mode) {
+ case OVPN_MODE_P2P:
+ ovpn_peer_release_p2p(ovpn, sk, reason);
+ break;
+ case OVPN_MODE_MP:
+ ovpn_peers_release_mp(ovpn, sk, reason);
+ break;
+ }
+}
+
+static time64_t ovpn_peer_keepalive_work_single(struct ovpn_peer *peer,
+ time64_t now,
+ struct llist_head *release_list)
+{
+ time64_t last_recv, last_sent, next_run1, next_run2;
+ unsigned long timeout, interval;
+ bool expired;
+
+ spin_lock_bh(&peer->lock);
+ /* we expect both timers to be configured at the same time,
+ * therefore bail out if either is not set
+ */
+ if (!peer->keepalive_timeout || !peer->keepalive_interval) {
+ spin_unlock_bh(&peer->lock);
+ return 0;
+ }
+
+ /* check for peer timeout */
+ expired = false;
+ timeout = peer->keepalive_timeout;
+ last_recv = READ_ONCE(peer->last_recv);
+ if (now < last_recv + timeout) {
+ peer->keepalive_recv_exp = last_recv + timeout;
+ next_run1 = peer->keepalive_recv_exp;
+ } else if (peer->keepalive_recv_exp > now) {
+ next_run1 = peer->keepalive_recv_exp;
+ } else {
+ expired = true;
+ }
+
+ if (expired) {
+ /* peer is dead -> kill it and move on */
+ spin_unlock_bh(&peer->lock);
+ netdev_dbg(peer->ovpn->dev, "peer %u expired\n",
+ peer->id);
+ ovpn_peer_remove(peer, OVPN_DEL_PEER_REASON_EXPIRED,
+ release_list);
+ return 0;
+ }
+
+ /* check for peer keepalive */
+ expired = false;
+ interval = peer->keepalive_interval;
+ last_sent = READ_ONCE(peer->last_sent);
+ if (now < last_sent + interval) {
+ peer->keepalive_xmit_exp = last_sent + interval;
+ next_run2 = peer->keepalive_xmit_exp;
+ } else if (peer->keepalive_xmit_exp > now) {
+ next_run2 = peer->keepalive_xmit_exp;
+ } else {
+ expired = true;
+ next_run2 = now + interval;
+ }
+ spin_unlock_bh(&peer->lock);
+
+ if (expired) {
+ /* a keepalive packet is required */
+ netdev_dbg(peer->ovpn->dev,
+ "sending keepalive to peer %u\n",
+ peer->id);
+ if (schedule_work(&peer->keepalive_work))
+ ovpn_peer_hold(peer);
+ }
+
+ if (next_run1 < next_run2)
+ return next_run1;
+
+ return next_run2;
+}
+
+static time64_t ovpn_peer_keepalive_work_mp(struct ovpn_priv *ovpn,
+ time64_t now,
+ struct llist_head *release_list)
+{
+ time64_t tmp_next_run, next_run = 0;
+ struct hlist_node *tmp;
+ struct ovpn_peer *peer;
+ int bkt;
+
+ lockdep_assert_held(&ovpn->lock);
+
+ hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) {
+ tmp_next_run = ovpn_peer_keepalive_work_single(peer, now,
+ release_list);
+ if (!tmp_next_run)
+ continue;
+
+ /* the next worker run will be scheduled based on the shortest
+ * required interval across all peers
+ */
+ if (!next_run || tmp_next_run < next_run)
+ next_run = tmp_next_run;
+ }
+
+ return next_run;
+}
+
+static time64_t ovpn_peer_keepalive_work_p2p(struct ovpn_priv *ovpn,
+ time64_t now,
+ struct llist_head *release_list)
+{
+ struct ovpn_peer *peer;
+ time64_t next_run = 0;
+
+ lockdep_assert_held(&ovpn->lock);
+
+ peer = rcu_dereference_protected(ovpn->peer,
+ lockdep_is_held(&ovpn->lock));
+ if (peer)
+ next_run = ovpn_peer_keepalive_work_single(peer, now,
+ release_list);
+
+ return next_run;
+}
+
+/**
+ * ovpn_peer_keepalive_work - run keepalive logic on each known peer
+ * @work: pointer to the work member of the related ovpn object
+ *
+ * Each peer has two timers (if configured):
+ * 1. peer timeout: when no data is received for a certain interval,
+ * the peer is considered dead and it gets killed.
+ * 2. peer keepalive: when no data is sent to a certain peer for a
+ * certain interval, a special 'keepalive' packet is explicitly sent.
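+ *    (illustrative example: with interval=10 and timeout=60, a keepalive
+ *    is sent after 10s of TX silence and the peer is considered expired
+ *    after 60s of RX silence)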
+ *
+ * This function iterates across the whole peer collection while
+ * checking the timers described above.
+ */
+void ovpn_peer_keepalive_work(struct work_struct *work)
+{
+ struct ovpn_priv *ovpn = container_of(work, struct ovpn_priv,
+ keepalive_work.work);
+ time64_t next_run = 0, now = ktime_get_real_seconds();
+ LLIST_HEAD(release_list);
+
+ spin_lock_bh(&ovpn->lock);
+ switch (ovpn->mode) {
+ case OVPN_MODE_MP:
+ next_run = ovpn_peer_keepalive_work_mp(ovpn, now,
+ &release_list);
+ break;
+ case OVPN_MODE_P2P:
+ next_run = ovpn_peer_keepalive_work_p2p(ovpn, now,
+ &release_list);
+ break;
+ }
+
+ /* prevent rearming if the interface is being destroyed */
+ if (next_run > 0 &&
+ READ_ONCE(ovpn->dev->reg_state) == NETREG_REGISTERED) {
+ netdev_dbg(ovpn->dev,
+ "scheduling keepalive work: now=%llu next_run=%llu delta=%llu\n",
+			   now, next_run, next_run - now);
+ schedule_delayed_work(&ovpn->keepalive_work,
+ (next_run - now) * HZ);
+ }
+ unlock_ovpn(ovpn, &release_list);
+}
diff --git a/drivers/net/ovpn/peer.h b/drivers/net/ovpn/peer.h
new file mode 100644
index 000000000000..a1423f2b09e0
--- /dev/null
+++ b/drivers/net/ovpn/peer.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNPEER_H_
+#define _NET_OVPN_OVPNPEER_H_
+
+#include <net/dst_cache.h>
+#include <net/strparser.h>
+
+#include "crypto.h"
+#include "socket.h"
+#include "stats.h"
+
+/**
+ * struct ovpn_peer - the main remote peer object
+ * @ovpn: main openvpn instance this peer belongs to
+ * @dev_tracker: reference tracker for associated dev
+ * @id: unique identifier
+ * @vpn_addrs: IP addresses assigned over the tunnel
+ * @vpn_addrs.ipv4: IPv4 assigned to peer on the tunnel
+ * @vpn_addrs.ipv6: IPv6 assigned to peer on the tunnel
+ * @hash_entry_id: entry in the peer ID hashtable
+ * @hash_entry_addr4: entry in the peer IPv4 hashtable
+ * @hash_entry_addr6: entry in the peer IPv6 hashtable
+ * @hash_entry_transp_addr: entry in the peer transport address hashtable
+ * @sock: the socket being used to talk to this peer
+ * @tcp: keeps track of TCP specific state
+ * @tcp.strp: stream parser context (TCP only)
+ * @tcp.user_queue: received packets that have to go to userspace (TCP only)
+ * @tcp.out_queue: packets on hold while socket is taken by user (TCP only)
+ * @tcp.tx_in_progress: true if TX is already ongoing (TCP only)
+ * @tcp.out_msg.skb: packet scheduled for sending (TCP only)
+ * @tcp.out_msg.offset: offset where next send should start (TCP only)
+ * @tcp.out_msg.len: remaining data to send within packet (TCP only)
+ * @tcp.sk_cb.sk_data_ready: pointer to original cb (TCP only)
+ * @tcp.sk_cb.sk_write_space: pointer to original cb (TCP only)
+ * @tcp.sk_cb.prot: pointer to original prot object (TCP only)
+ * @tcp.sk_cb.ops: pointer to the original prot_ops object (TCP only)
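+ * @tcp.defer_del_work: work used to defer peer deletion (TCP only)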
+ * @crypto: the crypto configuration (ciphers, keys, etc..)
+ * @dst_cache: cache for dst_entry used to send to peer
+ * @bind: remote peer binding
+ * @keepalive_interval: seconds after which a new keepalive should be sent
+ * @keepalive_xmit_exp: future timestamp when next keepalive should be sent
+ * @last_sent: timestamp of the last successfully sent packet
+ * @keepalive_timeout: seconds after which an inactive peer is considered dead
+ * @keepalive_recv_exp: future timestamp when the peer should expire
+ * @last_recv: timestamp of the last authenticated received packet
+ * @vpn_stats: per-peer in-VPN TX/RX stats
+ * @link_stats: per-peer link/transport TX/RX stats
+ * @delete_reason: why peer was deleted (i.e. timeout, transport error, ..)
+ * @lock: protects binding to peer (bind) and keepalive* fields
+ * @refcount: reference counter
+ * @rcu: used to free peer in an RCU safe way
+ * @release_entry: entry for the socket release list
+ * @keepalive_work: used to schedule keepalive sending
+ */
+struct ovpn_peer {
+ struct ovpn_priv *ovpn;
+ netdevice_tracker dev_tracker;
+ u32 id;
+ struct {
+ struct in_addr ipv4;
+ struct in6_addr ipv6;
+ } vpn_addrs;
+ struct hlist_node hash_entry_id;
+ struct hlist_nulls_node hash_entry_addr4;
+ struct hlist_nulls_node hash_entry_addr6;
+ struct hlist_nulls_node hash_entry_transp_addr;
+ struct ovpn_socket __rcu *sock;
+
+ struct {
+ struct strparser strp;
+ struct sk_buff_head user_queue;
+ struct sk_buff_head out_queue;
+ bool tx_in_progress;
+
+ struct {
+ struct sk_buff *skb;
+ int offset;
+ int len;
+ } out_msg;
+
+ struct {
+ void (*sk_data_ready)(struct sock *sk);
+ void (*sk_write_space)(struct sock *sk);
+ struct proto *prot;
+ const struct proto_ops *ops;
+ } sk_cb;
+
+ struct work_struct defer_del_work;
+ } tcp;
+ struct ovpn_crypto_state crypto;
+ struct dst_cache dst_cache;
+ struct ovpn_bind __rcu *bind;
+ unsigned long keepalive_interval;
+ unsigned long keepalive_xmit_exp;
+ time64_t last_sent;
+ unsigned long keepalive_timeout;
+ unsigned long keepalive_recv_exp;
+ time64_t last_recv;
+ struct ovpn_peer_stats vpn_stats;
+ struct ovpn_peer_stats link_stats;
+ enum ovpn_del_peer_reason delete_reason;
+ spinlock_t lock; /* protects bind and keepalive* */
+ struct kref refcount;
+ struct rcu_head rcu;
+ struct llist_node release_entry;
+ struct work_struct keepalive_work;
+};
+
+/**
+ * ovpn_peer_hold - increase reference counter
+ * @peer: the peer whose counter should be increased
+ *
+ * Return: true if the counter was increased or false if it was zero already
+ */
+static inline bool ovpn_peer_hold(struct ovpn_peer *peer)
+{
+ return kref_get_unless_zero(&peer->refcount);
+}
+
+void ovpn_peer_release(struct ovpn_peer *peer);
+void ovpn_peer_release_kref(struct kref *kref);
+
+/**
+ * ovpn_peer_put - decrease reference counter
+ * @peer: the peer whose counter should be decreased
+ */
+static inline void ovpn_peer_put(struct ovpn_peer *peer)
+{
+ kref_put(&peer->refcount, ovpn_peer_release_kref);
+}
+
+struct ovpn_peer *ovpn_peer_new(struct ovpn_priv *ovpn, u32 id);
+int ovpn_peer_add(struct ovpn_priv *ovpn, struct ovpn_peer *peer);
+int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason);
+void ovpn_peers_free(struct ovpn_priv *ovpn, struct sock *sock,
+ enum ovpn_del_peer_reason reason);
+
+struct ovpn_peer *ovpn_peer_get_by_transp_addr(struct ovpn_priv *ovpn,
+ struct sk_buff *skb);
+struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_priv *ovpn, u32 peer_id);
+struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn,
+ struct sk_buff *skb);
+void ovpn_peer_hash_vpn_ip(struct ovpn_peer *peer);
+bool ovpn_peer_check_by_src(struct ovpn_priv *ovpn, struct sk_buff *skb,
+ struct ovpn_peer *peer);
+
+void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout);
+void ovpn_peer_keepalive_work(struct work_struct *work);
+
+void ovpn_peer_endpoints_update(struct ovpn_peer *peer, struct sk_buff *skb);
+int ovpn_peer_reset_sockaddr(struct ovpn_peer *peer,
+ const struct sockaddr_storage *ss,
+ const void *local_ip);
+
+#endif /* _NET_OVPN_OVPNPEER_H_ */
diff --git a/drivers/net/ovpn/pktid.c b/drivers/net/ovpn/pktid.c
new file mode 100644
index 000000000000..2f29049897e3
--- /dev/null
+++ b/drivers/net/ovpn/pktid.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#include <linux/atomic.h>
+#include <linux/jiffies.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "pktid.h"
+
+void ovpn_pktid_xmit_init(struct ovpn_pktid_xmit *pid)
+{
+ atomic_set(&pid->seq_num, 1);
+}
+
+void ovpn_pktid_recv_init(struct ovpn_pktid_recv *pr)
+{
+ memset(pr, 0, sizeof(*pr));
+ spin_lock_init(&pr->lock);
+}
+
+/* Packet replay detection.
+ * Allows ID backtrack of up to REPLAY_WINDOW_SIZE - 1.
+ */
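+/* Illustrative example, starting from pr->id == 10: ID 11 simply advances the
+ * window by one; an ID of 15 instead jumps forward by 5, clearing the bits of
+ * the skipped IDs 11-14 so that they can still be accepted later; an older ID
+ * such as 8 is accepted at most once (if still within the window and above
+ * id_floor) and is rejected as a replay afterwards.
+ */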
+int ovpn_pktid_recv(struct ovpn_pktid_recv *pr, u32 pkt_id, u32 pkt_time)
+{
+ const unsigned long now = jiffies;
+ int ret;
+
+ /* ID must not be zero */
+ if (unlikely(pkt_id == 0))
+ return -EINVAL;
+
+ spin_lock_bh(&pr->lock);
+
+ /* expire backtracks at or below pr->id after PKTID_RECV_EXPIRE time */
+ if (unlikely(time_after_eq(now, pr->expire)))
+ pr->id_floor = pr->id;
+
+ /* time changed? */
+ if (unlikely(pkt_time != pr->time)) {
+ if (pkt_time > pr->time) {
+ /* time moved forward, accept */
+ pr->base = 0;
+ pr->extent = 0;
+ pr->id = 0;
+ pr->time = pkt_time;
+ pr->id_floor = 0;
+ } else {
+ /* time moved backward, reject */
+ ret = -ETIME;
+ goto out;
+ }
+ }
+
+ if (likely(pkt_id == pr->id + 1)) {
+ /* well-formed ID sequence (incremented by 1) */
+ pr->base = REPLAY_INDEX(pr->base, -1);
+ pr->history[pr->base / 8] |= (1 << (pr->base % 8));
+ if (pr->extent < REPLAY_WINDOW_SIZE)
+ ++pr->extent;
+ pr->id = pkt_id;
+ } else if (pkt_id > pr->id) {
+ /* ID jumped forward by more than one */
+ const unsigned int delta = pkt_id - pr->id;
+
+ if (delta < REPLAY_WINDOW_SIZE) {
+ unsigned int i;
+
+ pr->base = REPLAY_INDEX(pr->base, -delta);
+ pr->history[pr->base / 8] |= (1 << (pr->base % 8));
+ pr->extent += delta;
+ if (pr->extent > REPLAY_WINDOW_SIZE)
+ pr->extent = REPLAY_WINDOW_SIZE;
+ for (i = 1; i < delta; ++i) {
+ unsigned int newb = REPLAY_INDEX(pr->base, i);
+
+ pr->history[newb / 8] &= ~BIT(newb % 8);
+ }
+ } else {
+ pr->base = 0;
+ pr->extent = REPLAY_WINDOW_SIZE;
+ memset(pr->history, 0, sizeof(pr->history));
+ pr->history[0] = 1;
+ }
+ pr->id = pkt_id;
+ } else {
+ /* ID backtrack */
+ const unsigned int delta = pr->id - pkt_id;
+
+ if (delta > pr->max_backtrack)
+ pr->max_backtrack = delta;
+ if (delta < pr->extent) {
+ if (pkt_id > pr->id_floor) {
+ const unsigned int ri = REPLAY_INDEX(pr->base,
+ delta);
+ u8 *p = &pr->history[ri / 8];
+ const u8 mask = (1 << (ri % 8));
+
+ if (*p & mask) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *p |= mask;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ pr->expire = now + PKTID_RECV_EXPIRE;
+ ret = 0;
+out:
+ spin_unlock_bh(&pr->lock);
+ return ret;
+}
diff --git a/drivers/net/ovpn/pktid.h b/drivers/net/ovpn/pktid.h
new file mode 100644
index 000000000000..0262d026d15e
--- /dev/null
+++ b/drivers/net/ovpn/pktid.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNPKTID_H_
+#define _NET_OVPN_OVPNPKTID_H_
+
+#include "proto.h"
+
+/* If no packets received for this length of time, set a backtrack floor
+ * at highest received packet ID thus far.
+ */
+#define PKTID_RECV_EXPIRE (30 * HZ)
+
+/* Packet-ID state for transmitter */
+struct ovpn_pktid_xmit {
+ atomic_t seq_num;
+};
+
+/* replay window sizing in bytes = 2^REPLAY_WINDOW_ORDER */
+#define REPLAY_WINDOW_ORDER 8
+
+#define REPLAY_WINDOW_BYTES BIT(REPLAY_WINDOW_ORDER)
+#define REPLAY_WINDOW_SIZE (REPLAY_WINDOW_BYTES * 8)
+#define REPLAY_INDEX(base, i) (((base) + (i)) & (REPLAY_WINDOW_SIZE - 1))
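+/* with REPLAY_WINDOW_ORDER == 8 this yields a 256-byte history bitmap, i.e. a
+ * 2048-bit sliding window; REPLAY_INDEX() thus wraps every 2048 packet IDs
+ */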
+
+/* Packet-ID state for receiver.
+ * Other than lock member, can be zeroed to initialize.
+ */
+struct ovpn_pktid_recv {
+ /* "sliding window" bitmask of recent packet IDs received */
+ u8 history[REPLAY_WINDOW_BYTES];
+ /* bit position of deque base in history */
+ unsigned int base;
+ /* extent (in bits) of deque in history */
+ unsigned int extent;
+ /* expiration of history in jiffies */
+ unsigned long expire;
+ /* highest sequence number received */
+ u32 id;
+ /* highest time stamp received */
+ u32 time;
+ /* we will only accept backtrack IDs > id_floor */
+ u32 id_floor;
+ unsigned int max_backtrack;
+	/* protects the entire packet ID state */
+ spinlock_t lock;
+};
+
+/* Get the next packet ID for xmit */
+static inline int ovpn_pktid_xmit_next(struct ovpn_pktid_xmit *pid, u32 *pktid)
+{
+ const u32 seq_num = atomic_fetch_add_unless(&pid->seq_num, 1, 0);
+	/* when the 32-bit space is exhausted we return an error, because the
+	 * packet ID is used to create the cipher IV and we do not want to
+	 * reuse the same value more than once
+ */
+ if (unlikely(!seq_num))
+ return -ERANGE;
+
+ *pktid = seq_num;
+
+ return 0;
+}
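+
+/* NOTE (illustrative): atomic_fetch_add_unless(&pid->seq_num, 1, 0) stops
+ * incrementing once the counter has wrapped to zero, so every subsequent
+ * call keeps returning -ERANGE until ovpn_pktid_xmit_init() is invoked
+ * again (typically when a new key is installed)
+ */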
+
+/* Write 12-byte AEAD IV to dest */
+static inline void ovpn_pktid_aead_write(const u32 pktid,
+ const u8 nt[],
+ unsigned char *dest)
+{
+ *(__force __be32 *)(dest) = htonl(pktid);
+ BUILD_BUG_ON(4 + OVPN_NONCE_TAIL_SIZE != OVPN_NONCE_SIZE);
+ memcpy(dest + 4, nt, OVPN_NONCE_TAIL_SIZE);
+}
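+
+/* illustrative example (values taken from the diagram in proto.h): for
+ * pktid 5 and nonce tail 52 1c 3b 01 43 08 c0 41, the resulting 12-byte IV
+ * is 00 00 00 05 52 1c 3b 01 43 08 c0 41
+ */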
+
+void ovpn_pktid_xmit_init(struct ovpn_pktid_xmit *pid);
+void ovpn_pktid_recv_init(struct ovpn_pktid_recv *pr);
+
+int ovpn_pktid_recv(struct ovpn_pktid_recv *pr, u32 pkt_id, u32 pkt_time);
+
+#endif /* _NET_OVPN_OVPNPKTID_H_ */
diff --git a/drivers/net/ovpn/proto.h b/drivers/net/ovpn/proto.h
new file mode 100644
index 000000000000..b7d285b4d9c1
--- /dev/null
+++ b/drivers/net/ovpn/proto.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_PROTO_H_
+#define _NET_OVPN_PROTO_H_
+
+#include "main.h"
+
+#include <linux/bitfield.h>
+#include <linux/skbuff.h>
+
+/* When the OpenVPN protocol is run in AEAD mode, use
+ * the OpenVPN packet ID as the AEAD nonce:
+ *
+ * 00000005 521c3b01 4308c041
+ * [seq # ] [ nonce_tail ]
+ * [ 12-byte full IV ] -> OVPN_NONCE_SIZE
+ * [4-bytes -> OVPN_NONCE_WIRE_SIZE
+ * on wire]
+ */
+
+/* nonce size (96bits) as required by AEAD ciphers */
+#define OVPN_NONCE_SIZE 12
+/* last 8 bytes of AEAD nonce: provided by userspace and usually derived
+ * from key material generated during TLS handshake
+ */
+#define OVPN_NONCE_TAIL_SIZE 8
+
+/* OpenVPN nonce size reduced by 8-byte nonce tail -- this is the
+ * size of the AEAD Associated Data (AD) sent over the wire
+ * and is normally the head of the IV
+ */
+#define OVPN_NONCE_WIRE_SIZE (OVPN_NONCE_SIZE - OVPN_NONCE_TAIL_SIZE)
+
+#define OVPN_OPCODE_SIZE 4 /* DATA_V2 opcode size */
+#define OVPN_OPCODE_KEYID_MASK 0x07000000
+#define OVPN_OPCODE_PKTTYPE_MASK 0xF8000000
+#define OVPN_OPCODE_PEERID_MASK 0x00FFFFFF
+
+/* packet opcodes of interest to us */
+#define OVPN_DATA_V1 6 /* data channel v1 packet */
+#define OVPN_DATA_V2 9 /* data channel v2 packet */
+
+#define OVPN_PEER_ID_UNDEF 0x00FFFFFF
+
+/**
+ * ovpn_opcode_from_skb - extract OP code from skb at specified offset
+ * @skb: the packet to extract the OP code from
+ * @offset: the offset in the data buffer where the OP code is located
+ *
+ * Note: this function assumes that the skb head was pulled enough
+ * to access the first 4 bytes.
+ *
+ * Return: the OP code
+ */
+static inline u8 ovpn_opcode_from_skb(const struct sk_buff *skb, u16 offset)
+{
+ u32 opcode = be32_to_cpu(*(__be32 *)(skb->data + offset));
+
+ return FIELD_GET(OVPN_OPCODE_PKTTYPE_MASK, opcode);
+}
+
+/**
+ * ovpn_peer_id_from_skb - extract peer ID from skb at specified offset
+ * @skb: the packet to extract the peer ID from
+ * @offset: the offset in the data buffer where the opcode (and peer ID) is located
+ *
+ * Note: this function assumes that the skb head was pulled enough
+ * to access the first 4 bytes.
+ *
+ * Return: the peer ID
+ */
+static inline u32 ovpn_peer_id_from_skb(const struct sk_buff *skb, u16 offset)
+{
+ u32 opcode = be32_to_cpu(*(__be32 *)(skb->data + offset));
+
+ return FIELD_GET(OVPN_OPCODE_PEERID_MASK, opcode);
+}
+
+/**
+ * ovpn_key_id_from_skb - extract key ID from the skb head
+ * @skb: the packet to extract the key ID from
+ *
+ * Note: this function assumes that the skb head was pulled enough
+ * to access the first 4 bytes.
+ *
+ * Return: the key ID
+ */
+static inline u8 ovpn_key_id_from_skb(const struct sk_buff *skb)
+{
+ u32 opcode = be32_to_cpu(*(__be32 *)skb->data);
+
+ return FIELD_GET(OVPN_OPCODE_KEYID_MASK, opcode);
+}
+
+/**
+ * ovpn_opcode_compose - combine OP code, key ID and peer ID to wire format
+ * @opcode: the OP code
+ * @key_id: the key ID
+ * @peer_id: the peer ID
+ *
+ * Return: a 4-byte integer obtained by combining all input values according
+ * to the OpenVPN wire format. This integer can then be written to the packet
+ * header.
+ */
+static inline u32 ovpn_opcode_compose(u8 opcode, u8 key_id, u32 peer_id)
+{
+ return FIELD_PREP(OVPN_OPCODE_PKTTYPE_MASK, opcode) |
+ FIELD_PREP(OVPN_OPCODE_KEYID_MASK, key_id) |
+ FIELD_PREP(OVPN_OPCODE_PEERID_MASK, peer_id);
+}
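+
+/* illustrative example: a DATA_V2 packet (opcode 9) with key ID 0 sent to
+ * peer ID 1 is composed as 0x48000001 (9 << 27 | 0 << 24 | 1)
+ */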
+
+#endif /* _NET_OVPN_OVPNPROTO_H_ */
diff --git a/drivers/net/ovpn/skb.h b/drivers/net/ovpn/skb.h
new file mode 100644
index 000000000000..64430880f1da
--- /dev/null
+++ b/drivers/net/ovpn/skb.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_SKB_H_
+#define _NET_OVPN_SKB_H_
+
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct ovpn_cb {
+ struct ovpn_peer *peer;
+ struct ovpn_crypto_key_slot *ks;
+ struct aead_request *req;
+ struct scatterlist *sg;
+ u8 *iv;
+ unsigned int payload_offset;
+ bool nosignal;
+};
+
+static inline struct ovpn_cb *ovpn_skb_cb(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ovpn_cb) > sizeof(skb->cb));
+ return (struct ovpn_cb *)skb->cb;
+}
+
+/* Return the Ethernet protocol (ETH_P_IP or ETH_P_IPV6) matching the IP
+ * version found in the skb network header.
+ * Return 0 if the protocol is not IPv4/IPv6 or cannot be read.
+ */
+static inline __be16 ovpn_ip_check_protocol(struct sk_buff *skb)
+{
+ __be16 proto = 0;
+
+ /* skb could be non-linear,
+ * make sure IP header is in non-fragmented part
+ */
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ return 0;
+
+ if (ip_hdr(skb)->version == 4) {
+ proto = htons(ETH_P_IP);
+ } else if (ip_hdr(skb)->version == 6) {
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+ return 0;
+ proto = htons(ETH_P_IPV6);
+ }
+
+ return proto;
+}
+
+#endif /* _NET_OVPN_SKB_H_ */
diff --git a/drivers/net/ovpn/socket.c b/drivers/net/ovpn/socket.c
new file mode 100644
index 000000000000..a83cbab72591
--- /dev/null
+++ b/drivers/net/ovpn/socket.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/udp.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "io.h"
+#include "peer.h"
+#include "socket.h"
+#include "tcp.h"
+#include "udp.h"
+
+static void ovpn_socket_release_kref(struct kref *kref)
+{
+ struct ovpn_socket *sock = container_of(kref, struct ovpn_socket,
+ refcount);
+
+ if (sock->sock->sk->sk_protocol == IPPROTO_UDP)
+ ovpn_udp_socket_detach(sock);
+ else if (sock->sock->sk->sk_protocol == IPPROTO_TCP)
+ ovpn_tcp_socket_detach(sock);
+}
+
+/**
+ * ovpn_socket_put - decrease reference counter
+ * @peer: peer whose socket reference counter should be decreased
+ * @sock: the RCU protected peer socket
+ *
+ * This function is only used internally. Callers wanting to release
+ * references to the ovpn_socket should use ovpn_socket_release()
+ *
+ * Return: true if the socket was released, false otherwise
+ */
+static bool ovpn_socket_put(struct ovpn_peer *peer, struct ovpn_socket *sock)
+{
+ return kref_put(&sock->refcount, ovpn_socket_release_kref);
+}
+
+/**
+ * ovpn_socket_release - release resources owned by socket user
+ * @peer: peer whose socket should be released
+ *
+ * This function should be invoked when the peer is being removed
+ * and wants to drop its link to the socket.
+ *
+ * In case of UDP, the detach routine will drop a reference to the
+ * ovpn netdev, pointed to by the ovpn_socket.
+ *
+ * In case of TCP, releasing the socket will drop the reference to
+ * the peer it is linked to, thus allowing the peer to disappear
+ * as well.
+ *
+ * This function is expected to be invoked exactly once per peer
+ *
+ * NOTE: this function may sleep
+ */
+void ovpn_socket_release(struct ovpn_peer *peer)
+{
+ struct ovpn_socket *sock;
+ bool released;
+
+ might_sleep();
+
+ sock = rcu_replace_pointer(peer->sock, NULL, true);
+ /* release may be invoked after socket was detached */
+ if (!sock)
+ return;
+
+ /* sanity check: we should not end up here if the socket
+ * was already closed
+ */
+ if (!sock->sock->sk) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ return;
+ }
+
+	/* Drop the reference while holding the sock lock to avoid a
+	 * concurrent ovpn_socket_new() call messing with a partially
+	 * detached socket.
+ *
+ * Holding the lock ensures that a socket with refcnt 0 is fully
+ * detached before it can be picked by a concurrent reader.
+ */
+ lock_sock(sock->sock->sk);
+ released = ovpn_socket_put(peer, sock);
+ release_sock(sock->sock->sk);
+
+ /* align all readers with sk_user_data being NULL */
+ synchronize_rcu();
+
+ /* following cleanup should happen with lock released */
+ if (released) {
+ if (sock->sock->sk->sk_protocol == IPPROTO_UDP) {
+ netdev_put(sock->ovpn->dev, &sock->dev_tracker);
+ } else if (sock->sock->sk->sk_protocol == IPPROTO_TCP) {
+ /* wait for TCP jobs to terminate */
+ ovpn_tcp_socket_wait_finish(sock);
+ ovpn_peer_put(sock->peer);
+ }
+ /* we can call plain kfree() because we already waited one RCU
+ * period due to synchronize_rcu()
+ */
+ kfree(sock);
+ }
+}
+
+static bool ovpn_socket_hold(struct ovpn_socket *sock)
+{
+ return kref_get_unless_zero(&sock->refcount);
+}
+
+static int ovpn_socket_attach(struct ovpn_socket *sock, struct ovpn_peer *peer)
+{
+ if (sock->sock->sk->sk_protocol == IPPROTO_UDP)
+ return ovpn_udp_socket_attach(sock, peer->ovpn);
+ else if (sock->sock->sk->sk_protocol == IPPROTO_TCP)
+ return ovpn_tcp_socket_attach(sock, peer);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ovpn_socket_new - create a new socket and initialize it
+ * @sock: the kernel socket to embed
+ * @peer: the peer reachable via this socket
+ *
+ * Return: an openvpn socket on success or a negative error code otherwise
+ */
+struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer)
+{
+ struct ovpn_socket *ovpn_sock;
+ int ret;
+
+ lock_sock(sock->sk);
+
+ /* a TCP socket can only be owned by a single peer, therefore there
+ * can't be any other user
+ */
+ if (sock->sk->sk_protocol == IPPROTO_TCP && sock->sk->sk_user_data) {
+ ovpn_sock = ERR_PTR(-EBUSY);
+ goto sock_release;
+ }
+
+ /* a UDP socket can be shared across multiple peers, but we must make
+ * sure it is not owned by something else
+ */
+ if (sock->sk->sk_protocol == IPPROTO_UDP) {
+ u8 type = READ_ONCE(udp_sk(sock->sk)->encap_type);
+
+ /* socket owned by other encapsulation module */
+ if (type && type != UDP_ENCAP_OVPNINUDP) {
+ ovpn_sock = ERR_PTR(-EBUSY);
+ goto sock_release;
+ }
+
+ rcu_read_lock();
+ ovpn_sock = rcu_dereference_sk_user_data(sock->sk);
+ if (ovpn_sock) {
+ /* socket owned by another ovpn instance, we can't use it */
+ if (ovpn_sock->ovpn != peer->ovpn) {
+ ovpn_sock = ERR_PTR(-EBUSY);
+ rcu_read_unlock();
+ goto sock_release;
+ }
+
+ /* this socket is already owned by this instance,
+ * therefore we can increase the refcounter and
+ * use it as expected
+ */
+ if (WARN_ON(!ovpn_socket_hold(ovpn_sock))) {
+ /* this should never happen because setting
+ * the refcnt to 0 and detaching the socket
+ * is expected to be atomic
+ */
+ ovpn_sock = ERR_PTR(-EAGAIN);
+ rcu_read_unlock();
+ goto sock_release;
+ }
+
+ rcu_read_unlock();
+ goto sock_release;
+ }
+ rcu_read_unlock();
+ }
+
+ /* socket is not owned: attach to this ovpn instance */
+
+ ovpn_sock = kzalloc(sizeof(*ovpn_sock), GFP_KERNEL);
+ if (!ovpn_sock) {
+ ovpn_sock = ERR_PTR(-ENOMEM);
+ goto sock_release;
+ }
+
+ ovpn_sock->sock = sock;
+ kref_init(&ovpn_sock->refcount);
+
+ ret = ovpn_socket_attach(ovpn_sock, peer);
+ if (ret < 0) {
+ kfree(ovpn_sock);
+ ovpn_sock = ERR_PTR(ret);
+ goto sock_release;
+ }
+
+ /* TCP sockets are per-peer, therefore they are linked to their unique
+ * peer
+ */
+ if (sock->sk->sk_protocol == IPPROTO_TCP) {
+ INIT_WORK(&ovpn_sock->tcp_tx_work, ovpn_tcp_tx_work);
+ ovpn_sock->peer = peer;
+ ovpn_peer_hold(peer);
+ } else if (sock->sk->sk_protocol == IPPROTO_UDP) {
+ /* in UDP we only link the ovpn instance since the socket is
+ * shared among multiple peers
+ */
+ ovpn_sock->ovpn = peer->ovpn;
+ netdev_hold(peer->ovpn->dev, &ovpn_sock->dev_tracker,
+ GFP_KERNEL);
+ }
+
+ rcu_assign_sk_user_data(sock->sk, ovpn_sock);
+sock_release:
+ release_sock(sock->sk);
+ return ovpn_sock;
+}
diff --git a/drivers/net/ovpn/socket.h b/drivers/net/ovpn/socket.h
new file mode 100644
index 000000000000..00d856b1a5d8
--- /dev/null
+++ b/drivers/net/ovpn/socket.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_SOCK_H_
+#define _NET_OVPN_SOCK_H_
+
+#include <linux/net.h>
+#include <linux/kref.h>
+#include <net/sock.h>
+
+struct ovpn_priv;
+struct ovpn_peer;
+
+/**
+ * struct ovpn_socket - a kernel socket referenced in the ovpn code
+ * @ovpn: ovpn instance owning this socket (UDP only)
+ * @dev_tracker: reference tracker for associated dev (UDP only)
+ * @peer: unique peer transmitting over this socket (TCP only)
+ * @sock: the low level sock object
+ * @refcount: amount of contexts currently referencing this object
+ * @work: member used to schedule release routine (it may block)
+ * @tcp_tx_work: work for deferring outgoing packet processing (TCP only)
+ */
+struct ovpn_socket {
+ union {
+ struct {
+ struct ovpn_priv *ovpn;
+ netdevice_tracker dev_tracker;
+ };
+ struct ovpn_peer *peer;
+ };
+
+ struct socket *sock;
+ struct kref refcount;
+ struct work_struct work;
+ struct work_struct tcp_tx_work;
+};
+
+struct ovpn_socket *ovpn_socket_new(struct socket *sock,
+ struct ovpn_peer *peer);
+void ovpn_socket_release(struct ovpn_peer *peer);
+
+#endif /* _NET_OVPN_SOCK_H_ */
diff --git a/drivers/net/ovpn/stats.c b/drivers/net/ovpn/stats.c
new file mode 100644
index 000000000000..d637143473bb
--- /dev/null
+++ b/drivers/net/ovpn/stats.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/atomic.h>
+
+#include "stats.h"
+
+void ovpn_peer_stats_init(struct ovpn_peer_stats *ps)
+{
+ atomic64_set(&ps->rx.bytes, 0);
+ atomic64_set(&ps->rx.packets, 0);
+
+ atomic64_set(&ps->tx.bytes, 0);
+ atomic64_set(&ps->tx.packets, 0);
+}
diff --git a/drivers/net/ovpn/stats.h b/drivers/net/ovpn/stats.h
new file mode 100644
index 000000000000..53433d8b6c33
--- /dev/null
+++ b/drivers/net/ovpn/stats.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ * Lev Stipakov <lev@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNSTATS_H_
+#define _NET_OVPN_OVPNSTATS_H_
+
+/* one stat */
+struct ovpn_peer_stat {
+ atomic64_t bytes;
+ atomic64_t packets;
+};
+
+/* rx and tx stats combined */
+struct ovpn_peer_stats {
+ struct ovpn_peer_stat rx;
+ struct ovpn_peer_stat tx;
+};
+
+void ovpn_peer_stats_init(struct ovpn_peer_stats *ps);
+
+static inline void ovpn_peer_stats_increment(struct ovpn_peer_stat *stat,
+ const unsigned int n)
+{
+ atomic64_add(n, &stat->bytes);
+ atomic64_inc(&stat->packets);
+}
+
+static inline void ovpn_peer_stats_increment_rx(struct ovpn_peer_stats *stats,
+ const unsigned int n)
+{
+ ovpn_peer_stats_increment(&stats->rx, n);
+}
+
+static inline void ovpn_peer_stats_increment_tx(struct ovpn_peer_stats *stats,
+ const unsigned int n)
+{
+ ovpn_peer_stats_increment(&stats->tx, n);
+}
+
+#endif /* _NET_OVPN_OVPNSTATS_H_ */
diff --git a/drivers/net/ovpn/tcp.c b/drivers/net/ovpn/tcp.c
new file mode 100644
index 000000000000..7c42d84987ad
--- /dev/null
+++ b/drivers/net/ovpn/tcp.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/skbuff.h>
+#include <net/hotdata.h>
+#include <net/inet_common.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <net/transp_v6.h>
+#include <net/route.h>
+#include <trace/events/sock.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "io.h"
+#include "peer.h"
+#include "proto.h"
+#include "skb.h"
+#include "tcp.h"
+
+#define OVPN_TCP_DEPTH_NESTING 2
+#if OVPN_TCP_DEPTH_NESTING == SINGLE_DEPTH_NESTING
+#error "OVPN TCP requires its own lockdep subclass"
+#endif
+
+static struct proto ovpn_tcp_prot __ro_after_init;
+static struct proto_ops ovpn_tcp_ops __ro_after_init;
+static struct proto ovpn_tcp6_prot __ro_after_init;
+static struct proto_ops ovpn_tcp6_ops __ro_after_init;
+
+static int ovpn_tcp_parse(struct strparser *strp, struct sk_buff *skb)
+{
+ struct strp_msg *rxm = strp_msg(skb);
+ __be16 blen;
+ u16 len;
+ int err;
+
+ /* when packets are written to the TCP stream, they are prepended with
+ * two bytes indicating the actual packet size.
+ * Parse accordingly and return the actual size (including the size
+ * header)
+ */
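+	/* illustrative example: a 1300-byte OpenVPN packet is framed as the
+	 * two bytes 0x05 0x14 followed by the 1300 payload bytes, hence this
+	 * parser returns 1302
+	 */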
+
+ if (skb->len < rxm->offset + 2)
+ return 0;
+
+ err = skb_copy_bits(skb, rxm->offset, &blen, sizeof(blen));
+ if (err < 0)
+ return err;
+
+ len = be16_to_cpu(blen);
+ if (len < 2)
+ return -EINVAL;
+
+ return len + 2;
+}
+
+/* queue skb for sending to userspace via recvmsg on the socket */
+static void ovpn_tcp_to_userspace(struct ovpn_peer *peer, struct sock *sk,
+ struct sk_buff *skb)
+{
+ skb_set_owner_r(skb, sk);
+ memset(skb->cb, 0, sizeof(skb->cb));
+ skb_queue_tail(&peer->tcp.user_queue, skb);
+ peer->tcp.sk_cb.sk_data_ready(sk);
+}
+
+static void ovpn_tcp_rcv(struct strparser *strp, struct sk_buff *skb)
+{
+ struct ovpn_peer *peer = container_of(strp, struct ovpn_peer, tcp.strp);
+ struct strp_msg *msg = strp_msg(skb);
+ size_t pkt_len = msg->full_len - 2;
+ size_t off = msg->offset + 2;
+ u8 opcode;
+
+ /* ensure skb->data points to the beginning of the openvpn packet */
+ if (!pskb_pull(skb, off)) {
+ net_warn_ratelimited("%s: packet too small for peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto err;
+ }
+
+ /* strparser does not trim the skb for us, therefore we do it now */
+ if (pskb_trim(skb, pkt_len) != 0) {
+ net_warn_ratelimited("%s: trimming skb failed for peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto err;
+ }
+
+ /* we need the first 4 bytes of data to be accessible
+ * to extract the opcode and the key ID later on
+ */
+ if (!pskb_may_pull(skb, OVPN_OPCODE_SIZE)) {
+ net_warn_ratelimited("%s: packet too small to fetch opcode for peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto err;
+ }
+
+ /* DATA_V2 packets are handled in kernel, the rest goes to user space */
+ opcode = ovpn_opcode_from_skb(skb, 0);
+ if (unlikely(opcode != OVPN_DATA_V2)) {
+ if (opcode == OVPN_DATA_V1) {
+ net_warn_ratelimited("%s: DATA_V1 detected on the TCP stream\n",
+ netdev_name(peer->ovpn->dev));
+ goto err;
+ }
+
+ /* The packet size header must be there when sending the packet
+ * to userspace, therefore we put it back
+ */
+ skb_push(skb, 2);
+ ovpn_tcp_to_userspace(peer, strp->sk, skb);
+ return;
+ }
+
+ /* hold reference to peer as required by ovpn_recv().
+ *
+ * NOTE: in this context we should already be holding a reference to
+ * this peer, therefore ovpn_peer_hold() is not expected to fail
+ */
+ if (WARN_ON(!ovpn_peer_hold(peer)))
+ goto err;
+
+ ovpn_recv(peer, skb);
+ return;
+err:
+ dev_dstats_rx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+ ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_TRANSPORT_ERROR);
+}
+
+static int ovpn_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int flags, int *addr_len)
+{
+ int err = 0, off, copied = 0, ret;
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ struct sk_buff *skb;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!sock || !sock->peer || !ovpn_peer_hold(sock->peer))) {
+ rcu_read_unlock();
+ return -EBADF;
+ }
+ peer = sock->peer;
+ rcu_read_unlock();
+
+ skb = __skb_recv_datagram(sk, &peer->tcp.user_queue, flags, &off, &err);
+ if (!skb) {
+ if (err == -EAGAIN && sk->sk_shutdown & RCV_SHUTDOWN) {
+ ret = 0;
+ goto out;
+ }
+ ret = err;
+ goto out;
+ }
+
+ copied = len;
+ if (copied > skb->len)
+ copied = skb->len;
+ else if (copied < skb->len)
+ msg->msg_flags |= MSG_TRUNC;
+
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ ret = err;
+ goto out;
+ }
+
+ if (flags & MSG_TRUNC)
+ copied = skb->len;
+ kfree_skb(skb);
+ ret = copied;
+out:
+ ovpn_peer_put(peer);
+ return ret;
+}
+
+void ovpn_tcp_socket_detach(struct ovpn_socket *ovpn_sock)
+{
+ struct ovpn_peer *peer = ovpn_sock->peer;
+ struct socket *sock = ovpn_sock->sock;
+
+ strp_stop(&peer->tcp.strp);
+ skb_queue_purge(&peer->tcp.user_queue);
+
+ /* restore CBs that were saved in ovpn_sock_set_tcp_cb() */
+ sock->sk->sk_data_ready = peer->tcp.sk_cb.sk_data_ready;
+ sock->sk->sk_write_space = peer->tcp.sk_cb.sk_write_space;
+ sock->sk->sk_prot = peer->tcp.sk_cb.prot;
+ sock->sk->sk_socket->ops = peer->tcp.sk_cb.ops;
+
+ rcu_assign_sk_user_data(sock->sk, NULL);
+}
+
+void ovpn_tcp_socket_wait_finish(struct ovpn_socket *sock)
+{
+ struct ovpn_peer *peer = sock->peer;
+
+ /* NOTE: we don't wait for peer->tcp.defer_del_work to finish:
+ * either the worker is not running or this function
+ * was invoked by that worker.
+ */
+
+ cancel_work_sync(&sock->tcp_tx_work);
+ strp_done(&peer->tcp.strp);
+
+ skb_queue_purge(&peer->tcp.out_queue);
+ kfree_skb(peer->tcp.out_msg.skb);
+ peer->tcp.out_msg.skb = NULL;
+}
+
+static void ovpn_tcp_send_sock(struct ovpn_peer *peer, struct sock *sk)
+{
+ struct sk_buff *skb = peer->tcp.out_msg.skb;
+ int ret, flags;
+
+ if (!skb)
+ return;
+
+ if (peer->tcp.tx_in_progress)
+ return;
+
+ peer->tcp.tx_in_progress = true;
+
+ do {
+ flags = ovpn_skb_cb(skb)->nosignal ? MSG_NOSIGNAL : 0;
+ ret = skb_send_sock_locked_with_flags(sk, skb,
+ peer->tcp.out_msg.offset,
+ peer->tcp.out_msg.len,
+ flags);
+ if (unlikely(ret < 0)) {
+ if (ret == -EAGAIN)
+ goto out;
+
+ net_warn_ratelimited("%s: TCP error to peer %u: %d\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, ret);
+
+			/* in case of a TCP error we can't recover the VPN
+			 * stream, therefore we abort the connection
+ */
+ ovpn_peer_hold(peer);
+ schedule_work(&peer->tcp.defer_del_work);
+
+ /* we bail out immediately and keep tx_in_progress set
+ * to true. This way we prevent more TX attempts
+ * which would lead to more invocations of
+ * schedule_work()
+ */
+ return;
+ }
+
+ peer->tcp.out_msg.len -= ret;
+ peer->tcp.out_msg.offset += ret;
+ } while (peer->tcp.out_msg.len > 0);
+
+ if (!peer->tcp.out_msg.len) {
+ preempt_disable();
+ dev_dstats_tx_add(peer->ovpn->dev, skb->len);
+ preempt_enable();
+ }
+
+ kfree_skb(peer->tcp.out_msg.skb);
+ peer->tcp.out_msg.skb = NULL;
+ peer->tcp.out_msg.len = 0;
+ peer->tcp.out_msg.offset = 0;
+
+out:
+ peer->tcp.tx_in_progress = false;
+}
+
+void ovpn_tcp_tx_work(struct work_struct *work)
+{
+ struct ovpn_socket *sock;
+
+ sock = container_of(work, struct ovpn_socket, tcp_tx_work);
+
+ lock_sock(sock->sock->sk);
+ if (sock->peer)
+ ovpn_tcp_send_sock(sock->peer, sock->sock->sk);
+ release_sock(sock->sock->sk);
+}
+
+static void ovpn_tcp_send_sock_skb(struct ovpn_peer *peer, struct sock *sk,
+ struct sk_buff *skb)
+{
+ if (peer->tcp.out_msg.skb)
+ ovpn_tcp_send_sock(peer, sk);
+
+ if (peer->tcp.out_msg.skb) {
+ dev_dstats_tx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+ return;
+ }
+
+ peer->tcp.out_msg.skb = skb;
+ peer->tcp.out_msg.len = skb->len;
+ peer->tcp.out_msg.offset = 0;
+ ovpn_tcp_send_sock(peer, sk);
+}
+
+void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct socket *sock,
+ struct sk_buff *skb)
+{
+ u16 len = skb->len;
+
+ *(__be16 *)__skb_push(skb, sizeof(u16)) = htons(len);
+
+ spin_lock_nested(&sock->sk->sk_lock.slock, OVPN_TCP_DEPTH_NESTING);
+ if (sock_owned_by_user(sock->sk)) {
+ if (skb_queue_len(&peer->tcp.out_queue) >=
+ READ_ONCE(net_hotdata.max_backlog)) {
+ dev_dstats_tx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+ goto unlock;
+ }
+ __skb_queue_tail(&peer->tcp.out_queue, skb);
+ } else {
+ ovpn_tcp_send_sock_skb(peer, sock->sk, skb);
+ }
+unlock:
+ spin_unlock(&sock->sk->sk_lock.slock);
+}
+
+static void ovpn_tcp_release(struct sock *sk)
+{
+ struct sk_buff_head queue;
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ struct sk_buff *skb;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (!sock) {
+ rcu_read_unlock();
+ return;
+ }
+
+ peer = sock->peer;
+
+ /* during initialization this function is called before
+ * assigning sock->peer
+ */
+ if (unlikely(!peer || !ovpn_peer_hold(peer))) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
+ __skb_queue_head_init(&queue);
+ skb_queue_splice_init(&peer->tcp.out_queue, &queue);
+
+ while ((skb = __skb_dequeue(&queue)))
+ ovpn_tcp_send_sock_skb(peer, sk, skb);
+
+ peer->tcp.sk_cb.prot->release_cb(sk);
+ ovpn_peer_put(peer);
+}
+
+static int ovpn_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+{
+ struct ovpn_socket *sock;
+ int ret, linear = PAGE_SIZE;
+ struct ovpn_peer *peer;
+ struct sk_buff *skb;
+
+ lock_sock(sk);
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!sock || !sock->peer || !ovpn_peer_hold(sock->peer))) {
+ rcu_read_unlock();
+ release_sock(sk);
+ return -EIO;
+ }
+ rcu_read_unlock();
+ peer = sock->peer;
+
+ if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL)) {
+ ret = -EOPNOTSUPP;
+ goto peer_free;
+ }
+
+ if (peer->tcp.out_msg.skb) {
+ ret = -EAGAIN;
+ goto peer_free;
+ }
+
+ if (size < linear)
+ linear = size;
+
+ skb = sock_alloc_send_pskb(sk, linear, size - linear,
+ msg->msg_flags & MSG_DONTWAIT, &ret, 0);
+ if (!skb) {
+ net_err_ratelimited("%s: skb alloc failed: %d\n",
+ netdev_name(peer->ovpn->dev), ret);
+ goto peer_free;
+ }
+
+ skb_put(skb, linear);
+ skb->len = size;
+ skb->data_len = size - linear;
+
+ ret = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
+ if (ret) {
+ kfree_skb(skb);
+ net_err_ratelimited("%s: skb copy from iter failed: %d\n",
+ netdev_name(peer->ovpn->dev), ret);
+ goto peer_free;
+ }
+
+ ovpn_skb_cb(skb)->nosignal = msg->msg_flags & MSG_NOSIGNAL;
+ ovpn_tcp_send_sock_skb(peer, sk, skb);
+ ret = size;
+peer_free:
+ release_sock(sk);
+ ovpn_peer_put(peer);
+ return ret;
+}
+
+static int ovpn_tcp_disconnect(struct sock *sk, int flags)
+{
+ return -EBUSY;
+}
+
+static void ovpn_tcp_data_ready(struct sock *sk)
+{
+ struct ovpn_socket *sock;
+
+ trace_sk_data_ready(sk);
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (likely(sock && sock->peer))
+ strp_data_ready(&sock->peer->tcp.strp);
+ rcu_read_unlock();
+}
+
+static void ovpn_tcp_write_space(struct sock *sk)
+{
+ struct ovpn_socket *sock;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (likely(sock && sock->peer)) {
+ schedule_work(&sock->tcp_tx_work);
+ sock->peer->tcp.sk_cb.sk_write_space(sk);
+ }
+ rcu_read_unlock();
+}
+
+static void ovpn_tcp_build_protos(struct proto *new_prot,
+ struct proto_ops *new_ops,
+ const struct proto *orig_prot,
+ const struct proto_ops *orig_ops);
+
+static void ovpn_tcp_peer_del_work(struct work_struct *work)
+{
+ struct ovpn_peer *peer = container_of(work, struct ovpn_peer,
+ tcp.defer_del_work);
+
+ ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_TRANSPORT_ERROR);
+ ovpn_peer_put(peer);
+}
+
+/* Set TCP encapsulation callbacks */
+int ovpn_tcp_socket_attach(struct ovpn_socket *ovpn_sock,
+ struct ovpn_peer *peer)
+{
+ struct socket *sock = ovpn_sock->sock;
+ struct strp_callbacks cb = {
+ .rcv_msg = ovpn_tcp_rcv,
+ .parse_msg = ovpn_tcp_parse,
+ };
+ int ret;
+
+ /* make sure no pre-existing encapsulation handler exists */
+ if (sock->sk->sk_user_data)
+ return -EBUSY;
+
+ /* only a fully connected socket is expected. Connection should be
+ * handled in userspace
+ */
+ if (sock->sk->sk_state != TCP_ESTABLISHED) {
+ net_err_ratelimited("%s: provided TCP socket is not in ESTABLISHED state: %d\n",
+ netdev_name(peer->ovpn->dev),
+ sock->sk->sk_state);
+ return -EINVAL;
+ }
+
+ ret = strp_init(&peer->tcp.strp, sock->sk, &cb);
+ if (ret < 0) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ return ret;
+ }
+
+ INIT_WORK(&peer->tcp.defer_del_work, ovpn_tcp_peer_del_work);
+
+ __sk_dst_reset(sock->sk);
+ skb_queue_head_init(&peer->tcp.user_queue);
+ skb_queue_head_init(&peer->tcp.out_queue);
+
+ /* save current CBs so that they can be restored upon socket release */
+ peer->tcp.sk_cb.sk_data_ready = sock->sk->sk_data_ready;
+ peer->tcp.sk_cb.sk_write_space = sock->sk->sk_write_space;
+ peer->tcp.sk_cb.prot = sock->sk->sk_prot;
+ peer->tcp.sk_cb.ops = sock->sk->sk_socket->ops;
+
+ /* assign our static CBs and prot/ops */
+ sock->sk->sk_data_ready = ovpn_tcp_data_ready;
+ sock->sk->sk_write_space = ovpn_tcp_write_space;
+
+ if (sock->sk->sk_family == AF_INET) {
+ sock->sk->sk_prot = &ovpn_tcp_prot;
+ sock->sk->sk_socket->ops = &ovpn_tcp_ops;
+ } else {
+ sock->sk->sk_prot = &ovpn_tcp6_prot;
+ sock->sk->sk_socket->ops = &ovpn_tcp6_ops;
+ }
+
+ /* avoid using task_frag */
+ sock->sk->sk_allocation = GFP_ATOMIC;
+ sock->sk->sk_use_task_frag = false;
+
+ /* enqueue the RX worker */
+ strp_check_rcv(&peer->tcp.strp);
+
+ return 0;
+}
+
+static void ovpn_tcp_close(struct sock *sk, long timeout)
+{
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (!sock || !sock->peer || !ovpn_peer_hold(sock->peer)) {
+ rcu_read_unlock();
+ return;
+ }
+ peer = sock->peer;
+ rcu_read_unlock();
+
+	ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_TRANSPORT_DISCONNECT);
+ peer->tcp.sk_cb.prot->close(sk, timeout);
+ ovpn_peer_put(peer);
+}
+
+static __poll_t ovpn_tcp_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ __poll_t mask = datagram_poll(file, sock, wait);
+ struct ovpn_socket *ovpn_sock;
+
+ rcu_read_lock();
+ ovpn_sock = rcu_dereference_sk_user_data(sock->sk);
+ if (ovpn_sock && ovpn_sock->peer &&
+ !skb_queue_empty(&ovpn_sock->peer->tcp.user_queue))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ rcu_read_unlock();
+
+ return mask;
+}
+
+static void ovpn_tcp_build_protos(struct proto *new_prot,
+ struct proto_ops *new_ops,
+ const struct proto *orig_prot,
+ const struct proto_ops *orig_ops)
+{
+ memcpy(new_prot, orig_prot, sizeof(*new_prot));
+ memcpy(new_ops, orig_ops, sizeof(*new_ops));
+ new_prot->recvmsg = ovpn_tcp_recvmsg;
+ new_prot->sendmsg = ovpn_tcp_sendmsg;
+ new_prot->disconnect = ovpn_tcp_disconnect;
+ new_prot->close = ovpn_tcp_close;
+ new_prot->release_cb = ovpn_tcp_release;
+ new_ops->poll = ovpn_tcp_poll;
+}
+
+/* Initialize TCP static objects */
+void __init ovpn_tcp_init(void)
+{
+ ovpn_tcp_build_protos(&ovpn_tcp_prot, &ovpn_tcp_ops, &tcp_prot,
+ &inet_stream_ops);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ ovpn_tcp_build_protos(&ovpn_tcp6_prot, &ovpn_tcp6_ops, &tcpv6_prot,
+ &inet6_stream_ops);
+#endif
+}
diff --git a/drivers/net/ovpn/tcp.h b/drivers/net/ovpn/tcp.h
new file mode 100644
index 000000000000..10aefa834cf3
--- /dev/null
+++ b/drivers/net/ovpn/tcp.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_TCP_H_
+#define _NET_OVPN_TCP_H_
+
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include "peer.h"
+#include "skb.h"
+#include "socket.h"
+
+void __init ovpn_tcp_init(void);
+
+int ovpn_tcp_socket_attach(struct ovpn_socket *ovpn_sock,
+ struct ovpn_peer *peer);
+void ovpn_tcp_socket_detach(struct ovpn_socket *ovpn_sock);
+void ovpn_tcp_socket_wait_finish(struct ovpn_socket *sock);
+
+/* Prepare skb and enqueue it for sending to peer.
+ *
+ * Preparation consists of prepending the skb payload with its size.
+ * Required by the OpenVPN protocol in order to extract packets from
+ * the TCP stream on the receiver side.
+ */
+void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct socket *sock, struct sk_buff *skb);
+void ovpn_tcp_tx_work(struct work_struct *work);
+
+#endif /* _NET_OVPN_TCP_H_ */
diff --git a/drivers/net/ovpn/udp.c b/drivers/net/ovpn/udp.c
new file mode 100644
index 000000000000..c9e189056f33
--- /dev/null
+++ b/drivers/net/ovpn/udp.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/udp.h>
+#include <net/addrconf.h>
+#include <net/dst_cache.h>
+#include <net/route.h>
+#include <net/ipv6_stubs.h>
+#include <net/transp_v6.h>
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "bind.h"
+#include "io.h"
+#include "peer.h"
+#include "proto.h"
+#include "socket.h"
+#include "udp.h"
+
+/* Retrieve the corresponding ovpn object from a UDP socket.
+ * rcu_read_lock must be held on entry.
+ */
+static struct ovpn_socket *ovpn_socket_from_udp_sock(struct sock *sk)
+{
+ struct ovpn_socket *ovpn_sock;
+
+ if (unlikely(READ_ONCE(udp_sk(sk)->encap_type) != UDP_ENCAP_OVPNINUDP))
+ return NULL;
+
+ ovpn_sock = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!ovpn_sock))
+ return NULL;
+
+ /* make sure that sk matches our stored transport socket */
+ if (unlikely(!ovpn_sock->sock || sk != ovpn_sock->sock->sk))
+ return NULL;
+
+ return ovpn_sock;
+}
+
+/**
+ * ovpn_udp_encap_recv - Start processing a received UDP packet.
+ * @sk: socket over which the packet was received
+ * @skb: the received packet
+ *
+ * If the first byte of the payload is:
+ * - DATA_V2 the packet is accepted for further processing,
+ * - DATA_V1 the packet is dropped as not supported,
+ * - anything else the packet is forwarded to the UDP stack for
+ * delivery to user space.
+ *
+ * Return:
+ * 0 if skb was consumed or dropped
+ * >0 if skb should be passed up to userspace as UDP (packet not consumed)
+ * <0 if skb should be resubmitted as proto -N (packet not consumed)
+ */
+static int ovpn_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct ovpn_socket *ovpn_sock;
+ struct ovpn_priv *ovpn;
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ u8 opcode;
+
+ ovpn_sock = ovpn_socket_from_udp_sock(sk);
+ if (unlikely(!ovpn_sock)) {
+ net_err_ratelimited("ovpn: %s invoked on non ovpn socket\n",
+ __func__);
+ goto drop_noovpn;
+ }
+
+ ovpn = ovpn_sock->ovpn;
+ if (unlikely(!ovpn)) {
+ net_err_ratelimited("ovpn: cannot obtain ovpn object from UDP socket\n");
+ goto drop_noovpn;
+ }
+
+ /* Make sure the first 4 bytes of the skb data buffer after the UDP
+ * header are accessible.
+ * They are required to fetch the OP code, the key ID and the peer ID.
+ */
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct udphdr) +
+ OVPN_OPCODE_SIZE))) {
+ net_dbg_ratelimited("%s: packet too small from UDP socket\n",
+ netdev_name(ovpn->dev));
+ goto drop;
+ }
+
+ opcode = ovpn_opcode_from_skb(skb, sizeof(struct udphdr));
+ if (unlikely(opcode != OVPN_DATA_V2)) {
+ /* DATA_V1 is not supported */
+ if (opcode == OVPN_DATA_V1)
+ goto drop;
+
+ /* unknown or control packet: let it bubble up to userspace */
+ return 1;
+ }
+
+ peer_id = ovpn_peer_id_from_skb(skb, sizeof(struct udphdr));
+ /* some OpenVPN server implementations send data packets with the
+ * peer-id set to UNDEF. In this case we skip the peer lookup by peer-id
+ * and we try with the transport address
+ */
+ if (peer_id == OVPN_PEER_ID_UNDEF)
+ peer = ovpn_peer_get_by_transp_addr(ovpn, skb);
+ else
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+
+ if (unlikely(!peer))
+ goto drop;
+
+ /* pop off outer UDP header */
+ __skb_pull(skb, sizeof(struct udphdr));
+ ovpn_recv(peer, skb);
+ return 0;
+
+drop:
+ dev_dstats_rx_dropped(ovpn->dev);
+drop_noovpn:
+ kfree_skb(skb);
+ return 0;
+}
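For reference, the four bytes checked above carry the opcode, key ID and peer ID of a data packet. A hedged, userspace-style sketch of how those fields are packed (layout per the OpenVPN wire format; the real in-kernel accessors such as ovpn_opcode_from_skb() live in proto.h, which is not part of this hunk):

/* Userspace-style sketch of the 4-byte DATA_V2 header (illustration
 * only): 5 bits opcode, 3 bits key id, 24 bits peer id, big endian.
 * A peer id of 0x00ffffff means "undefined".
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

static inline uint8_t ovpn_hdr_opcode(const uint8_t *buf)
{
	return buf[0] >> 3;			/* upper 5 bits */
}

static inline uint8_t ovpn_hdr_key_id(const uint8_t *buf)
{
	return buf[0] & 0x07;			/* lower 3 bits */
}

static inline uint32_t ovpn_hdr_peer_id(const uint8_t *buf)
{
	uint32_t v;

	memcpy(&v, buf, sizeof(v));
	return ntohl(v) & 0x00ffffff;		/* lower 24 bits */
}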
+
+/**
+ * ovpn_udp4_output - send IPv4 packet over udp socket
+ * @peer: the destination peer
+ * @bind: the binding related to the destination peer
+ * @cache: dst cache
+ * @sk: the socket to send the packet over
+ * @skb: the packet to send
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_udp4_output(struct ovpn_peer *peer, struct ovpn_bind *bind,
+ struct dst_cache *cache, struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct rtable *rt;
+ struct flowi4 fl = {
+ .saddr = bind->local.ipv4.s_addr,
+ .daddr = bind->remote.in4.sin_addr.s_addr,
+ .fl4_sport = inet_sk(sk)->inet_sport,
+ .fl4_dport = bind->remote.in4.sin_port,
+ .flowi4_proto = sk->sk_protocol,
+ .flowi4_mark = sk->sk_mark,
+ };
+ int ret;
+
+ local_bh_disable();
+ rt = dst_cache_get_ip4(cache, &fl.saddr);
+ if (rt)
+ goto transmit;
+
+ if (unlikely(!inet_confirm_addr(sock_net(sk), NULL, 0, fl.saddr,
+ RT_SCOPE_HOST))) {
+ /* we may end up here when the cached address is not usable
+ * anymore. In this case we reset address/cache and perform a
+		 * new lookup
+ */
+ fl.saddr = 0;
+ spin_lock_bh(&peer->lock);
+ bind->local.ipv4.s_addr = 0;
+ spin_unlock_bh(&peer->lock);
+ dst_cache_reset(cache);
+ }
+
+ rt = ip_route_output_flow(sock_net(sk), &fl, sk);
+ if (IS_ERR(rt) && PTR_ERR(rt) == -EINVAL) {
+ fl.saddr = 0;
+ spin_lock_bh(&peer->lock);
+ bind->local.ipv4.s_addr = 0;
+ spin_unlock_bh(&peer->lock);
+ dst_cache_reset(cache);
+
+ rt = ip_route_output_flow(sock_net(sk), &fl, sk);
+ }
+
+ if (IS_ERR(rt)) {
+ ret = PTR_ERR(rt);
+ net_dbg_ratelimited("%s: no route to host %pISpc: %d\n",
+ netdev_name(peer->ovpn->dev),
+ &bind->remote.in4,
+ ret);
+ goto err;
+ }
+ dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
+
+transmit:
+ udp_tunnel_xmit_skb(rt, sk, skb, fl.saddr, fl.daddr, 0,
+ ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport,
+ fl.fl4_dport, false, sk->sk_no_check_tx);
+ ret = 0;
+err:
+ local_bh_enable();
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+/**
+ * ovpn_udp6_output - send IPv6 packet over udp socket
+ * @peer: the destination peer
+ * @bind: the binding related to the destination peer
+ * @cache: dst cache
+ * @sk: the socket to send the packet over
+ * @skb: the packet to send
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_udp6_output(struct ovpn_peer *peer, struct ovpn_bind *bind,
+ struct dst_cache *cache, struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct dst_entry *dst;
+ int ret;
+
+ struct flowi6 fl = {
+ .saddr = bind->local.ipv6,
+ .daddr = bind->remote.in6.sin6_addr,
+ .fl6_sport = inet_sk(sk)->inet_sport,
+ .fl6_dport = bind->remote.in6.sin6_port,
+ .flowi6_proto = sk->sk_protocol,
+ .flowi6_mark = sk->sk_mark,
+ .flowi6_oif = bind->remote.in6.sin6_scope_id,
+ };
+
+ local_bh_disable();
+ dst = dst_cache_get_ip6(cache, &fl.saddr);
+ if (dst)
+ goto transmit;
+
+ if (unlikely(!ipv6_chk_addr(sock_net(sk), &fl.saddr, NULL, 0))) {
+ /* we may end up here when the cached address is not usable
+ * anymore. In this case we reset address/cache and perform a
+		 * new lookup
+ */
+ fl.saddr = in6addr_any;
+ spin_lock_bh(&peer->lock);
+ bind->local.ipv6 = in6addr_any;
+ spin_unlock_bh(&peer->lock);
+ dst_cache_reset(cache);
+ }
+
+ dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sk), sk, &fl, NULL);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ net_dbg_ratelimited("%s: no route to host %pISpc: %d\n",
+ netdev_name(peer->ovpn->dev),
+ &bind->remote.in6, ret);
+ goto err;
+ }
+ dst_cache_set_ip6(cache, dst, &fl.saddr);
+
+transmit:
+ udp_tunnel6_xmit_skb(dst, sk, skb, skb->dev, &fl.saddr, &fl.daddr, 0,
+ ip6_dst_hoplimit(dst), 0, fl.fl6_sport,
+ fl.fl6_dport, udp_get_no_check6_tx(sk));
+ ret = 0;
+err:
+ local_bh_enable();
+ return ret;
+}
+#endif
+
+/**
+ * ovpn_udp_output - transmit skb using udp-tunnel
+ * @peer: the destination peer
+ * @cache: dst cache
+ * @sk: the socket to send the packet over
+ * @skb: the packet to send
+ *
+ * rcu_read_lock should be held on entry.
+ * On return, the skb is consumed.
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_udp_output(struct ovpn_peer *peer, struct dst_cache *cache,
+ struct sock *sk, struct sk_buff *skb)
+{
+ struct ovpn_bind *bind;
+ int ret;
+
+ /* set sk to null if skb is already orphaned */
+ if (!skb->destructor)
+ skb->sk = NULL;
+
+ rcu_read_lock();
+ bind = rcu_dereference(peer->bind);
+ if (unlikely(!bind)) {
+ net_warn_ratelimited("%s: no bind for remote peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ switch (bind->remote.in4.sin_family) {
+ case AF_INET:
+ ret = ovpn_udp4_output(peer, bind, cache, sk, skb);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ ret = ovpn_udp6_output(peer, bind, cache, sk, skb);
+ break;
+#endif
+ default:
+ ret = -EAFNOSUPPORT;
+ break;
+ }
+
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * ovpn_udp_send_skb - prepare skb and send it via UDP
+ * @peer: the destination peer
+ * @sock: the RCU protected peer socket
+ * @skb: the packet to send
+ */
+void ovpn_udp_send_skb(struct ovpn_peer *peer, struct socket *sock,
+ struct sk_buff *skb)
+{
+ int ret = -1;
+
+ skb->dev = peer->ovpn->dev;
+ /* no checksum performed at this layer */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* get socket info */
+ if (unlikely(!sock)) {
+ net_warn_ratelimited("%s: no sock for remote peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto out;
+ }
+
+ /* crypto layer -> transport (UDP) */
+ ret = ovpn_udp_output(peer, &peer->dst_cache, sock->sk, skb);
+out:
+ if (unlikely(ret < 0)) {
+ kfree_skb(skb);
+ return;
+ }
+}
+
+static void ovpn_udp_encap_destroy(struct sock *sk)
+{
+ struct ovpn_socket *sock;
+ struct ovpn_priv *ovpn;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (!sock || !sock->ovpn) {
+ rcu_read_unlock();
+ return;
+ }
+ ovpn = sock->ovpn;
+ rcu_read_unlock();
+
+ ovpn_peers_free(ovpn, sk, OVPN_DEL_PEER_REASON_TRANSPORT_DISCONNECT);
+}
+
+/**
+ * ovpn_udp_socket_attach - set udp-tunnel CBs on socket and link it to ovpn
+ * @ovpn_sock: socket to configure
+ * @ovpn: the ovpn instance to link
+ *
+ * After invoking this function, the sock will be controlled by ovpn so that
+ * any incoming packet may be processed by ovpn first.
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
+ struct ovpn_priv *ovpn)
+{
+ struct udp_tunnel_sock_cfg cfg = {
+ .encap_type = UDP_ENCAP_OVPNINUDP,
+ .encap_rcv = ovpn_udp_encap_recv,
+ .encap_destroy = ovpn_udp_encap_destroy,
+ };
+ struct socket *sock = ovpn_sock->sock;
+ struct ovpn_socket *old_data;
+ int ret;
+
+ /* make sure no pre-existing encapsulation handler exists */
+ rcu_read_lock();
+ old_data = rcu_dereference_sk_user_data(sock->sk);
+ if (!old_data) {
+ /* socket is currently unused - we can take it */
+ rcu_read_unlock();
+ setup_udp_tunnel_sock(sock_net(sock->sk), sock, &cfg);
+ return 0;
+ }
+
+ /* socket is in use. We need to understand if it's owned by this ovpn
+ * instance or by something else.
+ * In the former case, we can increase the refcounter and happily
+ * use it, because the same UDP socket is expected to be shared among
+ * different peers.
+ *
+	 * Unlike TCP, a single UDP socket can be used to talk to many remote
+	 * hosts and therefore ovpn instantiates only one for all its peers
+ */
+ if ((READ_ONCE(udp_sk(sock->sk)->encap_type) == UDP_ENCAP_OVPNINUDP) &&
+ old_data->ovpn == ovpn) {
+ netdev_dbg(ovpn->dev,
+ "provided socket already owned by this interface\n");
+ ret = -EALREADY;
+ } else {
+ netdev_dbg(ovpn->dev,
+ "provided socket already taken by other user\n");
+ ret = -EBUSY;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+/**
+ * ovpn_udp_socket_detach - clean udp-tunnel status for this socket
+ * @ovpn_sock: the socket to clean
+ */
+void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock)
+{
+ struct udp_tunnel_sock_cfg cfg = { };
+
+ setup_udp_tunnel_sock(sock_net(ovpn_sock->sock->sk), ovpn_sock->sock,
+ &cfg);
+}
diff --git a/drivers/net/ovpn/udp.h b/drivers/net/ovpn/udp.h
new file mode 100644
index 000000000000..9994eb6e0428
--- /dev/null
+++ b/drivers/net/ovpn/udp.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_UDP_H_
+#define _NET_OVPN_UDP_H_
+
+#include <net/sock.h>
+
+struct ovpn_peer;
+struct ovpn_priv;
+struct socket;
+
+int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
+ struct ovpn_priv *ovpn);
+void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock);
+
+void ovpn_udp_send_skb(struct ovpn_peer *peer, struct socket *sock,
+ struct sk_buff *skb);
+
+#endif /* _NET_OVPN_UDP_H_ */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f1381aff0f89..beb084ee4f4d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4145,6 +4145,8 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
int len);
+int skb_send_sock_locked_with_flags(struct sock *sk, struct sk_buff *skb,
+ int offset, int len, int flags);
int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 318386cc5b0d..3ad2d5d98034 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -1986,4 +1986,19 @@ enum {
#define IFLA_DSA_MAX (__IFLA_DSA_MAX - 1)
+/* OVPN section */
+
+enum ovpn_mode {
+ OVPN_MODE_P2P,
+ OVPN_MODE_MP,
+};
+
+enum {
+ IFLA_OVPN_UNSPEC,
+ IFLA_OVPN_MODE,
+ __IFLA_OVPN_MAX,
+};
+
+#define IFLA_OVPN_MAX (__IFLA_OVPN_MAX - 1)
+
#endif /* _UAPI_LINUX_IF_LINK_H */
diff --git a/include/uapi/linux/ovpn.h b/include/uapi/linux/ovpn.h
new file mode 100644
index 000000000000..680d1522dc87
--- /dev/null
+++ b/include/uapi/linux/ovpn.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/ovpn.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_OVPN_H
+#define _UAPI_LINUX_OVPN_H
+
+#define OVPN_FAMILY_NAME "ovpn"
+#define OVPN_FAMILY_VERSION 1
+
+#define OVPN_NONCE_TAIL_SIZE 8
+
+enum ovpn_cipher_alg {
+ OVPN_CIPHER_ALG_NONE,
+ OVPN_CIPHER_ALG_AES_GCM,
+ OVPN_CIPHER_ALG_CHACHA20_POLY1305,
+};
+
+enum ovpn_del_peer_reason {
+ OVPN_DEL_PEER_REASON_TEARDOWN,
+ OVPN_DEL_PEER_REASON_USERSPACE,
+ OVPN_DEL_PEER_REASON_EXPIRED,
+ OVPN_DEL_PEER_REASON_TRANSPORT_ERROR,
+ OVPN_DEL_PEER_REASON_TRANSPORT_DISCONNECT,
+};
+
+enum ovpn_key_slot {
+ OVPN_KEY_SLOT_PRIMARY,
+ OVPN_KEY_SLOT_SECONDARY,
+};
+
+enum {
+ OVPN_A_PEER_ID = 1,
+ OVPN_A_PEER_REMOTE_IPV4,
+ OVPN_A_PEER_REMOTE_IPV6,
+ OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID,
+ OVPN_A_PEER_REMOTE_PORT,
+ OVPN_A_PEER_SOCKET,
+ OVPN_A_PEER_SOCKET_NETNSID,
+ OVPN_A_PEER_VPN_IPV4,
+ OVPN_A_PEER_VPN_IPV6,
+ OVPN_A_PEER_LOCAL_IPV4,
+ OVPN_A_PEER_LOCAL_IPV6,
+ OVPN_A_PEER_LOCAL_PORT,
+ OVPN_A_PEER_KEEPALIVE_INTERVAL,
+ OVPN_A_PEER_KEEPALIVE_TIMEOUT,
+ OVPN_A_PEER_DEL_REASON,
+ OVPN_A_PEER_VPN_RX_BYTES,
+ OVPN_A_PEER_VPN_TX_BYTES,
+ OVPN_A_PEER_VPN_RX_PACKETS,
+ OVPN_A_PEER_VPN_TX_PACKETS,
+ OVPN_A_PEER_LINK_RX_BYTES,
+ OVPN_A_PEER_LINK_TX_BYTES,
+ OVPN_A_PEER_LINK_RX_PACKETS,
+ OVPN_A_PEER_LINK_TX_PACKETS,
+
+ __OVPN_A_PEER_MAX,
+ OVPN_A_PEER_MAX = (__OVPN_A_PEER_MAX - 1)
+};
+
+enum {
+ OVPN_A_KEYCONF_PEER_ID = 1,
+ OVPN_A_KEYCONF_SLOT,
+ OVPN_A_KEYCONF_KEY_ID,
+ OVPN_A_KEYCONF_CIPHER_ALG,
+ OVPN_A_KEYCONF_ENCRYPT_DIR,
+ OVPN_A_KEYCONF_DECRYPT_DIR,
+
+ __OVPN_A_KEYCONF_MAX,
+ OVPN_A_KEYCONF_MAX = (__OVPN_A_KEYCONF_MAX - 1)
+};
+
+enum {
+ OVPN_A_KEYDIR_CIPHER_KEY = 1,
+ OVPN_A_KEYDIR_NONCE_TAIL,
+
+ __OVPN_A_KEYDIR_MAX,
+ OVPN_A_KEYDIR_MAX = (__OVPN_A_KEYDIR_MAX - 1)
+};
+
+enum {
+ OVPN_A_IFINDEX = 1,
+ OVPN_A_PEER,
+ OVPN_A_KEYCONF,
+
+ __OVPN_A_MAX,
+ OVPN_A_MAX = (__OVPN_A_MAX - 1)
+};
+
+enum {
+ OVPN_CMD_PEER_NEW = 1,
+ OVPN_CMD_PEER_SET,
+ OVPN_CMD_PEER_GET,
+ OVPN_CMD_PEER_DEL,
+ OVPN_CMD_PEER_DEL_NTF,
+ OVPN_CMD_KEY_NEW,
+ OVPN_CMD_KEY_GET,
+ OVPN_CMD_KEY_SWAP,
+ OVPN_CMD_KEY_SWAP_NTF,
+ OVPN_CMD_KEY_DEL,
+
+ __OVPN_CMD_MAX,
+ OVPN_CMD_MAX = (__OVPN_CMD_MAX - 1)
+};
+
+#define OVPN_MCGRP_PEERS "peers"
+
+#endif /* _UAPI_LINUX_OVPN_H */
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index d85d671deed3..edca3e430305 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -43,5 +43,6 @@ struct udphdr {
#define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */
#define UDP_ENCAP_RXRPC 6
#define TCP_ENCAP_ESPINTCP 7 /* Yikes, this is really xfrm encap types. */
+#define UDP_ENCAP_OVPNINUDP 8 /* OpenVPN traffic */
#endif /* _UAPI_LINUX_UDP_H */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 74a2d886a35b..d73ad79fe739 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3227,7 +3227,7 @@ static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg)
typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg);
static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
- int len, sendmsg_func sendmsg)
+ int len, sendmsg_func sendmsg, int flags)
{
unsigned int orig_len = len;
struct sk_buff *head = skb;
@@ -3245,7 +3245,7 @@ do_frag_list:
kv.iov_base = skb->data + offset;
kv.iov_len = slen;
memset(&msg, 0, sizeof(msg));
- msg.msg_flags = MSG_DONTWAIT;
+ msg.msg_flags = MSG_DONTWAIT | flags;
iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen);
ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
@@ -3282,7 +3282,8 @@ do_frag_list:
while (slen) {
struct bio_vec bvec;
struct msghdr msg = {
- .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT,
+ .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT |
+ flags,
};
bvec_set_page(&bvec, skb_frag_page(frag), slen,
@@ -3328,14 +3329,21 @@ error:
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
int len)
{
- return __skb_send_sock(sk, skb, offset, len, sendmsg_locked);
+ return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, 0);
}
EXPORT_SYMBOL_GPL(skb_send_sock_locked);
+int skb_send_sock_locked_with_flags(struct sock *sk, struct sk_buff *skb,
+ int offset, int len, int flags)
+{
+ return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, flags);
+}
+EXPORT_SYMBOL_GPL(skb_send_sock_locked_with_flags);
+
/* Send skb data on a socket. Socket must be unlocked. */
int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
{
- return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked);
+ return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, 0);
}
/**
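The skbuff.c change above adds skb_send_sock_locked_with_flags(), which behaves like skb_send_sock_locked() but ORs the caller-supplied flags into msg_flags of every sendmsg invocation; ovpn passes MSG_NOSIGNAL so that writes on a broken TCP connection do not raise SIGPIPE for the calling task. A minimal, hypothetical in-kernel caller (illustration only; ovpn_tcp_send_sock() above is the real user in this series):

/* Hypothetical caller (illustration only): transmit a whole skb over a
 * TCP socket while holding the socket lock and suppressing SIGPIPE.
 */
#include <linux/skbuff.h>
#include <net/sock.h>

static int example_send_nosignal(struct sock *sk, struct sk_buff *skb)
{
	int ret;

	lock_sock(sk);
	ret = skb_send_sock_locked_with_flags(sk, skb, 0, skb->len,
					      MSG_NOSIGNAL);
	release_sock(sk);

	return ret;
}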
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 85bf681d427b..acaff1296783 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -715,6 +715,7 @@ const struct proto_ops inet6_stream_ops = {
#endif
.set_rcvlowat = tcp_set_rcvlowat,
};
+EXPORT_SYMBOL_GPL(inet6_stream_ops);
const struct proto_ops inet6_dgram_ops = {
.family = PF_INET6,
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index c77c8c8e3d9b..61bb8bf1b507 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -71,6 +71,7 @@ TARGETS += net/hsr
TARGETS += net/mptcp
TARGETS += net/netfilter
TARGETS += net/openvswitch
+TARGETS += net/ovpn
TARGETS += net/packetdrill
TARGETS += net/rds
TARGETS += net/tcp_ao
diff --git a/tools/testing/selftests/net/ovpn/.gitignore b/tools/testing/selftests/net/ovpn/.gitignore
new file mode 100644
index 000000000000..ee44c081ca7c
--- /dev/null
+++ b/tools/testing/selftests/net/ovpn/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0+
+ovpn-cli
diff --git a/tools/testing/selftests/net/ovpn/Makefile b/tools/testing/selftests/net/ovpn/Makefile
new file mode 100644
index 000000000000..2d102878cb6d
--- /dev/null
+++ b/tools/testing/selftests/net/ovpn/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2020-2025 OpenVPN, Inc.
+#
+CFLAGS = -pedantic -Wextra -Wall -Wl,--no-as-needed -g -O0 -ggdb $(KHDR_INCLUDES)
+VAR_CFLAGS = $(shell pkg-config --cflags libnl-3.0 libnl-genl-3.0 2>/dev/null)
+ifeq ($(VAR_CFLAGS),)
+VAR_CFLAGS = -I/usr/include/libnl3
+endif
+CFLAGS += $(VAR_CFLAGS)
+
+
+LDLIBS = -lmbedtls -lmbedcrypto
+VAR_LDLIBS = $(shell pkg-config --libs libnl-3.0 libnl-genl-3.0 2>/dev/null)
+ifeq ($(VAR_LDLIBS),)
+VAR_LDLIBS = -lnl-genl-3 -lnl-3
+endif
+LDLIBS += $(VAR_LDLIBS)
+
+
+TEST_FILES = common.sh
+
+TEST_PROGS = test.sh \
+ test-chachapoly.sh \
+ test-tcp.sh \
+ test-float.sh \
+ test-close-socket.sh \
+ test-close-socket-tcp.sh
+
+TEST_GEN_FILES := ovpn-cli
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/net/ovpn/common.sh b/tools/testing/selftests/net/ovpn/common.sh
new file mode 100644
index 000000000000..7502292a1ee0
--- /dev/null
+++ b/tools/testing/selftests/net/ovpn/common.sh
@@ -0,0 +1,92 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2020-2025 OpenVPN, Inc.
+#
+# Author: Antonio Quartulli <antonio@openvpn.net>
+
+UDP_PEERS_FILE=${UDP_PEERS_FILE:-udp_peers.txt}
+TCP_PEERS_FILE=${TCP_PEERS_FILE:-tcp_peers.txt}
+OVPN_CLI=${OVPN_CLI:-./ovpn-cli}
+ALG=${ALG:-aes}
+PROTO=${PROTO:-UDP}
+FLOAT=${FLOAT:-0}
+
+create_ns() {
+ ip netns add peer${1}
+}
+
+setup_ns() {
+ MODE="P2P"
+
+ if [ ${1} -eq 0 ]; then
+ MODE="MP"
+ for p in $(seq 1 ${NUM_PEERS}); do
+ ip link add veth${p} netns peer0 type veth peer name veth${p} netns peer${p}
+
+ ip -n peer0 addr add 10.10.${p}.1/24 dev veth${p}
+ ip -n peer0 link set veth${p} up
+
+ ip -n peer${p} addr add 10.10.${p}.2/24 dev veth${p}
+ ip -n peer${p} link set veth${p} up
+ done
+ fi
+
+ ip netns exec peer${1} ${OVPN_CLI} new_iface tun${1} $MODE
+ ip -n peer${1} addr add ${2} dev tun${1}
+ ip -n peer${1} link set tun${1} up
+}
+
+add_peer() {
+ if [ "${PROTO}" == "UDP" ]; then
+ if [ ${1} -eq 0 ]; then
+ ip netns exec peer0 ${OVPN_CLI} new_multi_peer tun0 1 ${UDP_PEERS_FILE}
+
+ for p in $(seq 1 ${NUM_PEERS}); do
+ ip netns exec peer0 ${OVPN_CLI} new_key tun0 ${p} 1 0 ${ALG} 0 \
+ data64.key
+ done
+ else
+ ip netns exec peer${1} ${OVPN_CLI} new_peer tun${1} ${1} 1 10.10.${1}.1 1
+ ip netns exec peer${1} ${OVPN_CLI} new_key tun${1} ${1} 1 0 ${ALG} 1 \
+ data64.key
+ fi
+ else
+ if [ ${1} -eq 0 ]; then
+ (ip netns exec peer0 ${OVPN_CLI} listen tun0 1 ${TCP_PEERS_FILE} && {
+ for p in $(seq 1 ${NUM_PEERS}); do
+ ip netns exec peer0 ${OVPN_CLI} new_key tun0 ${p} 1 0 \
+ ${ALG} 0 data64.key
+ done
+ }) &
+ sleep 5
+ else
+ ip netns exec peer${1} ${OVPN_CLI} connect tun${1} ${1} 10.10.${1}.1 1 \
+ data64.key
+ fi
+ fi
+}
+
+cleanup() {
+ # some ovpn-cli processes sleep in background so they need manual poking
+ killall $(basename ${OVPN_CLI}) 2>/dev/null || true
+
+ # netns peer0 is deleted without erasing ifaces first
+ for p in $(seq 1 10); do
+ ip -n peer${p} link set tun${p} down 2>/dev/null || true
+ ip netns exec peer${p} ${OVPN_CLI} del_iface tun${p} 2>/dev/null || true
+ done
+ for p in $(seq 1 10); do
+ ip -n peer0 link del veth${p} 2>/dev/null || true
+ done
+ for p in $(seq 0 10); do
+ ip netns del peer${p} 2>/dev/null || true
+ done
+}
+
+if [ "${PROTO}" == "UDP" ]; then
+ NUM_PEERS=${NUM_PEERS:-$(wc -l ${UDP_PEERS_FILE} | awk '{print $1}')}
+else
+ NUM_PEERS=${NUM_PEERS:-$(wc -l ${TCP_PEERS_FILE} | awk '{print $1}')}
+fi
+
+
diff --git a/tools/testing/selftests/net/ovpn/config b/tools/testing/selftests/net/ovpn/config
new file mode 100644
index 000000000000..71946ba9fa17
--- /dev/null
+++ b/tools/testing/selftests/net/ovpn/config
@@ -0,0 +1,10 @@
+CONFIG_NET=y
+CONFIG_INET=y
+CONFIG_STREAM_PARSER=y
+CONFIG_NET_UDP_TUNNEL=y
+CONFIG_DST_CACHE=y
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_CHACHA20POLY1305=y
+CONFIG_OVPN=m
diff --git a/tools/testing/selftests/net/ovpn/data64.key b/tools/testing/selftests/net/ovpn/data64.key
new file mode 100644
index 000000000000..a99e88c4e290
--- /dev/null
+++ b/tools/testing/selftests/net/ovpn/data64.key
@@ -0,0 +1,5 @@
+jRqMACN7d7/aFQNT8S7jkrBD8uwrgHbG5OQZP2eu4R1Y7tfpS2bf5RHv06Vi163CGoaIiTX99R3B
+ia9ycAH8Wz1+9PWv51dnBLur9jbShlgZ2QHLtUc4a/gfT7zZwULXuuxdLnvR21DDeMBaTbkgbai9
+uvAa7ne1liIgGFzbv+Bas4HDVrygxIxuAnP5Qgc3648IJkZ0QEXPF+O9f0n5+QIvGCxkAUVx+5K6
+KIs+SoeWXnAopELmoGSjUpFtJbagXK82HfdqpuUxT2Tnuef0/14SzVE/vNleBNu2ZbyrSAaah8tE
+BofkPJUBFY+YQcfZNM5Dgrw3i+Bpmpq/gpdg5w==
diff --git a/tools/testing/selftests/net/ovpn/ovpn-cli.c b/tools/testing/selftests/net/ovpn/ovpn-cli.c
new file mode 100644
index 000000000000..69e41fc07fbc
--- /dev/null
+++ b/tools/testing/selftests/net/ovpn/ovpn-cli.c
@@ -0,0 +1,2376 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel accelerator
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <stdbool.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <time.h>
+
+#include <linux/ovpn.h>
+#include <linux/types.h>
+#include <linux/netlink.h>
+
+#include <netlink/socket.h>
+#include <netlink/netlink.h>
+#include <netlink/genl/genl.h>
+#include <netlink/genl/family.h>
+#include <netlink/genl/ctrl.h>
+
+#include <mbedtls/base64.h>
+#include <mbedtls/error.h>
+
+#include <sys/socket.h>
+
+/* defines to make checkpatch happy */
+#define strscpy strncpy
+#define __always_unused __attribute__((__unused__))
+
+/* libnl < 3.5.0 does not set the NLA_F_NESTED on its own, therefore we
+ * have to explicitly do it to prevent the kernel from failing upon
+ * parsing of the message
+ */
+#define nla_nest_start(_msg, _type) \
+ nla_nest_start(_msg, (_type) | NLA_F_NESTED)
+
+/* libnl < 3.11.0 does not implement nla_get_uint() */
+uint64_t ovpn_nla_get_uint(struct nlattr *attr)
+{
+ if (nla_len(attr) == sizeof(uint32_t))
+ return nla_get_u32(attr);
+ else
+ return nla_get_u64(attr);
+}
+
+typedef int (*ovpn_nl_cb)(struct nl_msg *msg, void *arg);
+
+enum ovpn_key_direction {
+ KEY_DIR_IN = 0,
+ KEY_DIR_OUT,
+};
+
+#define KEY_LEN (256 / 8)
+#define NONCE_LEN 8
+
+#define PEER_ID_UNDEF 0x00FFFFFF
+#define MAX_PEERS 10
+
+struct nl_ctx {
+ struct nl_sock *nl_sock;
+ struct nl_msg *nl_msg;
+ struct nl_cb *nl_cb;
+
+ int ovpn_dco_id;
+};
+
+enum ovpn_cmd {
+ CMD_INVALID,
+ CMD_NEW_IFACE,
+ CMD_DEL_IFACE,
+ CMD_LISTEN,
+ CMD_CONNECT,
+ CMD_NEW_PEER,
+ CMD_NEW_MULTI_PEER,
+ CMD_SET_PEER,
+ CMD_DEL_PEER,
+ CMD_GET_PEER,
+ CMD_NEW_KEY,
+ CMD_DEL_KEY,
+ CMD_GET_KEY,
+ CMD_SWAP_KEYS,
+ CMD_LISTEN_MCAST,
+};
+
+struct ovpn_ctx {
+ enum ovpn_cmd cmd;
+
+ __u8 key_enc[KEY_LEN];
+ __u8 key_dec[KEY_LEN];
+ __u8 nonce[NONCE_LEN];
+
+ enum ovpn_cipher_alg cipher;
+
+ sa_family_t sa_family;
+
+ unsigned long peer_id;
+ unsigned long lport;
+
+ union {
+ struct sockaddr_in in4;
+ struct sockaddr_in6 in6;
+ } remote;
+
+ union {
+ struct sockaddr_in in4;
+ struct sockaddr_in6 in6;
+ } peer_ip;
+
+ bool peer_ip_set;
+
+ unsigned int ifindex;
+ char ifname[IFNAMSIZ];
+ enum ovpn_mode mode;
+ bool mode_set;
+
+ int socket;
+ int cli_sockets[MAX_PEERS];
+
+ __u32 keepalive_interval;
+ __u32 keepalive_timeout;
+
+ enum ovpn_key_direction key_dir;
+ enum ovpn_key_slot key_slot;
+ int key_id;
+
+ const char *peers_file;
+};
+
+static int ovpn_nl_recvmsgs(struct nl_ctx *ctx)
+{
+ int ret;
+
+ ret = nl_recvmsgs(ctx->nl_sock, ctx->nl_cb);
+
+ switch (ret) {
+ case -NLE_INTR:
+ fprintf(stderr,
+ "netlink received interrupt due to signal - ignoring\n");
+ break;
+ case -NLE_NOMEM:
+ fprintf(stderr, "netlink out of memory error\n");
+ break;
+ case -NLE_AGAIN:
+ fprintf(stderr,
+ "netlink reports blocking read - aborting wait\n");
+ break;
+ default:
+ if (ret)
+ fprintf(stderr, "netlink reports error (%d): %s\n",
+ ret, nl_geterror(-ret));
+ break;
+ }
+
+ return ret;
+}
+
+static struct nl_ctx *nl_ctx_alloc_flags(struct ovpn_ctx *ovpn, int cmd,
+ int flags)
+{
+ struct nl_ctx *ctx;
+ int err, ret;
+
+ ctx = calloc(1, sizeof(*ctx));
+ if (!ctx)
+ return NULL;
+
+ ctx->nl_sock = nl_socket_alloc();
+ if (!ctx->nl_sock) {
+ fprintf(stderr, "cannot allocate netlink socket\n");
+ goto err_free;
+ }
+
+ nl_socket_set_buffer_size(ctx->nl_sock, 8192, 8192);
+
+ ret = genl_connect(ctx->nl_sock);
+ if (ret) {
+ fprintf(stderr, "cannot connect to generic netlink: %s\n",
+ nl_geterror(ret));
+ goto err_sock;
+ }
+
+ /* enable Extended ACK for detailed error reporting */
+ err = 1;
+ setsockopt(nl_socket_get_fd(ctx->nl_sock), SOL_NETLINK, NETLINK_EXT_ACK,
+ &err, sizeof(err));
+
+ ctx->ovpn_dco_id = genl_ctrl_resolve(ctx->nl_sock, OVPN_FAMILY_NAME);
+ if (ctx->ovpn_dco_id < 0) {
+ fprintf(stderr, "cannot find ovpn_dco netlink component: %d\n",
+ ctx->ovpn_dco_id);
+		goto err_sock;
+ }
+
+ ctx->nl_msg = nlmsg_alloc();
+ if (!ctx->nl_msg) {
+ fprintf(stderr, "cannot allocate netlink message\n");
+ goto err_sock;
+ }
+
+ ctx->nl_cb = nl_cb_alloc(NL_CB_DEFAULT);
+ if (!ctx->nl_cb) {
+ fprintf(stderr, "failed to allocate netlink callback\n");
+ goto err_msg;
+ }
+
+ nl_socket_set_cb(ctx->nl_sock, ctx->nl_cb);
+
+ genlmsg_put(ctx->nl_msg, 0, 0, ctx->ovpn_dco_id, 0, flags, cmd, 0);
+
+ if (ovpn->ifindex > 0)
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_IFINDEX, ovpn->ifindex);
+
+ return ctx;
+nla_put_failure:
+err_msg:
+ nlmsg_free(ctx->nl_msg);
+err_sock:
+ nl_socket_free(ctx->nl_sock);
+err_free:
+ free(ctx);
+ return NULL;
+}
+
+static struct nl_ctx *nl_ctx_alloc(struct ovpn_ctx *ovpn, int cmd)
+{
+ return nl_ctx_alloc_flags(ovpn, cmd, 0);
+}
+
+static void nl_ctx_free(struct nl_ctx *ctx)
+{
+ if (!ctx)
+ return;
+
+ nl_socket_free(ctx->nl_sock);
+ nlmsg_free(ctx->nl_msg);
+ nl_cb_put(ctx->nl_cb);
+ free(ctx);
+}
+
+static int ovpn_nl_cb_error(struct sockaddr_nl (*nla)__always_unused,
+ struct nlmsgerr *err, void *arg)
+{
+ struct nlmsghdr *nlh = (struct nlmsghdr *)err - 1;
+ struct nlattr *tb_msg[NLMSGERR_ATTR_MAX + 1];
+ int len = nlh->nlmsg_len;
+ struct nlattr *attrs;
+ int *ret = arg;
+ int ack_len = sizeof(*nlh) + sizeof(int) + sizeof(*nlh);
+
+ *ret = err->error;
+
+ if (!(nlh->nlmsg_flags & NLM_F_ACK_TLVS))
+ return NL_STOP;
+
+ if (!(nlh->nlmsg_flags & NLM_F_CAPPED))
+ ack_len += err->msg.nlmsg_len - sizeof(*nlh);
+
+ if (len <= ack_len)
+ return NL_STOP;
+
+ attrs = (void *)((uint8_t *)nlh + ack_len);
+ len -= ack_len;
+
+ nla_parse(tb_msg, NLMSGERR_ATTR_MAX, attrs, len, NULL);
+ if (tb_msg[NLMSGERR_ATTR_MSG]) {
+ len = strnlen((char *)nla_data(tb_msg[NLMSGERR_ATTR_MSG]),
+ nla_len(tb_msg[NLMSGERR_ATTR_MSG]));
+ fprintf(stderr, "kernel error: %*s\n", len,
+ (char *)nla_data(tb_msg[NLMSGERR_ATTR_MSG]));
+ }
+
+ if (tb_msg[NLMSGERR_ATTR_MISS_NEST]) {
+ fprintf(stderr, "missing required nesting type %u\n",
+ nla_get_u32(tb_msg[NLMSGERR_ATTR_MISS_NEST]));
+ }
+
+ if (tb_msg[NLMSGERR_ATTR_MISS_TYPE]) {
+ fprintf(stderr, "missing required attribute type %u\n",
+ nla_get_u32(tb_msg[NLMSGERR_ATTR_MISS_TYPE]));
+ }
+
+ return NL_STOP;
+}
+
+static int ovpn_nl_cb_finish(struct nl_msg (*msg)__always_unused,
+ void *arg)
+{
+ int *status = arg;
+
+ *status = 0;
+ return NL_SKIP;
+}
+
+static int ovpn_nl_cb_ack(struct nl_msg (*msg)__always_unused,
+ void *arg)
+{
+ int *status = arg;
+
+ *status = 0;
+ return NL_STOP;
+}
+
+static int ovpn_nl_msg_send(struct nl_ctx *ctx, ovpn_nl_cb cb)
+{
+ int status = 1;
+
+ nl_cb_err(ctx->nl_cb, NL_CB_CUSTOM, ovpn_nl_cb_error, &status);
+ nl_cb_set(ctx->nl_cb, NL_CB_FINISH, NL_CB_CUSTOM, ovpn_nl_cb_finish,
+ &status);
+ nl_cb_set(ctx->nl_cb, NL_CB_ACK, NL_CB_CUSTOM, ovpn_nl_cb_ack, &status);
+
+ if (cb)
+ nl_cb_set(ctx->nl_cb, NL_CB_VALID, NL_CB_CUSTOM, cb, ctx);
+
+ nl_send_auto_complete(ctx->nl_sock, ctx->nl_msg);
+
+ while (status == 1)
+ ovpn_nl_recvmsgs(ctx);
+
+ if (status < 0)
+ fprintf(stderr, "failed to send netlink message: %s (%d)\n",
+ strerror(-status), status);
+
+ return status;
+}
+
+static int ovpn_parse_key(const char *file, struct ovpn_ctx *ctx)
+{
+ int idx_enc, idx_dec, ret = -1;
+ unsigned char *ckey = NULL;
+ __u8 *bkey = NULL;
+ size_t olen = 0;
+ long ckey_len;
+ FILE *fp;
+
+ fp = fopen(file, "r");
+ if (!fp) {
+ fprintf(stderr, "cannot open: %s\n", file);
+ return -1;
+ }
+
+ /* get file size */
+ fseek(fp, 0L, SEEK_END);
+ ckey_len = ftell(fp);
+ rewind(fp);
+
+ /* if the file is longer, let's just read a portion */
+ if (ckey_len > 256)
+ ckey_len = 256;
+
+ ckey = malloc(ckey_len);
+ if (!ckey)
+ goto err;
+
+ ret = fread(ckey, 1, ckey_len, fp);
+ if (ret != ckey_len) {
+ fprintf(stderr,
+ "couldn't read enough data from key file: %dbytes read\n",
+ ret);
+ goto err;
+ }
+
+ olen = 0;
+ ret = mbedtls_base64_decode(NULL, 0, &olen, ckey, ckey_len);
+ if (ret != MBEDTLS_ERR_BASE64_BUFFER_TOO_SMALL) {
+ char buf[256];
+
+ mbedtls_strerror(ret, buf, sizeof(buf));
+ fprintf(stderr, "unexpected base64 error1: %s (%d)\n", buf,
+ ret);
+
+ goto err;
+ }
+
+ bkey = malloc(olen);
+ if (!bkey) {
+ fprintf(stderr, "cannot allocate binary key buffer\n");
+ goto err;
+ }
+
+ ret = mbedtls_base64_decode(bkey, olen, &olen, ckey, ckey_len);
+ if (ret) {
+ char buf[256];
+
+ mbedtls_strerror(ret, buf, sizeof(buf));
+ fprintf(stderr, "unexpected base64 error2: %s (%d)\n", buf,
+ ret);
+
+ goto err;
+ }
+
+ if (olen < 2 * KEY_LEN + NONCE_LEN) {
+ fprintf(stderr,
+ "not enough data in key file, found %zdB but needs %dB\n",
+ olen, 2 * KEY_LEN + NONCE_LEN);
+ goto err;
+ }
+
+ switch (ctx->key_dir) {
+ case KEY_DIR_IN:
+ idx_enc = 0;
+ idx_dec = 1;
+ break;
+ case KEY_DIR_OUT:
+ idx_enc = 1;
+ idx_dec = 0;
+ break;
+ default:
+ goto err;
+ }
+
+ memcpy(ctx->key_enc, bkey + KEY_LEN * idx_enc, KEY_LEN);
+ memcpy(ctx->key_dec, bkey + KEY_LEN * idx_dec, KEY_LEN);
+ memcpy(ctx->nonce, bkey + 2 * KEY_LEN, NONCE_LEN);
+
+ ret = 0;
+
+err:
+ fclose(fp);
+ free(bkey);
+ free(ckey);
+
+ return ret;
+}
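To summarize the offsets used by the memcpy() calls above: the base64-decoded blob must provide at least 2 * KEY_LEN + NONCE_LEN bytes, and the configured key direction only decides which 32-byte half becomes the encryption key. A compact illustration (not part of the patch; KEY_LEN and NONCE_LEN are the constants defined earlier in this file):

/* Illustration only: layout of the decoded key file consumed by
 * ovpn_parse_key().
 */
struct ovpn_keyfile_blob {
	__u8 key[2][KEY_LEN];		/* [0]/[1] chosen by key direction */
	__u8 nonce_tail[NONCE_LEN];	/* shared by both directions */
	/* any remaining decoded bytes are ignored */
};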
+
+static int ovpn_parse_cipher(const char *cipher, struct ovpn_ctx *ctx)
+{
+ if (strcmp(cipher, "aes") == 0)
+ ctx->cipher = OVPN_CIPHER_ALG_AES_GCM;
+ else if (strcmp(cipher, "chachapoly") == 0)
+ ctx->cipher = OVPN_CIPHER_ALG_CHACHA20_POLY1305;
+ else if (strcmp(cipher, "none") == 0)
+ ctx->cipher = OVPN_CIPHER_ALG_NONE;
+ else
+ return -ENOTSUP;
+
+ return 0;
+}
+
+static int ovpn_parse_key_direction(const char *dir, struct ovpn_ctx *ctx)
+{
+ int in_dir;
+
+ in_dir = strtoll(dir, NULL, 10);
+ switch (in_dir) {
+ case KEY_DIR_IN:
+ case KEY_DIR_OUT:
+ ctx->key_dir = in_dir;
+ break;
+ default:
+ fprintf(stderr,
+ "invalid key direction provided. Can be 0 or 1 only\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int ovpn_socket(struct ovpn_ctx *ctx, sa_family_t family, int proto)
+{
+ struct sockaddr_storage local_sock = { 0 };
+ struct sockaddr_in6 *in6;
+ struct sockaddr_in *in;
+ int ret, s, sock_type;
+ size_t sock_len;
+
+ if (proto == IPPROTO_UDP)
+ sock_type = SOCK_DGRAM;
+ else if (proto == IPPROTO_TCP)
+ sock_type = SOCK_STREAM;
+ else
+ return -EINVAL;
+
+ s = socket(family, sock_type, 0);
+ if (s < 0) {
+ perror("cannot create socket");
+ return -1;
+ }
+
+ switch (family) {
+ case AF_INET:
+ in = (struct sockaddr_in *)&local_sock;
+ in->sin_family = family;
+ in->sin_port = htons(ctx->lport);
+ in->sin_addr.s_addr = htonl(INADDR_ANY);
+ sock_len = sizeof(*in);
+ break;
+ case AF_INET6:
+ in6 = (struct sockaddr_in6 *)&local_sock;
+ in6->sin6_family = family;
+ in6->sin6_port = htons(ctx->lport);
+ in6->sin6_addr = in6addr_any;
+ sock_len = sizeof(*in6);
+ break;
+ default:
+ return -1;
+ }
+
+ int opt = 1;
+
+ ret = setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
+
+ if (ret < 0) {
+ perror("setsockopt for SO_REUSEADDR");
+ return ret;
+ }
+
+ ret = setsockopt(s, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt));
+ if (ret < 0) {
+ perror("setsockopt for SO_REUSEPORT");
+ return ret;
+ }
+
+ if (family == AF_INET6) {
+ opt = 0;
+ if (setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, &opt,
+ sizeof(opt))) {
+ perror("failed to set IPV6_V6ONLY");
+ return -1;
+ }
+ }
+
+ ret = bind(s, (struct sockaddr *)&local_sock, sock_len);
+ if (ret < 0) {
+ perror("cannot bind socket");
+ goto err_socket;
+ }
+
+ ctx->socket = s;
+ ctx->sa_family = family;
+ return 0;
+
+err_socket:
+ close(s);
+ return -1;
+}
+
+static int ovpn_udp_socket(struct ovpn_ctx *ctx, sa_family_t family)
+{
+ return ovpn_socket(ctx, family, IPPROTO_UDP);
+}
+
+static int ovpn_listen(struct ovpn_ctx *ctx, sa_family_t family)
+{
+ int ret;
+
+ ret = ovpn_socket(ctx, family, IPPROTO_TCP);
+ if (ret < 0)
+ return ret;
+
+ ret = listen(ctx->socket, 10);
+ if (ret < 0) {
+ perror("listen");
+ close(ctx->socket);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int ovpn_accept(struct ovpn_ctx *ctx)
+{
+ socklen_t socklen;
+ int ret;
+
+ socklen = sizeof(ctx->remote);
+ ret = accept(ctx->socket, (struct sockaddr *)&ctx->remote, &socklen);
+ if (ret < 0) {
+ perror("accept");
+ goto err;
+ }
+
+ fprintf(stderr, "Connection received!\n");
+
+ switch (socklen) {
+ case sizeof(struct sockaddr_in):
+ case sizeof(struct sockaddr_in6):
+ break;
+ default:
+ fprintf(stderr, "error: expecting IPv4 or IPv6 connection\n");
+ close(ret);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ return ret;
+err:
+ close(ctx->socket);
+ return ret;
+}
+
+static int ovpn_connect(struct ovpn_ctx *ovpn)
+{
+ socklen_t socklen;
+ int s, ret;
+
+ s = socket(ovpn->remote.in4.sin_family, SOCK_STREAM, 0);
+ if (s < 0) {
+ perror("cannot create socket");
+ return -1;
+ }
+
+ switch (ovpn->remote.in4.sin_family) {
+ case AF_INET:
+ socklen = sizeof(struct sockaddr_in);
+ break;
+ case AF_INET6:
+ socklen = sizeof(struct sockaddr_in6);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = connect(s, (struct sockaddr *)&ovpn->remote, socklen);
+ if (ret < 0) {
+ perror("connect");
+ goto err;
+ }
+
+ fprintf(stderr, "connected\n");
+
+ ovpn->socket = s;
+
+ return 0;
+err:
+ close(s);
+ return ret;
+}
+
+static int ovpn_new_peer(struct ovpn_ctx *ovpn, bool is_tcp)
+{
+ struct nlattr *attr;
+ struct nl_ctx *ctx;
+ int ret = -1;
+
+ ctx = nl_ctx_alloc(ovpn, OVPN_CMD_PEER_NEW);
+ if (!ctx)
+ return -ENOMEM;
+
+ attr = nla_nest_start(ctx->nl_msg, OVPN_A_PEER);
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_ID, ovpn->peer_id);
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_SOCKET, ovpn->socket);
+
+ if (!is_tcp) {
+ switch (ovpn->remote.in4.sin_family) {
+ case AF_INET:
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_REMOTE_IPV4,
+ ovpn->remote.in4.sin_addr.s_addr);
+ NLA_PUT_U16(ctx->nl_msg, OVPN_A_PEER_REMOTE_PORT,
+ ovpn->remote.in4.sin_port);
+ break;
+ case AF_INET6:
+ NLA_PUT(ctx->nl_msg, OVPN_A_PEER_REMOTE_IPV6,
+ sizeof(ovpn->remote.in6.sin6_addr),
+ &ovpn->remote.in6.sin6_addr);
+ NLA_PUT_U32(ctx->nl_msg,
+ OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID,
+ ovpn->remote.in6.sin6_scope_id);
+ NLA_PUT_U16(ctx->nl_msg, OVPN_A_PEER_REMOTE_PORT,
+ ovpn->remote.in6.sin6_port);
+ break;
+ default:
+ fprintf(stderr,
+ "Invalid family for remote socket address\n");
+ goto nla_put_failure;
+ }
+ }
+
+ if (ovpn->peer_ip_set) {
+ switch (ovpn->peer_ip.in4.sin_family) {
+ case AF_INET:
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_VPN_IPV4,
+ ovpn->peer_ip.in4.sin_addr.s_addr);
+ break;
+ case AF_INET6:
+ NLA_PUT(ctx->nl_msg, OVPN_A_PEER_VPN_IPV6,
+ sizeof(struct in6_addr),
+ &ovpn->peer_ip.in6.sin6_addr);
+ break;
+ default:
+ fprintf(stderr, "Invalid family for peer address\n");
+ goto nla_put_failure;
+ }
+ }
+
+ nla_nest_end(ctx->nl_msg, attr);
+
+ ret = ovpn_nl_msg_send(ctx, NULL);
+nla_put_failure:
+ nl_ctx_free(ctx);
+ return ret;
+}
+
+static int ovpn_set_peer(struct ovpn_ctx *ovpn)
+{
+ struct nlattr *attr;
+ struct nl_ctx *ctx;
+ int ret = -1;
+
+ ctx = nl_ctx_alloc(ovpn, OVPN_CMD_PEER_SET);
+ if (!ctx)
+ return -ENOMEM;
+
+ attr = nla_nest_start(ctx->nl_msg, OVPN_A_PEER);
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_ID, ovpn->peer_id);
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_KEEPALIVE_INTERVAL,
+ ovpn->keepalive_interval);
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_KEEPALIVE_TIMEOUT,
+ ovpn->keepalive_timeout);
+ nla_nest_end(ctx->nl_msg, attr);
+
+ ret = ovpn_nl_msg_send(ctx, NULL);
+nla_put_failure:
+ nl_ctx_free(ctx);
+ return ret;
+}
+
+static int ovpn_del_peer(struct ovpn_ctx *ovpn)
+{
+ struct nlattr *attr;
+ struct nl_ctx *ctx;
+ int ret = -1;
+
+ ctx = nl_ctx_alloc(ovpn, OVPN_CMD_PEER_DEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ attr = nla_nest_start(ctx->nl_msg, OVPN_A_PEER);
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_ID, ovpn->peer_id);
+ nla_nest_end(ctx->nl_msg, attr);
+
+ ret = ovpn_nl_msg_send(ctx, NULL);
+nla_put_failure:
+ nl_ctx_free(ctx);
+ return ret;
+}
+
+static int ovpn_handle_peer(struct nl_msg *msg, void (*arg)__always_unused)
+{
+ struct nlattr *pattrs[OVPN_A_PEER_MAX + 1];
+ struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
+ struct nlattr *attrs[OVPN_A_MAX + 1];
+ __u16 rport = 0, lport = 0;
+
+ nla_parse(attrs, OVPN_A_MAX, genlmsg_attrdata(gnlh, 0),
+ genlmsg_attrlen(gnlh, 0), NULL);
+
+ if (!attrs[OVPN_A_PEER]) {
+ fprintf(stderr, "no packet content in netlink message\n");
+ return NL_SKIP;
+ }
+
+ nla_parse(pattrs, OVPN_A_PEER_MAX, nla_data(attrs[OVPN_A_PEER]),
+ nla_len(attrs[OVPN_A_PEER]), NULL);
+
+ if (pattrs[OVPN_A_PEER_ID])
+ fprintf(stderr, "* Peer %u\n",
+ nla_get_u32(pattrs[OVPN_A_PEER_ID]));
+
+ if (pattrs[OVPN_A_PEER_SOCKET_NETNSID])
+ fprintf(stderr, "\tsocket NetNS ID: %d\n",
+ nla_get_s32(pattrs[OVPN_A_PEER_SOCKET_NETNSID]));
+
+ if (pattrs[OVPN_A_PEER_VPN_IPV4]) {
+ char buf[INET_ADDRSTRLEN];
+
+ inet_ntop(AF_INET, nla_data(pattrs[OVPN_A_PEER_VPN_IPV4]),
+ buf, sizeof(buf));
+ fprintf(stderr, "\tVPN IPv4: %s\n", buf);
+ }
+
+ if (pattrs[OVPN_A_PEER_VPN_IPV6]) {
+ char buf[INET6_ADDRSTRLEN];
+
+ inet_ntop(AF_INET6, nla_data(pattrs[OVPN_A_PEER_VPN_IPV6]),
+ buf, sizeof(buf));
+ fprintf(stderr, "\tVPN IPv6: %s\n", buf);
+ }
+
+ if (pattrs[OVPN_A_PEER_LOCAL_PORT])
+ lport = ntohs(nla_get_u16(pattrs[OVPN_A_PEER_LOCAL_PORT]));
+
+ if (pattrs[OVPN_A_PEER_REMOTE_PORT])
+ rport = ntohs(nla_get_u16(pattrs[OVPN_A_PEER_REMOTE_PORT]));
+
+ if (pattrs[OVPN_A_PEER_REMOTE_IPV6]) {
+ void *ip = pattrs[OVPN_A_PEER_REMOTE_IPV6];
+ char buf[INET6_ADDRSTRLEN];
+ int scope_id = -1;
+
+ if (pattrs[OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID]) {
+ void *p = pattrs[OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID];
+
+ scope_id = nla_get_u32(p);
+ }
+
+ inet_ntop(AF_INET6, nla_data(ip), buf, sizeof(buf));
+ fprintf(stderr, "\tRemote: %s:%hu (scope-id: %u)\n", buf, rport,
+ scope_id);
+
+ if (pattrs[OVPN_A_PEER_LOCAL_IPV6]) {
+ void *ip = pattrs[OVPN_A_PEER_LOCAL_IPV6];
+
+ inet_ntop(AF_INET6, nla_data(ip), buf, sizeof(buf));
+ fprintf(stderr, "\tLocal: %s:%hu\n", buf, lport);
+ }
+ }
+
+ if (pattrs[OVPN_A_PEER_REMOTE_IPV4]) {
+ void *ip = pattrs[OVPN_A_PEER_REMOTE_IPV4];
+ char buf[INET_ADDRSTRLEN];
+
+ inet_ntop(AF_INET, nla_data(ip), buf, sizeof(buf));
+ fprintf(stderr, "\tRemote: %s:%hu\n", buf, rport);
+
+ if (pattrs[OVPN_A_PEER_LOCAL_IPV4]) {
+ void *p = pattrs[OVPN_A_PEER_LOCAL_IPV4];
+
+ inet_ntop(AF_INET, nla_data(p), buf, sizeof(buf));
+ fprintf(stderr, "\tLocal: %s:%hu\n", buf, lport);
+ }
+ }
+
+ if (pattrs[OVPN_A_PEER_KEEPALIVE_INTERVAL]) {
+ void *p = pattrs[OVPN_A_PEER_KEEPALIVE_INTERVAL];
+
+ fprintf(stderr, "\tKeepalive interval: %u sec\n",
+ nla_get_u32(p));
+ }
+
+ if (pattrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT])
+ fprintf(stderr, "\tKeepalive timeout: %u sec\n",
+ nla_get_u32(pattrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]));
+
+ if (pattrs[OVPN_A_PEER_VPN_RX_BYTES])
+ fprintf(stderr, "\tVPN RX bytes: %" PRIu64 "\n",
+ ovpn_nla_get_uint(pattrs[OVPN_A_PEER_VPN_RX_BYTES]));
+
+ if (pattrs[OVPN_A_PEER_VPN_TX_BYTES])
+ fprintf(stderr, "\tVPN TX bytes: %" PRIu64 "\n",
+ ovpn_nla_get_uint(pattrs[OVPN_A_PEER_VPN_TX_BYTES]));
+
+ if (pattrs[OVPN_A_PEER_VPN_RX_PACKETS])
+ fprintf(stderr, "\tVPN RX packets: %" PRIu64 "\n",
+ ovpn_nla_get_uint(pattrs[OVPN_A_PEER_VPN_RX_PACKETS]));
+
+ if (pattrs[OVPN_A_PEER_VPN_TX_PACKETS])
+ fprintf(stderr, "\tVPN TX packets: %" PRIu64 "\n",
+ ovpn_nla_get_uint(pattrs[OVPN_A_PEER_VPN_TX_PACKETS]));
+
+ if (pattrs[OVPN_A_PEER_LINK_RX_BYTES])
+ fprintf(stderr, "\tLINK RX bytes: %" PRIu64 "\n",
+ ovpn_nla_get_uint(pattrs[OVPN_A_PEER_LINK_RX_BYTES]));
+
+ if (pattrs[OVPN_A_PEER_LINK_TX_BYTES])
+ fprintf(stderr, "\tLINK TX bytes: %" PRIu64 "\n",
+ ovpn_nla_get_uint(pattrs[OVPN_A_PEER_LINK_TX_BYTES]));
+
+ if (pattrs[OVPN_A_PEER_LINK_RX_PACKETS])
+ fprintf(stderr, "\tLINK RX packets: %" PRIu64 "\n",
+ ovpn_nla_get_uint(pattrs[OVPN_A_PEER_LINK_RX_PACKETS]));
+
+ if (pattrs[OVPN_A_PEER_LINK_TX_PACKETS])
+ fprintf(stderr, "\tLINK TX packets: %" PRIu64 "\n",
+ ovpn_nla_get_uint(pattrs[OVPN_A_PEER_LINK_TX_PACKETS]));
+
+ return NL_SKIP;
+}
+
+static int ovpn_get_peer(struct ovpn_ctx *ovpn)
+{
+ int flags = 0, ret = -1;
+ struct nlattr *attr;
+ struct nl_ctx *ctx;
+
+ if (ovpn->peer_id == PEER_ID_UNDEF)
+ flags = NLM_F_DUMP;
+
+ ctx = nl_ctx_alloc_flags(ovpn, OVPN_CMD_PEER_GET, flags);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (ovpn->peer_id != PEER_ID_UNDEF) {
+ attr = nla_nest_start(ctx->nl_msg, OVPN_A_PEER);
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_ID, ovpn->peer_id);
+ nla_nest_end(ctx->nl_msg, attr);
+ }
+
+ ret = ovpn_nl_msg_send(ctx, ovpn_handle_peer);
+nla_put_failure:
+ nl_ctx_free(ctx);
+ return ret;
+}
+
+static int ovpn_new_key(struct ovpn_ctx *ovpn)
+{
+ struct nlattr *keyconf, *key_dir;
+ struct nl_ctx *ctx;
+ int ret = -1;
+
+ ctx = nl_ctx_alloc(ovpn, OVPN_CMD_KEY_NEW);
+ if (!ctx)
+ return -ENOMEM;
+
+ keyconf = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF);
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_PEER_ID, ovpn->peer_id);
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_SLOT, ovpn->key_slot);
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_KEY_ID, ovpn->key_id);
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_CIPHER_ALG, ovpn->cipher);
+
+ key_dir = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF_ENCRYPT_DIR);
+ NLA_PUT(ctx->nl_msg, OVPN_A_KEYDIR_CIPHER_KEY, KEY_LEN, ovpn->key_enc);
+ NLA_PUT(ctx->nl_msg, OVPN_A_KEYDIR_NONCE_TAIL, NONCE_LEN, ovpn->nonce);
+ nla_nest_end(ctx->nl_msg, key_dir);
+
+ key_dir = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF_DECRYPT_DIR);
+ NLA_PUT(ctx->nl_msg, OVPN_A_KEYDIR_CIPHER_KEY, KEY_LEN, ovpn->key_dec);
+ NLA_PUT(ctx->nl_msg, OVPN_A_KEYDIR_NONCE_TAIL, NONCE_LEN, ovpn->nonce);
+ nla_nest_end(ctx->nl_msg, key_dir);
+
+ nla_nest_end(ctx->nl_msg, keyconf);
+
+ ret = ovpn_nl_msg_send(ctx, NULL);
+nla_put_failure:
+ nl_ctx_free(ctx);
+ return ret;
+}
+
+static int ovpn_del_key(struct ovpn_ctx *ovpn)
+{
+ struct nlattr *keyconf;
+ struct nl_ctx *ctx;
+ int ret = -1;
+
+ ctx = nl_ctx_alloc(ovpn, OVPN_CMD_KEY_DEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ keyconf = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF);
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_PEER_ID, ovpn->peer_id);
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_SLOT, ovpn->key_slot);
+ nla_nest_end(ctx->nl_msg, keyconf);
+
+ ret = ovpn_nl_msg_send(ctx, NULL);
+nla_put_failure:
+ nl_ctx_free(ctx);
+ return ret;
+}
+
+static int ovpn_handle_key(struct nl_msg *msg, void (*arg)__always_unused)
+{
+ struct nlattr *kattrs[OVPN_A_KEYCONF_MAX + 1];
+ struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
+ struct nlattr *attrs[OVPN_A_MAX + 1];
+
+ nla_parse(attrs, OVPN_A_MAX, genlmsg_attrdata(gnlh, 0),
+ genlmsg_attrlen(gnlh, 0), NULL);
+
+ if (!attrs[OVPN_A_KEYCONF]) {
+		fprintf(stderr, "no key configuration in netlink message\n");
+ return NL_SKIP;
+ }
+
+ nla_parse(kattrs, OVPN_A_KEYCONF_MAX, nla_data(attrs[OVPN_A_KEYCONF]),
+ nla_len(attrs[OVPN_A_KEYCONF]), NULL);
+
+ if (kattrs[OVPN_A_KEYCONF_PEER_ID])
+ fprintf(stderr, "* Peer %u\n",
+ nla_get_u32(kattrs[OVPN_A_KEYCONF_PEER_ID]));
+ if (kattrs[OVPN_A_KEYCONF_SLOT]) {
+ fprintf(stderr, "\t- Slot: ");
+ switch (nla_get_u32(kattrs[OVPN_A_KEYCONF_SLOT])) {
+ case OVPN_KEY_SLOT_PRIMARY:
+ fprintf(stderr, "primary\n");
+ break;
+ case OVPN_KEY_SLOT_SECONDARY:
+ fprintf(stderr, "secondary\n");
+ break;
+ default:
+ fprintf(stderr, "invalid (%u)\n",
+ nla_get_u32(kattrs[OVPN_A_KEYCONF_SLOT]));
+ break;
+ }
+ }
+ if (kattrs[OVPN_A_KEYCONF_KEY_ID])
+ fprintf(stderr, "\t- Key ID: %u\n",
+ nla_get_u32(kattrs[OVPN_A_KEYCONF_KEY_ID]));
+ if (kattrs[OVPN_A_KEYCONF_CIPHER_ALG]) {
+ fprintf(stderr, "\t- Cipher: ");
+ switch (nla_get_u32(kattrs[OVPN_A_KEYCONF_CIPHER_ALG])) {
+ case OVPN_CIPHER_ALG_NONE:
+ fprintf(stderr, "none\n");
+ break;
+ case OVPN_CIPHER_ALG_AES_GCM:
+ fprintf(stderr, "aes-gcm\n");
+ break;
+ case OVPN_CIPHER_ALG_CHACHA20_POLY1305:
+ fprintf(stderr, "chacha20poly1305\n");
+ break;
+ default:
+ fprintf(stderr, "invalid (%u)\n",
+ nla_get_u32(kattrs[OVPN_A_KEYCONF_CIPHER_ALG]));
+ break;
+ }
+ }
+
+ return NL_SKIP;
+}
+
+static int ovpn_get_key(struct ovpn_ctx *ovpn)
+{
+ struct nlattr *keyconf;
+ struct nl_ctx *ctx;
+ int ret = -1;
+
+ ctx = nl_ctx_alloc(ovpn, OVPN_CMD_KEY_GET);
+ if (!ctx)
+ return -ENOMEM;
+
+ keyconf = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF);
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_PEER_ID, ovpn->peer_id);
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_SLOT, ovpn->key_slot);
+ nla_nest_end(ctx->nl_msg, keyconf);
+
+ ret = ovpn_nl_msg_send(ctx, ovpn_handle_key);
+nla_put_failure:
+ nl_ctx_free(ctx);
+ return ret;
+}
+
+static int ovpn_swap_keys(struct ovpn_ctx *ovpn)
+{
+ struct nl_ctx *ctx;
+ struct nlattr *kc;
+ int ret = -1;
+
+ ctx = nl_ctx_alloc(ovpn, OVPN_CMD_KEY_SWAP);
+ if (!ctx)
+ return -ENOMEM;
+
+ kc = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF);
+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_PEER_ID, ovpn->peer_id);
+ nla_nest_end(ctx->nl_msg, kc);
+
+ ret = ovpn_nl_msg_send(ctx, NULL);
+nla_put_failure:
+ nl_ctx_free(ctx);
+ return ret;
+}
+
+/* Helper function used to easily add attributes to a rtnl message */
+static int ovpn_addattr(struct nlmsghdr *n, int maxlen, int type,
+ const void *data, int alen)
+{
+ int len = RTA_LENGTH(alen);
+ struct rtattr *rta;
+
+ if ((int)(NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len)) > maxlen) {
+ fprintf(stderr, "%s: rtnl: message exceeded bound of %d\n",
+ __func__, maxlen);
+ return -EMSGSIZE;
+ }
+
+ rta = nlmsg_tail(n);
+ rta->rta_type = type;
+ rta->rta_len = len;
+
+ if (!data)
+ memset(RTA_DATA(rta), 0, alen);
+ else
+ memcpy(RTA_DATA(rta), data, alen);
+
+ n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len);
+
+ return 0;
+}
+
+static struct rtattr *ovpn_nest_start(struct nlmsghdr *msg, size_t max_size,
+ int attr)
+{
+ struct rtattr *nest = nlmsg_tail(msg);
+
+ if (ovpn_addattr(msg, max_size, attr, NULL, 0) < 0)
+ return NULL;
+
+ return nest;
+}
+
+static void ovpn_nest_end(struct nlmsghdr *msg, struct rtattr *nest)
+{
+ nest->rta_len = (uint8_t *)nlmsg_tail(msg) - (uint8_t *)nest;
+}
+
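+/* Buffer sizes for the ad-hoc rtnl socket used to create/delete links */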
+#define RT_SNDBUF_SIZE (1024 * 2)
+#define RT_RCVBUF_SIZE (1024 * 4)
+
+/* Open RTNL socket */
+static int ovpn_rt_socket(void)
+{
+ int sndbuf = RT_SNDBUF_SIZE, rcvbuf = RT_RCVBUF_SIZE, fd;
+
+ fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ if (fd < 0) {
+ fprintf(stderr, "%s: cannot open netlink socket\n", __func__);
+ return fd;
+ }
+
+ if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf,
+ sizeof(sndbuf)) < 0) {
+ fprintf(stderr, "%s: SO_SNDBUF\n", __func__);
+ close(fd);
+ return -1;
+ }
+
+ if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf,
+ sizeof(rcvbuf)) < 0) {
+ fprintf(stderr, "%s: SO_RCVBUF\n", __func__);
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
+
+/* Bind socket to Netlink subsystem */
+static int ovpn_rt_bind(int fd, uint32_t groups)
+{
+ struct sockaddr_nl local = { 0 };
+ socklen_t addr_len;
+
+ local.nl_family = AF_NETLINK;
+ local.nl_groups = groups;
+
+ if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0) {
+ fprintf(stderr, "%s: cannot bind netlink socket: %d\n",
+ __func__, errno);
+ return -errno;
+ }
+
+ addr_len = sizeof(local);
+ if (getsockname(fd, (struct sockaddr *)&local, &addr_len) < 0) {
+ fprintf(stderr, "%s: cannot getsockname: %d\n", __func__,
+ errno);
+ return -errno;
+ }
+
+ if (addr_len != sizeof(local)) {
+ fprintf(stderr, "%s: wrong address length %d\n", __func__,
+ addr_len);
+ return -EINVAL;
+ }
+
+ if (local.nl_family != AF_NETLINK) {
+ fprintf(stderr, "%s: wrong address family %d\n", __func__,
+ local.nl_family);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+typedef int (*ovpn_parse_reply_cb)(struct nlmsghdr *msg, void *arg);
+
+/* Send Netlink message and run callback on reply (if specified) */
+static int ovpn_rt_send(struct nlmsghdr *payload, pid_t peer,
+ unsigned int groups, ovpn_parse_reply_cb cb,
+ void *arg_cb)
+{
+ int len, rem_len, fd, ret, rcv_len;
+ struct sockaddr_nl nladdr = { 0 };
+ struct nlmsgerr *err;
+ struct nlmsghdr *h;
+ char buf[1024 * 16];
+ struct iovec iov = {
+ .iov_base = payload,
+ .iov_len = payload->nlmsg_len,
+ };
+ struct msghdr nlmsg = {
+ .msg_name = &nladdr,
+ .msg_namelen = sizeof(nladdr),
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ };
+
+ nladdr.nl_family = AF_NETLINK;
+ nladdr.nl_pid = peer;
+ nladdr.nl_groups = groups;
+
+ payload->nlmsg_seq = time(NULL);
+
+	/* no reply to parse: just ask the kernel for an explicit ACK */
+ if (!cb)
+ payload->nlmsg_flags |= NLM_F_ACK;
+
+ fd = ovpn_rt_socket();
+ if (fd < 0) {
+ fprintf(stderr, "%s: can't open rtnl socket\n", __func__);
+ return -errno;
+ }
+
+ ret = ovpn_rt_bind(fd, 0);
+ if (ret < 0) {
+ fprintf(stderr, "%s: can't bind rtnl socket\n", __func__);
+ ret = -errno;
+ goto out;
+ }
+
+ ret = sendmsg(fd, &nlmsg, 0);
+ if (ret < 0) {
+ fprintf(stderr, "%s: rtnl: error on sendmsg()\n", __func__);
+ ret = -errno;
+ goto out;
+ }
+
+ /* prepare buffer to store RTNL replies */
+ memset(buf, 0, sizeof(buf));
+ iov.iov_base = buf;
+
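+	/* keep receiving until the kernel signals completion: a dump ends
+	 * with NLMSG_DONE, while single requests are answered with an
+	 * NLMSG_ERROR carrying the (possibly zero) error code
+	 */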
+ while (1) {
+		/*
+		 * iov_len is modified by recvmsg(), therefore it has to be
+		 * re-initialized before every call
+		 */
+ iov.iov_len = sizeof(buf);
+ rcv_len = recvmsg(fd, &nlmsg, 0);
+ if (rcv_len < 0) {
+ if (errno == EINTR || errno == EAGAIN) {
+ fprintf(stderr, "%s: interrupted call\n",
+ __func__);
+ continue;
+ }
+ fprintf(stderr, "%s: rtnl: error on recvmsg()\n",
+ __func__);
+ ret = -errno;
+ goto out;
+ }
+
+ if (rcv_len == 0) {
+ fprintf(stderr,
+ "%s: rtnl: socket reached unexpected EOF\n",
+ __func__);
+ ret = -EIO;
+ goto out;
+ }
+
+ if (nlmsg.msg_namelen != sizeof(nladdr)) {
+ fprintf(stderr,
+ "%s: sender address length: %u (expected %zu)\n",
+ __func__, nlmsg.msg_namelen, sizeof(nladdr));
+ ret = -EIO;
+ goto out;
+ }
+
+ h = (struct nlmsghdr *)buf;
+ while (rcv_len >= (int)sizeof(*h)) {
+ len = h->nlmsg_len;
+ rem_len = len - sizeof(*h);
+
+ if (rem_len < 0 || len > rcv_len) {
+ if (nlmsg.msg_flags & MSG_TRUNC) {
+ fprintf(stderr, "%s: truncated message\n",
+ __func__);
+ ret = -EIO;
+ goto out;
+ }
+ fprintf(stderr, "%s: malformed message: len=%d\n",
+ __func__, len);
+ ret = -EIO;
+ goto out;
+ }
+
+ if (h->nlmsg_type == NLMSG_DONE) {
+ ret = 0;
+ goto out;
+ }
+
+ if (h->nlmsg_type == NLMSG_ERROR) {
+ err = (struct nlmsgerr *)NLMSG_DATA(h);
+ if (rem_len < (int)sizeof(struct nlmsgerr)) {
+ fprintf(stderr, "%s: ERROR truncated\n",
+ __func__);
+ ret = -EIO;
+ goto out;
+ }
+
+ if (err->error) {
+ fprintf(stderr, "%s: (%d) %s\n",
+ __func__, err->error,
+ strerror(-err->error));
+ ret = err->error;
+ goto out;
+ }
+
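+				/* error == 0 is an ACK: report success and
+				 * let the callback, if any, inspect it
+				 */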
+ ret = 0;
+ if (cb) {
+ int r = cb(h, arg_cb);
+
+ if (r <= 0)
+ ret = r;
+ }
+ goto out;
+ }
+
+ if (cb) {
+ int r = cb(h, arg_cb);
+
+ if (r <= 0) {
+ ret = r;
+ goto out;
+ }
+ } else {
+ fprintf(stderr, "%s: RTNL: unexpected reply\n",
+ __func__);
+ }
+
+ rcv_len -= NLMSG_ALIGN(len);
+ h = (struct nlmsghdr *)((uint8_t *)h +
+ NLMSG_ALIGN(len));
+ }
+
+ if (nlmsg.msg_flags & MSG_TRUNC) {
+ fprintf(stderr, "%s: message truncated\n", __func__);
+ continue;
+ }
+
+ if (rcv_len) {
+			fprintf(stderr, "%s: rtnl: %d bytes not parsed\n",
+ __func__, rcv_len);
+ ret = -1;
+ goto out;
+ }
+ }
+out:
+ close(fd);
+
+ return ret;
+}
+
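+/* Rtnl request used to create or delete an ovpn link: netlink header,
+ * interface info header and room for additional attributes
+ */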
+struct ovpn_link_req {
+ struct nlmsghdr n;
+ struct ifinfomsg i;
+ char buf[256];
+};
+
+static int ovpn_new_iface(struct ovpn_ctx *ovpn)
+{
+ struct rtattr *linkinfo, *data;
+ struct ovpn_link_req req = { 0 };
+ int ret = -1;
+
+ fprintf(stdout, "Creating interface %s with mode %u\n", ovpn->ifname,
+ ovpn->mode);
+
+ req.n.nlmsg_len = NLMSG_LENGTH(sizeof(req.i));
+ req.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
+ req.n.nlmsg_type = RTM_NEWLINK;
+
+ if (ovpn_addattr(&req.n, sizeof(req), IFLA_IFNAME, ovpn->ifname,
+ strlen(ovpn->ifname) + 1) < 0)
+ goto err;
+
+ linkinfo = ovpn_nest_start(&req.n, sizeof(req), IFLA_LINKINFO);
+ if (!linkinfo)
+ goto err;
+
+ if (ovpn_addattr(&req.n, sizeof(req), IFLA_INFO_KIND, OVPN_FAMILY_NAME,
+ strlen(OVPN_FAMILY_NAME) + 1) < 0)
+ goto err;
+
+ if (ovpn->mode_set) {
+ data = ovpn_nest_start(&req.n, sizeof(req), IFLA_INFO_DATA);
+ if (!data)
+ goto err;
+
+ if (ovpn_addattr(&req.n, sizeof(req), IFLA_OVPN_MODE,
+ &ovpn->mode, sizeof(uint8_t)) < 0)
+ goto err;
+
+ ovpn_nest_end(&req.n, data);
+ }
+
+ ovpn_nest_end(&req.n, linkinfo);
+
+ req.i.ifi_family = AF_PACKET;
+
+ ret = ovpn_rt_send(&req.n, 0, 0, NULL, NULL);
+err:
+ return ret;
+}
+
+static int ovpn_del_iface(struct ovpn_ctx *ovpn)
+{
+ struct ovpn_link_req req = { 0 };
+
+ fprintf(stdout, "Deleting interface %s ifindex %u\n", ovpn->ifname,
+ ovpn->ifindex);
+
+ req.n.nlmsg_len = NLMSG_LENGTH(sizeof(req.i));
+ req.n.nlmsg_flags = NLM_F_REQUEST;
+ req.n.nlmsg_type = RTM_DELLINK;
+
+ req.i.ifi_family = AF_PACKET;
+ req.i.ifi_index = ovpn->ifindex;
+
+ return ovpn_rt_send(&req.n, 0, 0, NULL, NULL);
+}
+
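+/* Multicast notifications are not replies to our own requests, hence
+ * their sequence numbers cannot be validated: accept every message
+ */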
+static int nl_seq_check(struct nl_msg (*msg)__always_unused,
+ void (*arg)__always_unused)
+{
+ return NL_OK;
+}
+
+struct mcast_handler_args {
+ const char *group;
+ int id;
+};
+
+static int mcast_family_handler(struct nl_msg *msg, void *arg)
+{
+ struct mcast_handler_args *grp = arg;
+ struct nlattr *tb[CTRL_ATTR_MAX + 1];
+ struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
+ struct nlattr *mcgrp;
+ int rem_mcgrp;
+
+ nla_parse(tb, CTRL_ATTR_MAX, genlmsg_attrdata(gnlh, 0),
+ genlmsg_attrlen(gnlh, 0), NULL);
+
+ if (!tb[CTRL_ATTR_MCAST_GROUPS])
+ return NL_SKIP;
+
+ nla_for_each_nested(mcgrp, tb[CTRL_ATTR_MCAST_GROUPS], rem_mcgrp) {
+ struct nlattr *tb_mcgrp[CTRL_ATTR_MCAST_GRP_MAX + 1];
+
+ nla_parse(tb_mcgrp, CTRL_ATTR_MCAST_GRP_MAX,
+ nla_data(mcgrp), nla_len(mcgrp), NULL);
+
+ if (!tb_mcgrp[CTRL_ATTR_MCAST_GRP_NAME] ||
+ !tb_mcgrp[CTRL_ATTR_MCAST_GRP_ID])
+ continue;
+ if (strncmp(nla_data(tb_mcgrp[CTRL_ATTR_MCAST_GRP_NAME]),
+ grp->group, nla_len(tb_mcgrp[CTRL_ATTR_MCAST_GRP_NAME])))
+ continue;
+ grp->id = nla_get_u32(tb_mcgrp[CTRL_ATTR_MCAST_GRP_ID]);
+ break;
+ }
+
+ return NL_SKIP;
+}
+
+static int mcast_error_handler(struct sockaddr_nl (*nla)__always_unused,
+ struct nlmsgerr *err, void *arg)
+{
+ int *ret = arg;
+
+ *ret = err->error;
+ return NL_STOP;
+}
+
+static int mcast_ack_handler(struct nl_msg (*msg)__always_unused, void *arg)
+{
+ int *ret = arg;
+
+ *ret = 0;
+ return NL_STOP;
+}
+
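+/* Report which ovpn notification (peer deletion or key swap) was
+ * delivered on the multicast group
+ */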
+static int ovpn_handle_msg(struct nl_msg *msg, void *arg)
+{
+ struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
+ struct nlattr *attrs[OVPN_A_MAX + 1];
+ struct nlmsghdr *nlh = nlmsg_hdr(msg);
+ char ifname[IF_NAMESIZE];
+ int *ret = arg;
+ __u32 ifindex;
+
+	fprintf(stderr, "received message from ovpn\n");
+
+ *ret = -1;
+
+ if (!genlmsg_valid_hdr(nlh, 0)) {
+ fprintf(stderr, "invalid header\n");
+ return NL_STOP;
+ }
+
+ if (nla_parse(attrs, OVPN_A_MAX, genlmsg_attrdata(gnlh, 0),
+ genlmsg_attrlen(gnlh, 0), NULL)) {
+		fprintf(stderr, "received bogus data from ovpn\n");
+ return NL_STOP;
+ }
+
+ if (!attrs[OVPN_A_IFINDEX]) {
+ fprintf(stderr, "no ifindex in this message\n");
+ return NL_STOP;
+ }
+
+ ifindex = nla_get_u32(attrs[OVPN_A_IFINDEX]);
+ if (!if_indextoname(ifindex, ifname)) {
+ fprintf(stderr, "cannot resolve ifname for ifindex: %u\n",
+ ifindex);
+ return NL_STOP;
+ }
+
+ switch (gnlh->cmd) {
+ case OVPN_CMD_PEER_DEL_NTF:
+ fprintf(stdout, "received CMD_PEER_DEL_NTF\n");
+ break;
+ case OVPN_CMD_KEY_SWAP_NTF:
+ fprintf(stdout, "received CMD_KEY_SWAP_NTF\n");
+ break;
+ default:
+ fprintf(stderr, "received unknown command: %d\n", gnlh->cmd);
+ return NL_STOP;
+ }
+
+ *ret = 0;
+ return NL_OK;
+}
+
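+/* Resolve the numeric ID of a generic netlink multicast group by
+ * querying the nlctrl family with CTRL_CMD_GETFAMILY
+ */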
+static int ovpn_get_mcast_id(struct nl_sock *sock, const char *family,
+ const char *group)
+{
+ struct nl_msg *msg;
+ struct nl_cb *cb;
+ int ret, ctrlid;
+ struct mcast_handler_args grp = {
+ .group = group,
+ .id = -ENOENT,
+ };
+
+ msg = nlmsg_alloc();
+ if (!msg)
+ return -ENOMEM;
+
+ cb = nl_cb_alloc(NL_CB_DEFAULT);
+ if (!cb) {
+ ret = -ENOMEM;
+ goto out_fail_cb;
+ }
+
+ ctrlid = genl_ctrl_resolve(sock, "nlctrl");
+
+ genlmsg_put(msg, 0, 0, ctrlid, 0, 0, CTRL_CMD_GETFAMILY, 0);
+
+ ret = -ENOBUFS;
+ NLA_PUT_STRING(msg, CTRL_ATTR_FAMILY_NAME, family);
+
+ ret = nl_send_auto_complete(sock, msg);
+ if (ret < 0)
+ goto nla_put_failure;
+
+ ret = 1;
+
+ nl_cb_err(cb, NL_CB_CUSTOM, mcast_error_handler, &ret);
+ nl_cb_set(cb, NL_CB_ACK, NL_CB_CUSTOM, mcast_ack_handler, &ret);
+ nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, mcast_family_handler, &grp);
+
+ while (ret > 0)
+ nl_recvmsgs(sock, cb);
+
+ if (ret == 0)
+ ret = grp.id;
+ nla_put_failure:
+ nl_cb_put(cb);
+ out_fail_cb:
+ nlmsg_free(msg);
+ return ret;
+}
+
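+/* Subscribe to the ovpn peers multicast group and wait for one
+ * notification (or an error) before returning
+ */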
+static int ovpn_listen_mcast(void)
+{
+ struct nl_sock *sock;
+ struct nl_cb *cb;
+ int mcid, ret;
+
+ sock = nl_socket_alloc();
+ if (!sock) {
+ fprintf(stderr, "cannot allocate netlink socket\n");
+ goto err_free;
+ }
+
+ nl_socket_set_buffer_size(sock, 8192, 8192);
+
+ ret = genl_connect(sock);
+ if (ret < 0) {
+ fprintf(stderr, "cannot connect to generic netlink: %s\n",
+ nl_geterror(ret));
+ goto err_free;
+ }
+
+ mcid = ovpn_get_mcast_id(sock, OVPN_FAMILY_NAME, OVPN_MCGRP_PEERS);
+ if (mcid < 0) {
+ fprintf(stderr, "cannot get mcast group: %s\n",
+ nl_geterror(mcid));
+ goto err_free;
+ }
+
+ ret = nl_socket_add_membership(sock, mcid);
+ if (ret) {
+ fprintf(stderr, "failed to join mcast group: %d\n", ret);
+ goto err_free;
+ }
+
+ ret = 1;
+ cb = nl_cb_alloc(NL_CB_DEFAULT);
+ nl_cb_set(cb, NL_CB_SEQ_CHECK, NL_CB_CUSTOM, nl_seq_check, NULL);
+ nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, ovpn_handle_msg, &ret);
+ nl_cb_err(cb, NL_CB_CUSTOM, ovpn_nl_cb_error, &ret);
+
+ while (ret == 1) {
+ int err = nl_recvmsgs(sock, cb);
+
+ if (err < 0) {
+ fprintf(stderr,
+ "cannot receive netlink message: (%d) %s\n",
+ err, nl_geterror(-err));
+ ret = -1;
+ break;
+ }
+ }
+
+ nl_cb_put(cb);
+err_free:
+ nl_socket_free(sock);
+ return ret;
+}
+
+static void usage(const char *cmd)
+{
+ fprintf(stderr,
+		"Usage: %s <command> <iface> [arguments..]\n",
+ cmd);
+ fprintf(stderr, "where <command> can be one of the following\n\n");
+
+ fprintf(stderr, "* new_iface <iface> [mode]: create new ovpn interface\n");
+ fprintf(stderr, "\tiface: ovpn interface name\n");
+ fprintf(stderr, "\tmode:\n");
+ fprintf(stderr, "\t\t- P2P for peer-to-peer mode (i.e. client)\n");
+ fprintf(stderr, "\t\t- MP for multi-peer mode (i.e. server)\n");
+
+ fprintf(stderr, "* del_iface <iface>: delete ovpn interface\n");
+ fprintf(stderr, "\tiface: ovpn interface name\n");
+
+ fprintf(stderr,
+ "* listen <iface> <lport> <peers_file> [ipv6]: listen for incoming peer TCP connections\n");
+ fprintf(stderr, "\tiface: ovpn interface name\n");
+	fprintf(stderr, "\tlport: TCP port to listen on\n");
+ fprintf(stderr,
+		"\tpeers_file: file containing one peer per line. Line format:\n");
+ fprintf(stderr, "\t\t<peer_id> <vpnaddr>\n");
+ fprintf(stderr,
+		"\tipv6: whether the socket should listen on the IPv6 wildcard address\n");
+
+ fprintf(stderr,
+		"* connect <iface> <peer_id> <raddr> <rport> [key_file]: connect to a remote peer of a TCP-based VPN session\n");
+ fprintf(stderr, "\tiface: ovpn interface name\n");
+ fprintf(stderr, "\tpeer_id: peer ID of the connecting peer\n");
+ fprintf(stderr, "\traddr: peer IP address to connect to\n");
+ fprintf(stderr, "\trport: peer TCP port to connect to\n");
+ fprintf(stderr,
+ "\tkey_file: file containing the symmetric key for encryption\n");
+
+ fprintf(stderr,
+ "* new_peer <iface> <peer_id> <lport> <raddr> <rport> [vpnaddr]: add new peer\n");
+ fprintf(stderr, "\tiface: ovpn interface name\n");
+	fprintf(stderr,
+		"\tpeer_id: peer ID to be used in data packets to/from this peer\n");
+	fprintf(stderr, "\tlport: local UDP port to bind to\n");
+ fprintf(stderr, "\traddr: peer IP address\n");
+ fprintf(stderr, "\trport: peer UDP port\n");
+ fprintf(stderr, "\tvpnaddr: peer VPN IP\n");
+
+ fprintf(stderr,
+ "* new_multi_peer <iface> <lport> <peers_file>: add multiple peers as listed in the file\n");
+ fprintf(stderr, "\tiface: ovpn interface name\n");
+ fprintf(stderr, "\tlport: local UDP port to bind to\n");
+ fprintf(stderr,
+ "\tpeers_file: text file containing one peer per line. Line format:\n");
+ fprintf(stderr, "\t\t<peer_id> <raddr> <rport> <vpnaddr>\n");
+
+ fprintf(stderr,
+ "* set_peer <iface> <peer_id> <keepalive_interval> <keepalive_timeout>: set peer attributes\n");
+ fprintf(stderr, "\tiface: ovpn interface name\n");
+ fprintf(stderr, "\tpeer_id: peer ID of the peer to modify\n");
+ fprintf(stderr,
+ "\tkeepalive_interval: interval for sending ping messages\n");
+ fprintf(stderr,
+ "\tkeepalive_timeout: time after which a peer is timed out\n");
+
+ fprintf(stderr, "* del_peer <iface> <peer_id>: delete peer\n");
+ fprintf(stderr, "\tiface: ovpn interface name\n");
+ fprintf(stderr, "\tpeer_id: peer ID of the peer to delete\n");
+
+ fprintf(stderr, "* get_peer <iface> [peer_id]: retrieve peer(s) status\n");
+ fprintf(stderr, "\tiface: ovpn interface name\n");
+ fprintf(stderr,
+ "\tpeer_id: peer ID of the peer to query. All peers are returned if omitted\n");
+
+ fprintf(stderr,
+ "* new_key <iface> <peer_id> <slot> <key_id> <cipher> <key_dir> <key_file>: set data channel key\n");
+ fprintf(stderr, "\tiface: ovpn interface name\n");
+ fprintf(stderr,
+ "\tpeer_id: peer ID of the peer to configure the key for\n");
+ fprintf(stderr, "\tslot: either 1 (primary) or 2 (secondary)\n");
+ fprintf(stderr, "\tkey_id: an ID from 0 to 7\n");
+ fprintf(stderr,
+ "\tcipher: cipher to use, supported: aes (AES-GCM), chachapoly (CHACHA20POLY1305)\n");
+ fprintf(stderr,
+		"\tkey_dir: key direction, must be 0 on one host and 1 on the other\n");
+ fprintf(stderr, "\tkey_file: file containing the pre-shared key\n");
+
+ fprintf(stderr,
+ "* del_key <iface> <peer_id> [slot]: erase existing data channel key\n");
+ fprintf(stderr, "\tiface: ovpn interface name\n");
+ fprintf(stderr, "\tpeer_id: peer ID of the peer to modify\n");
+ fprintf(stderr, "\tslot: slot to erase. PRIMARY if omitted\n");
+
+ fprintf(stderr,
+		"* get_key <iface> <peer_id> <slot>: retrieve non-sensitive key data\n");
+ fprintf(stderr, "\tiface: ovpn interface name\n");
+ fprintf(stderr, "\tpeer_id: peer ID of the peer to query\n");
+ fprintf(stderr, "\tslot: either 1 (primary) or 2 (secondary)\n");
+
+ fprintf(stderr,
+ "* swap_keys <iface> <peer_id>: swap content of primary and secondary key slots\n");
+ fprintf(stderr, "\tiface: ovpn interface name\n");
+ fprintf(stderr, "\tpeer_id: peer ID of the peer to modify\n");
+
+ fprintf(stderr,
+ "* listen_mcast: listen to ovpn netlink multicast messages\n");
+}
+
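+/* Resolve the remote transport address and/or the peer VPN IP via
+ * getaddrinfo() and store the result in the ovpn context
+ */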
+static int ovpn_parse_remote(struct ovpn_ctx *ovpn, const char *host,
+ const char *service, const char *vpnip)
+{
+ int ret;
+ struct addrinfo *result;
+ struct addrinfo hints = {
+ .ai_family = ovpn->sa_family,
+ .ai_socktype = SOCK_DGRAM,
+ .ai_protocol = IPPROTO_UDP
+ };
+
+ if (host) {
+ ret = getaddrinfo(host, service, &hints, &result);
+		/* result is only valid when getaddrinfo() succeeds */
+		if (ret)
+			return -1;
+
+ if (!(result->ai_family == AF_INET &&
+ result->ai_addrlen == sizeof(struct sockaddr_in)) &&
+ !(result->ai_family == AF_INET6 &&
+ result->ai_addrlen == sizeof(struct sockaddr_in6))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ovpn->remote, result->ai_addr, result->ai_addrlen);
+ }
+
+ if (vpnip) {
+ ret = getaddrinfo(vpnip, NULL, &hints, &result);
+		/* result is only valid when getaddrinfo() succeeds */
+		if (ret)
+			return -1;
+
+ if (!(result->ai_family == AF_INET &&
+ result->ai_addrlen == sizeof(struct sockaddr_in)) &&
+ !(result->ai_family == AF_INET6 &&
+ result->ai_addrlen == sizeof(struct sockaddr_in6))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ovpn->peer_ip, result->ai_addr, result->ai_addrlen);
+ ovpn->sa_family = result->ai_family;
+
+ ovpn->peer_ip_set = true;
+ }
+
+ ret = 0;
+out:
+ freeaddrinfo(result);
+ return ret;
+}
+
+static int ovpn_parse_new_peer(struct ovpn_ctx *ovpn, const char *peer_id,
+ const char *raddr, const char *rport,
+ const char *vpnip)
+{
+ ovpn->peer_id = strtoul(peer_id, NULL, 10);
+ if (errno == ERANGE || ovpn->peer_id > PEER_ID_UNDEF) {
+ fprintf(stderr, "peer ID value out of range\n");
+ return -1;
+ }
+
+ return ovpn_parse_remote(ovpn, raddr, rport, vpnip);
+}
+
+static int ovpn_parse_key_slot(const char *arg, struct ovpn_ctx *ovpn)
+{
+ int slot = strtoul(arg, NULL, 10);
+
+ if (errno == ERANGE || slot < 1 || slot > 2) {
+ fprintf(stderr, "key slot out of range\n");
+ return -1;
+ }
+
+ switch (slot) {
+ case 1:
+ ovpn->key_slot = OVPN_KEY_SLOT_PRIMARY;
+ break;
+ case 2:
+ ovpn->key_slot = OVPN_KEY_SLOT_SECONDARY;
+ break;
+ }
+
+ return 0;
+}
+
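+/* Send a dummy 1000 byte payload prefixed by its 2 byte length in
+ * network order, the framing OpenVPN uses on TCP links
+ */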
+static int ovpn_send_tcp_data(int socket)
+{
+ uint16_t len = htons(1000);
+ uint8_t buf[1002];
+ int ret;
+
+ memcpy(buf, &len, sizeof(len));
+ memset(buf + sizeof(len), 0x86, sizeof(buf) - sizeof(len));
+
+ ret = send(socket, buf, sizeof(buf), MSG_NOSIGNAL);
+
+	fprintf(stdout, "Sent %d bytes over TCP socket\n", ret);
+
+ return ret > 0 ? 0 : ret;
+}
+
+static int ovpn_recv_tcp_data(int socket)
+{
+ uint8_t buf[1002];
+ uint16_t len;
+ int ret;
+
+ ret = recv(socket, buf, sizeof(buf), MSG_NOSIGNAL);
+
+ if (ret < 2) {
+ fprintf(stderr, ">>>> Error while reading TCP data: %d\n", ret);
+ return ret;
+ }
+
+ memcpy(&len, buf, sizeof(len));
+ len = ntohs(len);
+
+	fprintf(stdout, ">>>> Received %d bytes over TCP socket, header: %u\n",
+ ret, len);
+
+ return 0;
+}
+
+static enum ovpn_cmd ovpn_parse_cmd(const char *cmd)
+{
+ if (!strcmp(cmd, "new_iface"))
+ return CMD_NEW_IFACE;
+
+ if (!strcmp(cmd, "del_iface"))
+ return CMD_DEL_IFACE;
+
+ if (!strcmp(cmd, "listen"))
+ return CMD_LISTEN;
+
+ if (!strcmp(cmd, "connect"))
+ return CMD_CONNECT;
+
+ if (!strcmp(cmd, "new_peer"))
+ return CMD_NEW_PEER;
+
+ if (!strcmp(cmd, "new_multi_peer"))
+ return CMD_NEW_MULTI_PEER;
+
+ if (!strcmp(cmd, "set_peer"))
+ return CMD_SET_PEER;
+
+ if (!strcmp(cmd, "del_peer"))
+ return CMD_DEL_PEER;
+
+ if (!strcmp(cmd, "get_peer"))
+ return CMD_GET_PEER;
+
+ if (!strcmp(cmd, "new_key"))
+ return CMD_NEW_KEY;
+
+ if (!strcmp(cmd, "del_key"))
+ return CMD_DEL_KEY;
+
+ if (!strcmp(cmd, "get_key"))
+ return CMD_GET_KEY;
+
+ if (!strcmp(cmd, "swap_keys"))
+ return CMD_SWAP_KEYS;
+
+ if (!strcmp(cmd, "listen_mcast"))
+ return CMD_LISTEN_MCAST;
+
+ return CMD_INVALID;
+}
+
+/* Send the process to the background and wait for a signal.
+ *
+ * This helper is called at the end of commands that create sockets,
+ * so that the sockets stay alive along with the process that created
+ * them.
+ *
+ * A signal is expected to be delivered in order to terminate the
+ * waiting process.
+ */
+static void ovpn_waitbg(void)
+{
+ daemon(1, 1);
+ pause();
+}
+
+static int ovpn_run_cmd(struct ovpn_ctx *ovpn)
+{
+ char peer_id[10], vpnip[INET6_ADDRSTRLEN], raddr[128], rport[10];
+ int n, ret;
+ FILE *fp;
+
+ switch (ovpn->cmd) {
+ case CMD_NEW_IFACE:
+ ret = ovpn_new_iface(ovpn);
+ break;
+ case CMD_DEL_IFACE:
+ ret = ovpn_del_iface(ovpn);
+ break;
+ case CMD_LISTEN:
+ ret = ovpn_listen(ovpn, ovpn->sa_family);
+ if (ret < 0) {
+ fprintf(stderr, "cannot listen on TCP socket\n");
+ return ret;
+ }
+
+ fp = fopen(ovpn->peers_file, "r");
+ if (!fp) {
+ fprintf(stderr, "cannot open file: %s\n",
+ ovpn->peers_file);
+ return -1;
+ }
+
+ int num_peers = 0;
+
+ while ((n = fscanf(fp, "%s %s\n", peer_id, vpnip)) == 2) {
+ struct ovpn_ctx peer_ctx = { 0 };
+
+ if (num_peers == MAX_PEERS) {
+ fprintf(stderr, "max peers reached!\n");
+ return -E2BIG;
+ }
+
+ peer_ctx.ifindex = ovpn->ifindex;
+ peer_ctx.sa_family = ovpn->sa_family;
+
+ peer_ctx.socket = ovpn_accept(ovpn);
+ if (peer_ctx.socket < 0) {
+ fprintf(stderr, "cannot accept connection!\n");
+ return -1;
+ }
+
+ /* store peer sockets to test TCP I/O */
+ ovpn->cli_sockets[num_peers] = peer_ctx.socket;
+
+ ret = ovpn_parse_new_peer(&peer_ctx, peer_id, NULL,
+ NULL, vpnip);
+ if (ret < 0) {
+ fprintf(stderr, "error while parsing line\n");
+ return -1;
+ }
+
+ ret = ovpn_new_peer(&peer_ctx, true);
+ if (ret < 0) {
+ fprintf(stderr,
+ "cannot add peer to VPN: %s %s\n",
+ peer_id, vpnip);
+ return ret;
+ }
+ num_peers++;
+ }
+
+ for (int i = 0; i < num_peers; i++) {
+ ret = ovpn_recv_tcp_data(ovpn->cli_sockets[i]);
+ if (ret < 0)
+ break;
+ }
+ ovpn_waitbg();
+ break;
+ case CMD_CONNECT:
+ ret = ovpn_connect(ovpn);
+ if (ret < 0) {
+ fprintf(stderr, "cannot connect TCP socket\n");
+ return ret;
+ }
+
+ ret = ovpn_new_peer(ovpn, true);
+ if (ret < 0) {
+ fprintf(stderr, "cannot add peer to VPN\n");
+ close(ovpn->socket);
+ return ret;
+ }
+
+ if (ovpn->cipher != OVPN_CIPHER_ALG_NONE) {
+ ret = ovpn_new_key(ovpn);
+ if (ret < 0) {
+ fprintf(stderr, "cannot set key\n");
+ return ret;
+ }
+ }
+
+ ret = ovpn_send_tcp_data(ovpn->socket);
+ ovpn_waitbg();
+ break;
+ case CMD_NEW_PEER:
+ ret = ovpn_udp_socket(ovpn, AF_INET6);
+ if (ret < 0)
+ return ret;
+
+ ret = ovpn_new_peer(ovpn, false);
+ ovpn_waitbg();
+ break;
+ case CMD_NEW_MULTI_PEER:
+ ret = ovpn_udp_socket(ovpn, AF_INET6);
+ if (ret < 0)
+ return ret;
+
+ fp = fopen(ovpn->peers_file, "r");
+ if (!fp) {
+ fprintf(stderr, "cannot open file: %s\n",
+ ovpn->peers_file);
+ return -1;
+ }
+
+ while ((n = fscanf(fp, "%s %s %s %s\n", peer_id, raddr, rport,
+ vpnip)) == 4) {
+ struct ovpn_ctx peer_ctx = { 0 };
+
+ peer_ctx.ifindex = ovpn->ifindex;
+ peer_ctx.socket = ovpn->socket;
+ peer_ctx.sa_family = AF_UNSPEC;
+
+ ret = ovpn_parse_new_peer(&peer_ctx, peer_id, raddr,
+ rport, vpnip);
+ if (ret < 0) {
+ fprintf(stderr, "error while parsing line\n");
+ return -1;
+ }
+
+ ret = ovpn_new_peer(&peer_ctx, false);
+ if (ret < 0) {
+ fprintf(stderr,
+ "cannot add peer to VPN: %s %s %s %s\n",
+ peer_id, raddr, rport, vpnip);
+ return ret;
+ }
+ }
+ ovpn_waitbg();
+ break;
+ case CMD_SET_PEER:
+ ret = ovpn_set_peer(ovpn);
+ break;
+ case CMD_DEL_PEER:
+ ret = ovpn_del_peer(ovpn);
+ break;
+ case CMD_GET_PEER:
+ if (ovpn->peer_id == PEER_ID_UNDEF)
+ fprintf(stderr, "List of peers connected to: %s\n",
+ ovpn->ifname);
+
+ ret = ovpn_get_peer(ovpn);
+ break;
+ case CMD_NEW_KEY:
+ ret = ovpn_new_key(ovpn);
+ break;
+ case CMD_DEL_KEY:
+ ret = ovpn_del_key(ovpn);
+ break;
+ case CMD_GET_KEY:
+ ret = ovpn_get_key(ovpn);
+ break;
+ case CMD_SWAP_KEYS:
+ ret = ovpn_swap_keys(ovpn);
+ break;
+ case CMD_LISTEN_MCAST:
+ ret = ovpn_listen_mcast();
+ break;
+ case CMD_INVALID:
+ break;
+ }
+
+ return ret;
+}
+
+static int ovpn_parse_cmd_args(struct ovpn_ctx *ovpn, int argc, char *argv[])
+{
+ int ret;
+
+ /* no args required for LISTEN_MCAST */
+ if (ovpn->cmd == CMD_LISTEN_MCAST)
+ return 0;
+
+ /* all commands need an ifname */
+ if (argc < 3)
+ return -EINVAL;
+
+ strscpy(ovpn->ifname, argv[2], IFNAMSIZ - 1);
+ ovpn->ifname[IFNAMSIZ - 1] = '\0';
+
+	/* all commands, except NEW_IFACE, need an ifindex */
+ if (ovpn->cmd != CMD_NEW_IFACE) {
+ ovpn->ifindex = if_nametoindex(ovpn->ifname);
+ if (!ovpn->ifindex) {
+ fprintf(stderr, "cannot find interface: %s\n",
+ strerror(errno));
+ return -1;
+ }
+ }
+
+ switch (ovpn->cmd) {
+ case CMD_NEW_IFACE:
+ if (argc < 4)
+ break;
+
+ if (!strcmp(argv[3], "P2P")) {
+ ovpn->mode = OVPN_MODE_P2P;
+ } else if (!strcmp(argv[3], "MP")) {
+ ovpn->mode = OVPN_MODE_MP;
+ } else {
+ fprintf(stderr, "Cannot parse iface mode: %s\n",
+ argv[3]);
+ return -1;
+ }
+ ovpn->mode_set = true;
+ break;
+ case CMD_DEL_IFACE:
+ break;
+ case CMD_LISTEN:
+ if (argc < 5)
+ return -EINVAL;
+
+ ovpn->lport = strtoul(argv[3], NULL, 10);
+ if (errno == ERANGE || ovpn->lport > 65535) {
+ fprintf(stderr, "lport value out of range\n");
+ return -1;
+ }
+
+ ovpn->peers_file = argv[4];
+
+ if (argc > 5 && !strcmp(argv[5], "ipv6"))
+ ovpn->sa_family = AF_INET6;
+ break;
+ case CMD_CONNECT:
+ if (argc < 6)
+ return -EINVAL;
+
+ ovpn->sa_family = AF_INET;
+
+ ret = ovpn_parse_new_peer(ovpn, argv[3], argv[4], argv[5],
+ NULL);
+ if (ret < 0) {
+ fprintf(stderr, "Cannot parse remote peer data\n");
+ return -1;
+ }
+
+ if (argc > 6) {
+ ovpn->key_slot = OVPN_KEY_SLOT_PRIMARY;
+ ovpn->key_id = 0;
+ ovpn->cipher = OVPN_CIPHER_ALG_AES_GCM;
+ ovpn->key_dir = KEY_DIR_OUT;
+
+ ret = ovpn_parse_key(argv[6], ovpn);
+ if (ret)
+ return -1;
+ }
+ break;
+ case CMD_NEW_PEER:
+ if (argc < 7)
+ return -EINVAL;
+
+ ovpn->lport = strtoul(argv[4], NULL, 10);
+ if (errno == ERANGE || ovpn->lport > 65535) {
+ fprintf(stderr, "lport value out of range\n");
+ return -1;
+ }
+
+ const char *vpnip = (argc > 7) ? argv[7] : NULL;
+
+ ret = ovpn_parse_new_peer(ovpn, argv[3], argv[5], argv[6],
+ vpnip);
+ if (ret < 0)
+ return -1;
+ break;
+ case CMD_NEW_MULTI_PEER:
+ if (argc < 5)
+ return -EINVAL;
+
+ ovpn->lport = strtoul(argv[3], NULL, 10);
+ if (errno == ERANGE || ovpn->lport > 65535) {
+ fprintf(stderr, "lport value out of range\n");
+ return -1;
+ }
+
+ ovpn->peers_file = argv[4];
+ break;
+ case CMD_SET_PEER:
+ if (argc < 6)
+ return -EINVAL;
+
+ ovpn->peer_id = strtoul(argv[3], NULL, 10);
+ if (errno == ERANGE || ovpn->peer_id > PEER_ID_UNDEF) {
+ fprintf(stderr, "peer ID value out of range\n");
+ return -1;
+ }
+
+ ovpn->keepalive_interval = strtoul(argv[4], NULL, 10);
+ if (errno == ERANGE) {
+ fprintf(stderr,
+ "keepalive interval value out of range\n");
+ return -1;
+ }
+
+ ovpn->keepalive_timeout = strtoul(argv[5], NULL, 10);
+ if (errno == ERANGE) {
+ fprintf(stderr,
+				"keepalive timeout value out of range\n");
+ return -1;
+ }
+ break;
+ case CMD_DEL_PEER:
+ if (argc < 4)
+ return -EINVAL;
+
+ ovpn->peer_id = strtoul(argv[3], NULL, 10);
+ if (errno == ERANGE || ovpn->peer_id > PEER_ID_UNDEF) {
+ fprintf(stderr, "peer ID value out of range\n");
+ return -1;
+ }
+ break;
+ case CMD_GET_PEER:
+ ovpn->peer_id = PEER_ID_UNDEF;
+ if (argc > 3) {
+ ovpn->peer_id = strtoul(argv[3], NULL, 10);
+ if (errno == ERANGE || ovpn->peer_id > PEER_ID_UNDEF) {
+ fprintf(stderr, "peer ID value out of range\n");
+ return -1;
+ }
+ }
+ break;
+ case CMD_NEW_KEY:
+ if (argc < 9)
+ return -EINVAL;
+
+ ovpn->peer_id = strtoul(argv[3], NULL, 10);
+ if (errno == ERANGE) {
+ fprintf(stderr, "peer ID value out of range\n");
+ return -1;
+ }
+
+ ret = ovpn_parse_key_slot(argv[4], ovpn);
+ if (ret)
+ return -1;
+
+ ovpn->key_id = strtoul(argv[5], NULL, 10);
+ if (errno == ERANGE || ovpn->key_id > 2) {
+ fprintf(stderr, "key ID out of range\n");
+ return -1;
+ }
+
+ ret = ovpn_parse_cipher(argv[6], ovpn);
+ if (ret < 0)
+ return -1;
+
+ ret = ovpn_parse_key_direction(argv[7], ovpn);
+ if (ret < 0)
+ return -1;
+
+ ret = ovpn_parse_key(argv[8], ovpn);
+ if (ret)
+ return -1;
+ break;
+ case CMD_DEL_KEY:
+ if (argc < 4)
+ return -EINVAL;
+
+ ovpn->peer_id = strtoul(argv[3], NULL, 10);
+ if (errno == ERANGE) {
+ fprintf(stderr, "peer ID value out of range\n");
+ return -1;
+ }
+
+		/* slot is optional: default to PRIMARY when omitted */
+		ovpn->key_slot = OVPN_KEY_SLOT_PRIMARY;
+		if (argc > 4) {
+			ret = ovpn_parse_key_slot(argv[4], ovpn);
+			if (ret)
+				return ret;
+		}
+ break;
+ case CMD_GET_KEY:
+ if (argc < 5)
+ return -EINVAL;
+
+ ovpn->peer_id = strtoul(argv[3], NULL, 10);
+ if (errno == ERANGE) {
+ fprintf(stderr, "peer ID value out of range\n");
+ return -1;
+ }
+
+ ret = ovpn_parse_key_slot(argv[4], ovpn);
+ if (ret)
+ return ret;
+ break;
+ case CMD_SWAP_KEYS:
+ if (argc < 4)
+ return -EINVAL;
+
+ ovpn->peer_id = strtoul(argv[3], NULL, 10);
+ if (errno == ERANGE) {
+ fprintf(stderr, "peer ID value out of range\n");
+ return -1;
+ }
+ break;
+ case CMD_LISTEN_MCAST:
+ break;
+ case CMD_INVALID:
+ break;
+ }
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ struct ovpn_ctx ovpn;
+ int ret;
+
+ if (argc < 2) {
+ usage(argv[0]);
+ return -1;
+ }
+
+ memset(&ovpn, 0, sizeof(ovpn));
+ ovpn.sa_family = AF_INET;
+ ovpn.cipher = OVPN_CIPHER_ALG_NONE;
+
+ ovpn.cmd = ovpn_parse_cmd(argv[1]);
+ if (ovpn.cmd == CMD_INVALID) {
+ fprintf(stderr, "Error: unknown command.\n\n");
+ usage(argv[0]);
+ return -1;
+ }
+
+ ret = ovpn_parse_cmd_args(&ovpn, argc, argv);
+ if (ret < 0) {
+ fprintf(stderr, "Error: invalid arguments.\n\n");
+ if (ret == -EINVAL)
+ usage(argv[0]);
+ return ret;
+ }
+
+ ret = ovpn_run_cmd(&ovpn);
+ if (ret)
+ fprintf(stderr, "Cannot execute command: %s (%d)\n",
+ strerror(-ret), ret);
+
+ return ret;
+}
diff --git a/tools/testing/selftests/net/ovpn/tcp_peers.txt b/tools/testing/selftests/net/ovpn/tcp_peers.txt
new file mode 100644
index 000000000000..d753eebe8716
--- /dev/null
+++ b/tools/testing/selftests/net/ovpn/tcp_peers.txt
@@ -0,0 +1,5 @@
+1 5.5.5.2
+2 5.5.5.3
+3 5.5.5.4
+4 5.5.5.5
+5 5.5.5.6
diff --git a/tools/testing/selftests/net/ovpn/test-chachapoly.sh b/tools/testing/selftests/net/ovpn/test-chachapoly.sh
new file mode 100755
index 000000000000..32504079a2b8
--- /dev/null
+++ b/tools/testing/selftests/net/ovpn/test-chachapoly.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2025 OpenVPN, Inc.
+#
+# Author: Antonio Quartulli <antonio@openvpn.net>
+
+ALG="chachapoly"
+
+source test.sh
diff --git a/tools/testing/selftests/net/ovpn/test-close-socket-tcp.sh b/tools/testing/selftests/net/ovpn/test-close-socket-tcp.sh
new file mode 100755
index 000000000000..093d44772ffd
--- /dev/null
+++ b/tools/testing/selftests/net/ovpn/test-close-socket-tcp.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2025 OpenVPN, Inc.
+#
+# Author: Antonio Quartulli <antonio@openvpn.net>
+
+PROTO="TCP"
+
+source test-close-socket.sh
diff --git a/tools/testing/selftests/net/ovpn/test-close-socket.sh b/tools/testing/selftests/net/ovpn/test-close-socket.sh
new file mode 100755
index 000000000000..5e48a8b67928
--- /dev/null
+++ b/tools/testing/selftests/net/ovpn/test-close-socket.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2020-2025 OpenVPN, Inc.
+#
+# Author: Antonio Quartulli <antonio@openvpn.net>
+
+#set -x
+set -e
+
+source ./common.sh
+
+cleanup
+
+modprobe -q ovpn || true
+
+for p in $(seq 0 ${NUM_PEERS}); do
+ create_ns ${p}
+done
+
+for p in $(seq 0 ${NUM_PEERS}); do
+ setup_ns ${p} 5.5.5.$((${p} + 1))/24
+done
+
+for p in $(seq 0 ${NUM_PEERS}); do
+ add_peer ${p}
+done
+
+for p in $(seq 1 ${NUM_PEERS}); do
+ ip netns exec peer0 ${OVPN_CLI} set_peer tun0 ${p} 60 120
+ ip netns exec peer${p} ${OVPN_CLI} set_peer tun${p} ${p} 60 120
+done
+
+sleep 1
+
+for p in $(seq 1 ${NUM_PEERS}); do
+ ip netns exec peer0 ping -qfc 500 -w 3 5.5.5.$((${p} + 1))
+done
+
+ip netns exec peer0 iperf3 -1 -s &
+sleep 1
+ip netns exec peer1 iperf3 -Z -t 3 -c 5.5.5.1
+
+cleanup
+
+modprobe -r ovpn || true
diff --git a/tools/testing/selftests/net/ovpn/test-float.sh b/tools/testing/selftests/net/ovpn/test-float.sh
new file mode 100755
index 000000000000..ba5d725e18b0
--- /dev/null
+++ b/tools/testing/selftests/net/ovpn/test-float.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2025 OpenVPN, Inc.
+#
+# Author: Antonio Quartulli <antonio@openvpn.net>
+
+FLOAT="1"
+
+source test.sh
diff --git a/tools/testing/selftests/net/ovpn/test-tcp.sh b/tools/testing/selftests/net/ovpn/test-tcp.sh
new file mode 100755
index 000000000000..ba3f1f315a34
--- /dev/null
+++ b/tools/testing/selftests/net/ovpn/test-tcp.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2025 OpenVPN, Inc.
+#
+# Author: Antonio Quartulli <antonio@openvpn.net>
+
+PROTO="TCP"
+
+source test.sh
diff --git a/tools/testing/selftests/net/ovpn/test.sh b/tools/testing/selftests/net/ovpn/test.sh
new file mode 100755
index 000000000000..7b62897b0240
--- /dev/null
+++ b/tools/testing/selftests/net/ovpn/test.sh
@@ -0,0 +1,113 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2020-2025 OpenVPN, Inc.
+#
+# Author: Antonio Quartulli <antonio@openvpn.net>
+
+#set -x
+set -e
+
+source ./common.sh
+
+cleanup
+
+modprobe -q ovpn || true
+
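+# one network namespace for peer0 plus one for each of the NUM_PEERS peers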
+for p in $(seq 0 ${NUM_PEERS}); do
+ create_ns ${p}
+done
+
+for p in $(seq 0 ${NUM_PEERS}); do
+ setup_ns ${p} 5.5.5.$((${p} + 1))/24
+done
+
+for p in $(seq 0 ${NUM_PEERS}); do
+ add_peer ${p}
+done
+
+for p in $(seq 1 ${NUM_PEERS}); do
+ ip netns exec peer0 ${OVPN_CLI} set_peer tun0 ${p} 60 120
+ ip netns exec peer${p} ${OVPN_CLI} set_peer tun${p} ${p} 60 120
+done
+
+sleep 1
+
+for p in $(seq 1 ${NUM_PEERS}); do
+ ip netns exec peer0 ping -qfc 500 -w 3 5.5.5.$((${p} + 1))
+done
+
+if [ "$FLOAT" == "1" ]; then
+	# make the clients float, i.e. change their transport source address
+ for p in $(seq 1 ${NUM_PEERS}); do
+ ip -n peer${p} addr del 10.10.${p}.2/24 dev veth${p}
+ ip -n peer${p} addr add 10.10.${p}.3/24 dev veth${p}
+ done
+ for p in $(seq 1 ${NUM_PEERS}); do
+ ip netns exec peer${p} ping -qfc 500 -w 3 5.5.5.1
+ done
+fi
+
+ip netns exec peer0 iperf3 -1 -s &
+sleep 1
+ip netns exec peer1 iperf3 -Z -t 3 -c 5.5.5.1
+
+echo "Adding secondary key and then swap:"
+for p in $(seq 1 ${NUM_PEERS}); do
+ ip netns exec peer0 ${OVPN_CLI} new_key tun0 ${p} 2 1 ${ALG} 0 data64.key
+ ip netns exec peer${p} ${OVPN_CLI} new_key tun${p} ${p} 2 1 ${ALG} 1 data64.key
+ ip netns exec peer${p} ${OVPN_CLI} swap_keys tun${p} ${p}
+done
+
+sleep 1
+
+echo "Querying all peers:"
+ip netns exec peer0 ${OVPN_CLI} get_peer tun0
+ip netns exec peer1 ${OVPN_CLI} get_peer tun1
+
+echo "Querying peer 1:"
+ip netns exec peer0 ${OVPN_CLI} get_peer tun0 1
+
+echo "Querying non-existent peer 10:"
+ip netns exec peer0 ${OVPN_CLI} get_peer tun0 10 || true
+
+echo "Deleting peer 1:"
+ip netns exec peer0 ${OVPN_CLI} del_peer tun0 1
+ip netns exec peer1 ${OVPN_CLI} del_peer tun1 1
+
+echo "Querying keys:"
+for p in $(seq 2 ${NUM_PEERS}); do
+ ip netns exec peer${p} ${OVPN_CLI} get_key tun${p} ${p} 1
+ ip netns exec peer${p} ${OVPN_CLI} get_key tun${p} ${p} 2
+done
+
+echo "Deleting peer while sending traffic:"
+(ip netns exec peer2 ping -qf -w 4 5.5.5.1)&
+sleep 2
+ip netns exec peer0 ${OVPN_CLI} del_peer tun0 2
+# following command fails in TCP mode
+# (both ends get conn reset when one peer disconnects)
+ip netns exec peer2 ${OVPN_CLI} del_peer tun2 2 || true
+
+echo "Deleting keys:"
+for p in $(seq 3 ${NUM_PEERS}); do
+ ip netns exec peer${p} ${OVPN_CLI} del_key tun${p} ${p} 1
+ ip netns exec peer${p} ${OVPN_CLI} del_key tun${p} ${p} 2
+done
+
+echo "Setting timeout to 3s MP:"
+for p in $(seq 3 ${NUM_PEERS}); do
+ ip netns exec peer0 ${OVPN_CLI} set_peer tun0 ${p} 3 3 || true
+ ip netns exec peer${p} ${OVPN_CLI} set_peer tun${p} ${p} 0 0
+done
+# wait for peers to timeout
+sleep 5
+
+echo "Setting timeout to 3s P2P:"
+for p in $(seq 3 ${NUM_PEERS}); do
+ ip netns exec peer${p} ${OVPN_CLI} set_peer tun${p} ${p} 3 3
+done
+sleep 5
+
+cleanup
+
+modprobe -r ovpn || true
diff --git a/tools/testing/selftests/net/ovpn/udp_peers.txt b/tools/testing/selftests/net/ovpn/udp_peers.txt
new file mode 100644
index 000000000000..32f14bd9347a
--- /dev/null
+++ b/tools/testing/selftests/net/ovpn/udp_peers.txt
@@ -0,0 +1,5 @@
+1 10.10.1.2 1 5.5.5.2
+2 10.10.2.2 1 5.5.5.3
+3 10.10.3.2 1 5.5.5.4
+4 10.10.4.2 1 5.5.5.5
+5 10.10.5.2 1 5.5.5.6