author     Steffen Klassert <steffen.klassert@secunet.com>  2017-04-14 10:06:50 +0200
committer  Steffen Klassert <steffen.klassert@secunet.com>  2017-04-14 10:06:50 +0200
commit     7862b4058b9f10c9177f347e7d981511bac87213
tree       a33689f6215116d81214287c8c9b8944ea0e0323  /net/ipv4/esp4_offload.c
parent     esp6: Reorganize esp_output
esp: Add gso handlers for esp4 and esp6
This patch extends xfrm_type_offload by an encap function pointer and
implements esp4_gso_encap and esp6_gso_encap. These functions do the
basic ESP encapsulation for a GSO packet. In case the GSO packet needs
to be segmented in software, we add gso_segment functions. This
codepath is going to be used for ESP hardware offloads.

Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
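For context: the "basic ESP encapsulation" mentioned above amounts to writing the 8-byte ESP header (the SPI plus the low 32 bits of the output sequence number) in front of the not-yet-encrypted GSO payload. The following self-contained userspace sketch shows that wire layout; names and sample values are illustrative only, it is not kernel code:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Same wire layout as the kernel's struct ip_esp_hdr, minus the
 * trailing flexible array that holds IV and payload.
 */
struct esp_hdr {
	uint32_t spi;    /* security parameters index, big endian on the wire */
	uint32_t seq_no; /* low 32 bits of the sequence number, big endian */
};

int main(void)
{
	uint8_t buf[sizeof(struct esp_hdr)];
	struct esp_hdr esph = {
		.spi = htonl(0x12345678), /* x->id.spi is already stored big endian */
		.seq_no = htonl(1000),    /* mirrors htonl(...seq.output.low) below */
	};

	memcpy(buf, &esph, sizeof(esph));
	for (size_t i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}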
Diffstat (limited to 'net/ipv4/esp4_offload.c')
-rw-r--r--  net/ipv4/esp4_offload.c  | 93
1 file changed, 93 insertions(+), 0 deletions(-)
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index efaaa44e1073..1e39564cb8b4 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -84,6 +84,97 @@ out:
return NULL;
}
+static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
+{
+ struct ip_esp_hdr *esph;
+ struct iphdr *iph = ip_hdr(skb);
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ int proto = iph->protocol;
+
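+ /* Set skb->data back to the network header; the ESP header itself sits at the transport offset. */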
+ skb_push(skb, -skb_network_offset(skb));
+ esph = ip_esp_hdr(skb);
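+ /* The xfrm mode output pointed the mac header at the outer next-protocol field; mark the packet as ESP. */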
+ *skb_mac_header(skb) = IPPROTO_ESP;
+
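+ /* Prime the ESP header with the SPI and the low 32 bits of the output sequence number. */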
+ esph->spi = x->id.spi;
+ esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+
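+ /* Remember the original payload protocol for the offload xmit path. */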
+ xo->proto = proto;
+}
+
+static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ __u32 seq;
+ int err = 0;
+ struct sk_buff *skb2;
+ struct xfrm_state *x;
+ struct ip_esp_hdr *esph;
+ struct crypto_aead *aead;
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ netdev_features_t esp_features = features;
+ struct xfrm_offload *xo = xfrm_offload(skb);
+
+ if (!xo)
+ goto out;
+
+ seq = xo->seq.low;
+
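+ /* The state being offloaded is the last one on the sec_path. */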
+ x = skb->sp->xvec[skb->sp->len - 1];
+ aead = x->data;
+ esph = ip_esp_hdr(skb);
+
+ if (esph->spi != x->id.spi)
+ goto out;
+
+ if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
+ goto out;
+
+ __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
+
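+ /* Checksums of the encapsulated headers must be finished in software before encryption. */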
+ skb->encap_hdr_csum = 1;
+
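+ /* Without ESP hardware offload each segment is encrypted by the
+  * software fallback, so do not hand down SG or checksum offload.
+  */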
+ if (!(features & NETIF_F_HW_ESP))
+ esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+
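+ /* Let the mode specific (transport/tunnel) handler do the actual segmentation. */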
+ segs = x->outer_mode->gso_segment(x, skb, esp_features);
+ if (IS_ERR_OR_NULL(segs))
+ goto out;
+
+ __skb_pull(skb, skb->data - skb_mac_header(skb));
+
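+ /* Walk the segment list and set up the xfrm offload state for each segment. */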
+ skb2 = segs;
+ do {
+ struct sk_buff *nskb = skb2->next;
+
+ xo = xfrm_offload(skb2);
+ xo->flags |= XFRM_GSO_SEGMENT;
+ xo->seq.low = seq;
+ xo->seq.hi = xfrm_replay_seqhi(x, seq);
+
+ if (!(features & NETIF_F_HW_ESP))
+ xo->flags |= CRYPTO_FALLBACK;
+
+ x->outer_mode->xmit(x, skb2);
+
+ err = x->type_offload->xmit(x, skb2, esp_features);
+ if (err) {
+ kfree_skb_list(segs);
+ return ERR_PTR(err);
+ }
+
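+ /* A segment that is still GSO consumes gso_segs sequence numbers, a plain one exactly one. */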
+ if (!skb_is_gso(skb2))
+ seq++;
+ else
+ seq += skb_shinfo(skb2)->gso_segs;
+
+ skb_push(skb2, skb2->mac_len);
+ skb2 = nskb;
+ } while (skb2);
+
+out:
+ return segs;
+}
+
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
struct crypto_aead *aead = x->data;
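A note on the sequence number bookkeeping in the segment walk above: a buffer returned by the mode's gso_segment handler may itself still be a GSO packet covering several wire packets, in which case it consumes gso_segs sequence numbers; otherwise it consumes exactly one. A runnable userspace sketch of just that accounting (sample data made up for illustration):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for one buffer coming back from segmentation: it may still
 * be a GSO packet that covers several wire packets.
 */
struct seg {
	int is_gso;        /* skb_is_gso(skb2) */
	uint32_t gso_segs; /* skb_shinfo(skb2)->gso_segs */
};

int main(void)
{
	struct seg segs[] = { { 1, 4 }, { 0, 0 }, { 1, 2 }, { 0, 0 } };
	uint32_t seq = 1000; /* xo->seq.low of the original GSO packet */
	size_t i;

	for (i = 0; i < sizeof(segs) / sizeof(segs[0]); i++) {
		printf("segment %zu starts at sequence number %u\n", i, seq);
		seq += segs[i].is_gso ? segs[i].gso_segs : 1;
	}
	return 0;
}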
@@ -173,6 +264,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
static const struct net_offload esp4_offload = {
.callbacks = {
.gro_receive = esp4_gro_receive,
+ .gso_segment = esp4_gso_segment,
},
};
@@ -182,6 +274,7 @@ static const struct xfrm_type_offload esp_type_offload = {
.proto = IPPROTO_ESP,
.input_tail = esp_input_tail,
.xmit = esp_xmit,
+ .encap = esp4_gso_encap,
};
static int __init esp4_offload_init(void)