Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--  net/core/skbuff.c  44
1 file changed, 44 insertions, 0 deletions
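The diff below adds skb_memzero_explicit(), which zeroes every allocated byte of a socket buffer: the linear head, each paged fragment (taking a temporary kmap_atomic() mapping when the fragment cannot be addressed directly), and, recursively, any skbs chained on the frag_list. memzero_explicit() is used instead of memset() so the compiler cannot optimize the stores away. As a rough illustration of the intended use, here is a minimal caller sketch; the handler name handle_handshake_packet() and its surrounding context are assumptions for the example, not part of the patch.

#include <linux/skbuff.h>

/* Hypothetical consumer: a handler that extracts secret material from an
 * skb and wants the plaintext gone before the buffer is recycled. */
static void handle_handshake_packet(struct sk_buff *skb)
{
	/* ... parse the packet and copy out any secret payload ... */

	/* Wipe head, frags and frag_list before the memory is reused. */
	skb_memzero_explicit(skb);

	kfree_skb(skb);
}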
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8e51f8555e11..2e573935a7b9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4016,6 +4016,50 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
return elt;
}

+static void __skb_memzero_explicit(struct sk_buff *skb, unsigned int recursion_level)
+{
+ struct sk_buff *sub_skb;
+ int i;
+
+ if (unlikely(recursion_level >= 24)) {
+ pr_warn("skb not entirely zeroed because of overly complex geometry\n");
+ return;
+ }
+
+ memzero_explicit(skb->data, skb_headlen(skb));
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ void *addr = skb_frag_address_safe(frag);
+ unsigned int len = skb_frag_size(frag);
+
+ if (addr) {
+ memzero_explicit(addr, len);
+ } else {
+ addr = kmap_atomic(skb_frag_page(frag));
+ if (!addr) {
+ pr_warn("skb not entirely zeroed because of impossible page mapping\n");
+ continue;
+ }
+ memzero_explicit(addr + frag->page_offset, len);
+ kunmap_atomic(addr);
+ }
+ }
+
+ skb_walk_frags(skb, sub_skb)
+ __skb_memzero_explicit(sub_skb, recursion_level + 1);
+}
+
+/**
+ * skb_memzero_explicit - Zero out all allocated bytes of skb to hide secrets
+ * @skb: Socket buffer containing the memory to be zeroed
+ */
+void skb_memzero_explicit(struct sk_buff *skb)
+{
+ __skb_memzero_explicit(skb, 0);
+}
+EXPORT_SYMBOL_GPL(skb_memzero_explicit);
+
/**
* skb_to_sgvec - Fill a scatter-gather list from a socket buffer
* @skb: Socket buffer containing the buffers to be mapped