Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--  include/linux/skbuff.h  24
1 file changed, 0 insertions, 24 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b33a3a1f205e..6a2c34e6d962 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -589,9 +589,6 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
 }
 
-extern void skb_recycle(struct sk_buff *skb);
-extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
-
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
@@ -2645,27 +2642,6 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
 
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
 
-static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
-{
-	if (irqs_disabled())
-		return false;
-
-	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
-		return false;
-
-	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
-		return false;
-
-	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
-	if (skb_end_offset(skb) < skb_size)
-		return false;
-
-	if (skb_shared(skb) || skb_cloned(skb))
-		return false;
-
-	return true;
-}
-
 /**
  * skb_head_is_locked - Determine if the skb->head is locked down
  * @skb: skb to check