path: root/include/linux/skbuff.h
author     Daniel Borkmann <dborkman@redhat.com>  2013-10-30 11:50:51 +0100
committer  David S. Miller <davem@davemloft.net>  2013-11-03 23:04:57 -0500
commit     2817a336d4d533fb8b68719723cd60ea7dd7c09e (patch)
tree       71aac3aa2a48588fd0b5372cca9499bd1cbe2a5f /include/linux/skbuff.h
parent     lib: crc32: add test cases for crc32{, c}_combine routines (diff)
net: skb_checksum: allow custom update/combine for walking skb
Currently, skb_checksum walks over 1) linearized, 2) frags[], and 3) frag_list data and calculates the one's complement, a 32 bit result suitable for feeding into itself or csum_tcpudp_magic(), but unsuitable for SCTP as we're calculating CRC32c there. Hence, in order to not re-implement the very same function in SCTP (and maybe other protocols) over and over again, use an update() + combine() callback internally to allow for walking over the skb with different algorithms.

Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
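The .c side of the change is not part of this view, which is limited to include/linux/skbuff.h, but the intent of the callback pair can be illustrated with a short sketch: the existing one's-complement behaviour is kept by routing skb_checksum() through __skb_checksum() with a default ops table built on csum_partial()/csum_block_add(). The wrapper names default_csum_update/default_csum_combine below are illustrative, not taken from the patch.

/* Illustrative sketch only: how the default one's-complement walk could be
 * expressed on top of the new skb_checksum_ops table. The two static
 * wrappers are hypothetical names; only __skb_checksum(), skb_checksum()
 * and struct skb_checksum_ops come from this patch.
 */
#include <linux/skbuff.h>
#include <net/checksum.h>

static __wsum default_csum_update(const void *mem, int len, __wsum wsum)
{
	/* plain one's-complement sum over a contiguous chunk */
	return csum_partial(mem, len, wsum);
}

static __wsum default_csum_combine(__wsum csum, __wsum csum2,
				   int offset, int len)
{
	/* fold a sub-walk result in at the given byte offset */
	return csum_block_add(csum, csum2, offset);
}

__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
		    __wsum csum)
{
	const struct skb_checksum_ops ops = {
		.update  = default_csum_update,
		.combine = default_csum_combine,
	};

	return __skb_checksum(skb, offset, len, csum, &ops);
}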
Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--  include/linux/skbuff.h  13
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2c154976394b..44727b5d4981 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2360,8 +2360,6 @@ int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset,
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
-__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
- __wsum csum);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
@@ -2373,9 +2371,18 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
-
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
+struct skb_checksum_ops {
+ __wsum (*update)(const void *mem, int len, __wsum wsum);
+ __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
+};
+
+__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
+ __wsum csum, const struct skb_checksum_ops *ops);
+__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
+ __wsum csum);
+
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
int len, void *buffer)
{
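On the other side of the API, a CRC32c-based protocol such as SCTP can supply its own ops table instead of duplicating the skb walk. The sketch below is an assumption of how such a caller might look, built on crc32c() from <linux/crc32c.h> and the __crc32c_le_combine() helper added earlier in this series; the names sctp_csum_update, sctp_csum_combine and sctp_compute_cksum are illustrative, not part of this patch.

/* Hypothetical CRC32c caller of __skb_checksum(); function names are
 * illustrative. Assumes crc32c() and __crc32c_le_combine() are available.
 */
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>

static __wsum sctp_csum_update(const void *mem, int len, __wsum wsum)
{
	/* CRC32c over this chunk, seeded with the running value */
	return (__force __wsum)crc32c((__force __u32)wsum, mem, len);
}

static __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
				int offset, int len)
{
	/* combining two CRC32c values only needs the length of the
	 * second block, not the byte offset
	 */
	return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
						   (__force __u32)csum2, len);
}

static const struct skb_checksum_ops sctp_csum_ops = {
	.update  = sctp_csum_update,
	.combine = sctp_csum_combine,
};

__le32 sctp_compute_cksum(const struct sk_buff *skb, unsigned int offset)
{
	/* walk linear data, frags[] and frag_list with CRC32c callbacks */
	__wsum crc = __skb_checksum(skb, offset, skb->len - offset,
				    (__force __wsum)~0U, &sctp_csum_ops);

	return cpu_to_le32(~(__force __u32)crc);
}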