From f703651ef870bd6b94ddc98ae07488b7d3fd9335 Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Tue, 1 Feb 2011 15:20:14 +0100 Subject: netfilter: NFNL_SUBSYS_IPSET id and NLA_PUT_NET* macros The patch adds the NFNL_SUBSYS_IPSET id and NLA_PUT_NET* macros to the vanilla kernel. Signed-off-by: Jozsef Kadlecsik Signed-off-by: Patrick McHardy --- include/linux/netfilter/nfnetlink.h | 3 ++- include/net/netlink.h | 9 +++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 361d6b5630ee..2b11fc1a86be 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -47,7 +47,8 @@ struct nfgenmsg { #define NFNL_SUBSYS_QUEUE 3 #define NFNL_SUBSYS_ULOG 4 #define NFNL_SUBSYS_OSF 5 -#define NFNL_SUBSYS_COUNT 6 +#define NFNL_SUBSYS_IPSET 6 +#define NFNL_SUBSYS_COUNT 7 #ifdef __KERNEL__ diff --git a/include/net/netlink.h b/include/net/netlink.h index 373f1a900cf4..8a3906a08f5f 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -856,18 +856,27 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype, #define NLA_PUT_BE16(skb, attrtype, value) \ NLA_PUT_TYPE(skb, __be16, attrtype, value) +#define NLA_PUT_NET16(skb, attrtype, value) \ + NLA_PUT_BE16(skb, attrtype | NLA_F_NET_BYTEORDER, value) + #define NLA_PUT_U32(skb, attrtype, value) \ NLA_PUT_TYPE(skb, u32, attrtype, value) #define NLA_PUT_BE32(skb, attrtype, value) \ NLA_PUT_TYPE(skb, __be32, attrtype, value) +#define NLA_PUT_NET32(skb, attrtype, value) \ + NLA_PUT_BE32(skb, attrtype | NLA_F_NET_BYTEORDER, value) + #define NLA_PUT_U64(skb, attrtype, value) \ NLA_PUT_TYPE(skb, u64, attrtype, value) #define NLA_PUT_BE64(skb, attrtype, value) \ NLA_PUT_TYPE(skb, __be64, attrtype, value) +#define NLA_PUT_NET64(skb, attrtype, value) \ + NLA_PUT_BE64(skb, attrtype | NLA_F_NET_BYTEORDER, value) + #define NLA_PUT_STRING(skb, attrtype, value) \ NLA_PUT(skb, attrtype, strlen(value) + 1, value) -- cgit v1.2.3-11-g984f From a7b4f989a629493bb4ec4a354def784d440b32c4 Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Tue, 1 Feb 2011 15:28:35 +0100 Subject: netfilter: ipset: IP set core support The patch adds the IP set core support to the kernel. The IP set core implements a netlink (nfnetlink) based protocol by which one can create, destroy, flush, rename, swap, list, save, restore sets, and add, delete, test elements from userspace. For simplicity (and for backward compatibility, and to avoid forcing ip(6)tables to be linked with a netlink library), a small getsockopt-based protocol is also kept in order to communicate with the ip(6)tables match and target. The netlink protocol passes all u16, etc. values in network order with the NLA_F_NET_BYTEORDER flag. The protocol enforces the proper use of the NLA_F_NESTED and NLA_F_NET_BYTEORDER flags. For other kernel subsystems (netfilter match and target) the API contains the functions to add, delete and test elements in sets and the required calls to get/put references to the sets before those operations can be performed. The set types (which are implemented in independent modules) are stored in a simple RCU-protected list. A set type may have variants: for example without timeout or with timeout support, for IPv4 or for IPv6. The sets (i.e. the pointers to the sets) are stored in an array. The sets are identified by their index in the array, which makes swapping of sets easy and fast.
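To make that byte-order convention concrete, here is a minimal userspace sketch of what the new NLA_PUT_NET32 macro expresses: the attribute type carries the NLA_F_NET_BYTEORDER flag and the payload is stored in network byte order. Only the flag bit mirrors the kernel definition; the attribute id and the timeout value are made up for illustration.

/* Userspace illustration only -- not kernel code and not part of the patch.
 * NLA_F_NET_BYTEORDER mirrors include/linux/netlink.h; DEMO_ATTR_TIMEOUT
 * is a hypothetical attribute id.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define NLA_F_NET_BYTEORDER (1 << 14)   /* same bit as the kernel flag */
#define DEMO_ATTR_TIMEOUT   6           /* hypothetical attribute id */

int main(void)
{
    uint16_t type    = DEMO_ATTR_TIMEOUT | NLA_F_NET_BYTEORDER;
    uint32_t payload = htonl(600);      /* value 600, in network byte order */

    printf("nla_type=0x%04x payload=0x%08x\n",
           (unsigned)type, (unsigned)payload);
    return 0;
}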
The array is protected indirectly by the nfnl mutex from nfnetlink. The content of the sets are protected by the rwlock of the set. There are functional differences between the add/del/test functions for the kernel and userspace: - kernel add/del/test: works on the current packet (i.e. one element) - kernel test: may trigger an "add" operation in order to fill out unspecified parts of the element from the packet (like MAC address) - userspace add/del: works on the netlink message and thus possibly on multiple elements from the IPSET_ATTR_ADT container attribute. - userspace add: may trigger resizing of a set Signed-off-by: Jozsef Kadlecsik Signed-off-by: Patrick McHardy --- include/linux/netfilter/ipset/ip_set.h | 452 +++++++++++++++++++++++++ include/linux/netfilter/ipset/ip_set_getport.h | 11 + include/linux/netfilter/ipset/pfxlen.h | 35 ++ 3 files changed, 498 insertions(+) create mode 100644 include/linux/netfilter/ipset/ip_set.h create mode 100644 include/linux/netfilter/ipset/ip_set_getport.h create mode 100644 include/linux/netfilter/ipset/pfxlen.h (limited to 'include') diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h new file mode 100644 index 000000000000..ec333d83f3b4 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set.h @@ -0,0 +1,452 @@ +#ifndef _IP_SET_H +#define _IP_SET_H + +/* Copyright (C) 2000-2002 Joakim Axelsson + * Patrick Schaaf + * Martin Josefsson + * Copyright (C) 2003-2011 Jozsef Kadlecsik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* The protocol version */ +#define IPSET_PROTOCOL 6 + +/* The max length of strings including NUL: set and type identifiers */ +#define IPSET_MAXNAMELEN 32 + +/* Message types and commands */ +enum ipset_cmd { + IPSET_CMD_NONE, + IPSET_CMD_PROTOCOL, /* 1: Return protocol version */ + IPSET_CMD_CREATE, /* 2: Create a new (empty) set */ + IPSET_CMD_DESTROY, /* 3: Destroy a (empty) set */ + IPSET_CMD_FLUSH, /* 4: Remove all elements from a set */ + IPSET_CMD_RENAME, /* 5: Rename a set */ + IPSET_CMD_SWAP, /* 6: Swap two sets */ + IPSET_CMD_LIST, /* 7: List sets */ + IPSET_CMD_SAVE, /* 8: Save sets */ + IPSET_CMD_ADD, /* 9: Add an element to a set */ + IPSET_CMD_DEL, /* 10: Delete an element from a set */ + IPSET_CMD_TEST, /* 11: Test an element in a set */ + IPSET_CMD_HEADER, /* 12: Get set header data only */ + IPSET_CMD_TYPE, /* 13: Get set type */ + IPSET_MSG_MAX, /* Netlink message commands */ + + /* Commands in userspace: */ + IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 14: Enter restore mode */ + IPSET_CMD_HELP, /* 15: Get help */ + IPSET_CMD_VERSION, /* 16: Get program version */ + IPSET_CMD_QUIT, /* 17: Quit from interactive mode */ + + IPSET_CMD_MAX, + + IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 18: Commit buffered commands */ +}; + +/* Attributes at command level */ +enum { + IPSET_ATTR_UNSPEC, + IPSET_ATTR_PROTOCOL, /* 1: Protocol version */ + IPSET_ATTR_SETNAME, /* 2: Name of the set */ + IPSET_ATTR_TYPENAME, /* 3: Typename */ + IPSET_ATTR_SETNAME2 = IPSET_ATTR_TYPENAME, /* Setname at rename/swap */ + IPSET_ATTR_REVISION, /* 4: Settype revision */ + IPSET_ATTR_FAMILY, /* 5: Settype family */ + IPSET_ATTR_FLAGS, /* 6: Flags at command level */ + IPSET_ATTR_DATA, /* 7: Nested attributes */ + IPSET_ATTR_ADT, /* 8: Multiple data containers */ + IPSET_ATTR_LINENO, /* 9: Restore lineno */ + IPSET_ATTR_PROTOCOL_MIN, /* 10: Minimal supported 
version number */ + IPSET_ATTR_REVISION_MIN = IPSET_ATTR_PROTOCOL_MIN, /* type rev min */ + __IPSET_ATTR_CMD_MAX, +}; +#define IPSET_ATTR_CMD_MAX (__IPSET_ATTR_CMD_MAX - 1) + +/* CADT specific attributes */ +enum { + IPSET_ATTR_IP = IPSET_ATTR_UNSPEC + 1, + IPSET_ATTR_IP_FROM = IPSET_ATTR_IP, + IPSET_ATTR_IP_TO, /* 2 */ + IPSET_ATTR_CIDR, /* 3 */ + IPSET_ATTR_PORT, /* 4 */ + IPSET_ATTR_PORT_FROM = IPSET_ATTR_PORT, + IPSET_ATTR_PORT_TO, /* 5 */ + IPSET_ATTR_TIMEOUT, /* 6 */ + IPSET_ATTR_PROTO, /* 7 */ + IPSET_ATTR_CADT_FLAGS, /* 8 */ + IPSET_ATTR_CADT_LINENO = IPSET_ATTR_LINENO, /* 9 */ + /* Reserve empty slots */ + IPSET_ATTR_CADT_MAX = 16, + /* Create-only specific attributes */ + IPSET_ATTR_GC, + IPSET_ATTR_HASHSIZE, + IPSET_ATTR_MAXELEM, + IPSET_ATTR_NETMASK, + IPSET_ATTR_PROBES, + IPSET_ATTR_RESIZE, + IPSET_ATTR_SIZE, + /* Kernel-only */ + IPSET_ATTR_ELEMENTS, + IPSET_ATTR_REFERENCES, + IPSET_ATTR_MEMSIZE, + + __IPSET_ATTR_CREATE_MAX, +}; +#define IPSET_ATTR_CREATE_MAX (__IPSET_ATTR_CREATE_MAX - 1) + +/* ADT specific attributes */ +enum { + IPSET_ATTR_ETHER = IPSET_ATTR_CADT_MAX + 1, + IPSET_ATTR_NAME, + IPSET_ATTR_NAMEREF, + IPSET_ATTR_IP2, + IPSET_ATTR_CIDR2, + __IPSET_ATTR_ADT_MAX, +}; +#define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1) + +/* IP specific attributes */ +enum { + IPSET_ATTR_IPADDR_IPV4 = IPSET_ATTR_UNSPEC + 1, + IPSET_ATTR_IPADDR_IPV6, + __IPSET_ATTR_IPADDR_MAX, +}; +#define IPSET_ATTR_IPADDR_MAX (__IPSET_ATTR_IPADDR_MAX - 1) + +/* Error codes */ +enum ipset_errno { + IPSET_ERR_PRIVATE = 4096, + IPSET_ERR_PROTOCOL, + IPSET_ERR_FIND_TYPE, + IPSET_ERR_MAX_SETS, + IPSET_ERR_BUSY, + IPSET_ERR_EXIST_SETNAME2, + IPSET_ERR_TYPE_MISMATCH, + IPSET_ERR_EXIST, + IPSET_ERR_INVALID_CIDR, + IPSET_ERR_INVALID_NETMASK, + IPSET_ERR_INVALID_FAMILY, + IPSET_ERR_TIMEOUT, + IPSET_ERR_REFERENCED, + IPSET_ERR_IPADDR_IPV4, + IPSET_ERR_IPADDR_IPV6, + + /* Type specific error codes */ + IPSET_ERR_TYPE_SPECIFIC = 4352, +}; + +/* Flags at command level */ +enum ipset_cmd_flags { + IPSET_FLAG_BIT_EXIST = 0, + IPSET_FLAG_EXIST = (1 << IPSET_FLAG_BIT_EXIST), +}; + +/* Flags at CADT attribute level */ +enum ipset_cadt_flags { + IPSET_FLAG_BIT_BEFORE = 0, + IPSET_FLAG_BEFORE = (1 << IPSET_FLAG_BIT_BEFORE), +}; + +/* Commands with settype-specific attributes */ +enum ipset_adt { + IPSET_ADD, + IPSET_DEL, + IPSET_TEST, + IPSET_ADT_MAX, + IPSET_CREATE = IPSET_ADT_MAX, + IPSET_CADT_MAX, +}; + +#ifdef __KERNEL__ +#include +#include +#include +#include +#include +#include + +/* Sets are identified by an index in kernel space. Tweak with ip_set_id_t + * and IPSET_INVALID_ID if you want to increase the max number of sets. + */ +typedef u16 ip_set_id_t; + +#define IPSET_INVALID_ID 65535 + +enum ip_set_dim { + IPSET_DIM_ZERO = 0, + IPSET_DIM_ONE, + IPSET_DIM_TWO, + IPSET_DIM_THREE, + /* Max dimension in elements. + * If changed, new revision of iptables match/target is required. 
+ */ + IPSET_DIM_MAX = 6, +}; + +/* Option flags for kernel operations */ +enum ip_set_kopt { + IPSET_INV_MATCH = (1 << IPSET_DIM_ZERO), + IPSET_DIM_ONE_SRC = (1 << IPSET_DIM_ONE), + IPSET_DIM_TWO_SRC = (1 << IPSET_DIM_TWO), + IPSET_DIM_THREE_SRC = (1 << IPSET_DIM_THREE), +}; + +/* Set features */ +enum ip_set_feature { + IPSET_TYPE_IP_FLAG = 0, + IPSET_TYPE_IP = (1 << IPSET_TYPE_IP_FLAG), + IPSET_TYPE_PORT_FLAG = 1, + IPSET_TYPE_PORT = (1 << IPSET_TYPE_PORT_FLAG), + IPSET_TYPE_MAC_FLAG = 2, + IPSET_TYPE_MAC = (1 << IPSET_TYPE_MAC_FLAG), + IPSET_TYPE_IP2_FLAG = 3, + IPSET_TYPE_IP2 = (1 << IPSET_TYPE_IP2_FLAG), + IPSET_TYPE_NAME_FLAG = 4, + IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG), + /* Strictly speaking not a feature, but a flag for dumping: + * this settype must be dumped last */ + IPSET_DUMP_LAST_FLAG = 7, + IPSET_DUMP_LAST = (1 << IPSET_DUMP_LAST_FLAG), +}; + +struct ip_set; + +typedef int (*ipset_adtfn)(struct ip_set *set, void *value, u32 timeout); + +/* Set type, variant-specific part */ +struct ip_set_type_variant { + /* Kernelspace: test/add/del entries + * returns negative error code, + * zero for no match/success to add/delete + * positive for matching element */ + int (*kadt)(struct ip_set *set, const struct sk_buff * skb, + enum ipset_adt adt, u8 pf, u8 dim, u8 flags); + + /* Userspace: test/add/del entries + * returns negative error code, + * zero for no match/success to add/delete + * positive for matching element */ + int (*uadt)(struct ip_set *set, struct nlattr *tb[], + enum ipset_adt adt, u32 *lineno, u32 flags); + + /* Low level add/del/test functions */ + ipset_adtfn adt[IPSET_ADT_MAX]; + + /* When adding entries and set is full, try to resize the set */ + int (*resize)(struct ip_set *set, bool retried); + /* Destroy the set */ + void (*destroy)(struct ip_set *set); + /* Flush the elements */ + void (*flush)(struct ip_set *set); + /* Expire entries before listing */ + void (*expire)(struct ip_set *set); + /* List set header data */ + int (*head)(struct ip_set *set, struct sk_buff *skb); + /* List elements */ + int (*list)(const struct ip_set *set, struct sk_buff *skb, + struct netlink_callback *cb); + + /* Return true if "b" set is the same as "a" + * according to the create set parameters */ + bool (*same_set)(const struct ip_set *a, const struct ip_set *b); +}; + +/* The core set type structure */ +struct ip_set_type { + struct list_head list; + + /* Typename */ + char name[IPSET_MAXNAMELEN]; + /* Protocol version */ + u8 protocol; + /* Set features to control swapping */ + u8 features; + /* Set type dimension */ + u8 dimension; + /* Supported family: may be AF_UNSPEC for both AF_INET/AF_INET6 */ + u8 family; + /* Type revision */ + u8 revision; + + /* Create set */ + int (*create)(struct ip_set *set, struct nlattr *tb[], u32 flags); + + /* Attribute policies */ + const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1]; + const struct nla_policy adt_policy[IPSET_ATTR_ADT_MAX + 1]; + + /* Set this to THIS_MODULE if you are a module, otherwise NULL */ + struct module *me; +}; + +/* register and unregister set type */ +extern int ip_set_type_register(struct ip_set_type *set_type); +extern void ip_set_type_unregister(struct ip_set_type *set_type); + +/* A generic IP set */ +struct ip_set { + /* The name of the set */ + char name[IPSET_MAXNAMELEN]; + /* Lock protecting the set data */ + rwlock_t lock; + /* References to the set */ + atomic_t ref; + /* The core set type */ + struct ip_set_type *type; + /* The type variant doing the real job */ + const struct 
ip_set_type_variant *variant; + /* The actual INET family of the set */ + u8 family; + /* The type specific data */ + void *data; +}; + +/* register and unregister set references */ +extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set); +extern void ip_set_put_byindex(ip_set_id_t index); +extern const char * ip_set_name_byindex(ip_set_id_t index); +extern ip_set_id_t ip_set_nfnl_get(const char *name); +extern ip_set_id_t ip_set_nfnl_get_byindex(ip_set_id_t index); +extern void ip_set_nfnl_put(ip_set_id_t index); + +/* API for iptables set match, and SET target */ +extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb, + u8 family, u8 dim, u8 flags); +extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb, + u8 family, u8 dim, u8 flags); +extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb, + u8 family, u8 dim, u8 flags); + +/* Utility functions */ +extern void * ip_set_alloc(size_t size); +extern void ip_set_free(void *members); +extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr); +extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr); + +static inline int +ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr) +{ + __be32 ip; + int ret = ip_set_get_ipaddr4(nla, &ip); + + if (ret) + return ret; + *ipaddr = ntohl(ip); + return 0; +} + +/* Ignore IPSET_ERR_EXIST errors if asked to do so? */ +static inline bool +ip_set_eexist(int ret, u32 flags) +{ + return ret == -IPSET_ERR_EXIST && (flags & IPSET_FLAG_EXIST); +} + +/* Check the NLA_F_NET_BYTEORDER flag */ +static inline bool +ip_set_attr_netorder(struct nlattr *tb[], int type) +{ + return tb[type] && (tb[type]->nla_type & NLA_F_NET_BYTEORDER); +} + +static inline bool +ip_set_optattr_netorder(struct nlattr *tb[], int type) +{ + return !tb[type] || (tb[type]->nla_type & NLA_F_NET_BYTEORDER); +} + +/* Useful converters */ +static inline u32 +ip_set_get_h32(const struct nlattr *attr) +{ + return ntohl(nla_get_be32(attr)); +} + +static inline u16 +ip_set_get_h16(const struct nlattr *attr) +{ + return ntohs(nla_get_be16(attr)); +} + +#define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED) +#define ipset_nest_end(skb, start) nla_nest_end(skb, start) + +#define NLA_PUT_IPADDR4(skb, type, ipaddr) \ +do { \ + struct nlattr *__nested = ipset_nest_start(skb, type); \ + \ + if (!__nested) \ + goto nla_put_failure; \ + NLA_PUT_NET32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr); \ + ipset_nest_end(skb, __nested); \ +} while (0) + +#define NLA_PUT_IPADDR6(skb, type, ipaddrptr) \ +do { \ + struct nlattr *__nested = ipset_nest_start(skb, type); \ + \ + if (!__nested) \ + goto nla_put_failure; \ + NLA_PUT(skb, IPSET_ATTR_IPADDR_IPV6, \ + sizeof(struct in6_addr), ipaddrptr); \ + ipset_nest_end(skb, __nested); \ +} while (0) + +/* Get address from skbuff */ +static inline __be32 +ip4addr(const struct sk_buff *skb, bool src) +{ + return src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr; +} + +static inline void +ip4addrptr(const struct sk_buff *skb, bool src, __be32 *addr) +{ + *addr = src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr; +} + +static inline void +ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr) +{ + memcpy(addr, src ? 
&ipv6_hdr(skb)->saddr : &ipv6_hdr(skb)->daddr, + sizeof(*addr)); +} + +/* Calculate the bytes required to store the inclusive range of a-b */ +static inline int +bitmap_bytes(u32 a, u32 b) +{ + return 4 * ((((b - a + 8) / 8) + 3) / 4); +} + +/* Interface to iptables/ip6tables */ + +#define SO_IP_SET 83 + +union ip_set_name_index { + char name[IPSET_MAXNAMELEN]; + ip_set_id_t index; +}; + +#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */ +struct ip_set_req_get_set { + unsigned op; + unsigned version; + union ip_set_name_index set; +}; + +#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */ +/* Uses ip_set_req_get_set */ + +#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */ +struct ip_set_req_version { + unsigned op; + unsigned version; +}; + +#endif /* __KERNEL__ */ + +#endif /*_IP_SET_H */ diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h new file mode 100644 index 000000000000..694c433298b8 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_getport.h @@ -0,0 +1,11 @@ +#ifndef _IP_SET_GETPORT_H +#define _IP_SET_GETPORT_H + +extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src, + __be16 *port, u8 *proto); +extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src, + __be16 *port, u8 *proto); +extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, + __be16 *port); + +#endif /*_IP_SET_GETPORT_H*/ diff --git a/include/linux/netfilter/ipset/pfxlen.h b/include/linux/netfilter/ipset/pfxlen.h new file mode 100644 index 000000000000..0e1fb50da562 --- /dev/null +++ b/include/linux/netfilter/ipset/pfxlen.h @@ -0,0 +1,35 @@ +#ifndef _PFXLEN_H +#define _PFXLEN_H + +#include +#include + +/* Prefixlen maps, by Jan Engelhardt */ +extern const union nf_inet_addr ip_set_netmask_map[]; +extern const union nf_inet_addr ip_set_hostmask_map[]; + +static inline __be32 +ip_set_netmask(u8 pfxlen) +{ + return ip_set_netmask_map[pfxlen].ip; +} + +static inline const __be32 * +ip_set_netmask6(u8 pfxlen) +{ + return &ip_set_netmask_map[pfxlen].ip6[0]; +} + +static inline u32 +ip_set_hostmask(u8 pfxlen) +{ + return (__force u32) ip_set_hostmask_map[pfxlen].ip; +} + +static inline const __be32 * +ip_set_hostmask6(u8 pfxlen) +{ + return &ip_set_hostmask_map[pfxlen].ip6[0]; +} + +#endif /*_PFXLEN_H */ -- cgit v1.2.3-11-g984f From 72205fc68bd13109576aa6c4c12c740962d28a6c Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Tue, 1 Feb 2011 15:33:17 +0100 Subject: netfilter: ipset: bitmap:ip set type support The module implements the bitmap:ip set type in two flavours, without and with timeout support. In this kind of set one can store IPv4 addresses (or network addresses) from a given range. In order not to waste memory, the timeout version does not rely on the kernel timer for every element to be timed out but on garbage collection. All set types use this mechanism. 
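To illustrate the bitmap idea before the header diffs below, here is a small userspace sketch (made-up address range, timeouts and garbage collection left out, not the kernel implementation): every address of the fixed range maps to one bit of an array, so add and test are constant-time index operations; the timeout flavour additionally stores a per-element deadline which the periodic garbage collector sweeps instead of arming one kernel timer per element.

/* Conceptual userspace sketch of a bitmap:ip style set -- illustrative only. */
#include <stdio.h>
#include <stdint.h>

#define FIRST_IP 0xC0A80000u            /* 192.168.0.0 in host byte order */
#define LAST_IP  0xC0A800FFu            /* 192.168.0.255 */
#define RANGE    (LAST_IP - FIRST_IP + 1)

static unsigned char members[(RANGE + 7) / 8];  /* one bit per address */

static void bitmap_add(uint32_t ip)
{
    uint32_t id = ip - FIRST_IP;

    members[id / 8] |= 1 << (id % 8);
}

static int bitmap_test(uint32_t ip)
{
    uint32_t id = ip - FIRST_IP;

    return !!(members[id / 8] & (1 << (id % 8)));
}

int main(void)
{
    bitmap_add(0xC0A80001u);            /* 192.168.0.1 */
    printf("192.168.0.1: %d\n", bitmap_test(0xC0A80001u));
    printf("192.168.0.2: %d\n", bitmap_test(0xC0A80002u));
    return 0;
}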
Signed-off-by: Jozsef Kadlecsik Signed-off-by: Patrick McHardy --- include/linux/netfilter/ipset/ip_set_bitmap.h | 31 ++++++ include/linux/netfilter/ipset/ip_set_timeout.h | 127 +++++++++++++++++++++++++ 2 files changed, 158 insertions(+) create mode 100644 include/linux/netfilter/ipset/ip_set_bitmap.h create mode 100644 include/linux/netfilter/ipset/ip_set_timeout.h (limited to 'include') diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h new file mode 100644 index 000000000000..61a9e8746c83 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_bitmap.h @@ -0,0 +1,31 @@ +#ifndef __IP_SET_BITMAP_H +#define __IP_SET_BITMAP_H + +/* Bitmap type specific error codes */ +enum { + /* The element is out of the range of the set */ + IPSET_ERR_BITMAP_RANGE = IPSET_ERR_TYPE_SPECIFIC, + /* The range exceeds the size limit of the set type */ + IPSET_ERR_BITMAP_RANGE_SIZE, +}; + +#ifdef __KERNEL__ +#define IPSET_BITMAP_MAX_RANGE 0x0000FFFF + +/* Common functions */ + +static inline u32 +range_to_mask(u32 from, u32 to, u8 *bits) +{ + u32 mask = 0xFFFFFFFE; + + *bits = 32; + while (--(*bits) > 0 && mask && (to & mask) != from) + mask <<= 1; + + return mask; +} + +#endif /* __KERNEL__ */ + +#endif /* __IP_SET_BITMAP_H */ diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h new file mode 100644 index 000000000000..9f30c5f2ec1c --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_timeout.h @@ -0,0 +1,127 @@ +#ifndef _IP_SET_TIMEOUT_H +#define _IP_SET_TIMEOUT_H + +/* Copyright (C) 2003-2011 Jozsef Kadlecsik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifdef __KERNEL__ + +/* How often should the gc be run by default */ +#define IPSET_GC_TIME (3 * 60) + +/* Timeout period depending on the timeout value of the given set */ +#define IPSET_GC_PERIOD(timeout) \ + ((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1) + +/* Set is defined without timeout support: timeout value may be 0 */ +#define IPSET_NO_TIMEOUT UINT_MAX + +#define with_timeout(timeout) ((timeout) != IPSET_NO_TIMEOUT) + +static inline unsigned int +ip_set_timeout_uget(struct nlattr *tb) +{ + unsigned int timeout = ip_set_get_h32(tb); + + /* Userspace supplied TIMEOUT parameter: adjust crazy size */ + return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout; +} + +#ifdef IP_SET_BITMAP_TIMEOUT + +/* Bitmap specific timeout constants and macros for the entries */ + +/* Bitmap entry is unset */ +#define IPSET_ELEM_UNSET 0 +/* Bitmap entry is set with no timeout value */ +#define IPSET_ELEM_PERMANENT (UINT_MAX/2) + +static inline bool +ip_set_timeout_test(unsigned long timeout) +{ + return timeout != IPSET_ELEM_UNSET && + (timeout == IPSET_ELEM_PERMANENT || + time_after(timeout, jiffies)); +} + +static inline bool +ip_set_timeout_expired(unsigned long timeout) +{ + return timeout != IPSET_ELEM_UNSET && + timeout != IPSET_ELEM_PERMANENT && + time_before(timeout, jiffies); +} + +static inline unsigned long +ip_set_timeout_set(u32 timeout) +{ + unsigned long t; + + if (!timeout) + return IPSET_ELEM_PERMANENT; + + t = timeout * HZ + jiffies; + if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT) + /* Bingo! */ + t++; + + return t; +} + +static inline u32 +ip_set_timeout_get(unsigned long timeout) +{ + return timeout == IPSET_ELEM_PERMANENT ? 
0 : (timeout - jiffies)/HZ; +} + +#else + +/* Hash specific timeout constants and macros for the entries */ + +/* Hash entry is set with no timeout value */ +#define IPSET_ELEM_PERMANENT 0 + +static inline bool +ip_set_timeout_test(unsigned long timeout) +{ + return timeout == IPSET_ELEM_PERMANENT || + time_after(timeout, jiffies); +} + +static inline bool +ip_set_timeout_expired(unsigned long timeout) +{ + return timeout != IPSET_ELEM_PERMANENT && + time_before(timeout, jiffies); +} + +static inline unsigned long +ip_set_timeout_set(u32 timeout) +{ + unsigned long t; + + if (!timeout) + return IPSET_ELEM_PERMANENT; + + t = timeout * HZ + jiffies; + if (t == IPSET_ELEM_PERMANENT) + /* Bingo! :-) */ + t++; + + return t; +} + +static inline u32 +ip_set_timeout_get(unsigned long timeout) +{ + return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ; +} +#endif /* ! IP_SET_BITMAP_TIMEOUT */ + +#endif /* __KERNEL__ */ + +#endif /* _IP_SET_TIMEOUT_H */ -- cgit v1.2.3-11-g984f From 6c027889696a7a694b0e2f6e3cabadefec7553b6 Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Tue, 1 Feb 2011 15:38:36 +0100 Subject: netfilter: ipset: hash:ip set type support The module implements the hash:ip type support in four flavours: for IPv4 or IPv6, both without and with timeout support. All the hash types are based on the "array hash" or ahash structure and functions as a good compromise between minimal memory footprint and speed. The hashing uses arrays to resolve clashes. The hash table is resized (doubled) when searching becomes too long. Resizing can be triggered by userspace add commands only and those are serialized by the nfnl mutex. During resizing the set is read-locked, so the only possible concurrent operations are the kernel side readers. Those are protected by RCU locking. Because of the four flavours and the other hash types, the functions are implemented in general forms in the ip_set_ahash.h header file and the real functions are generated before compiling by macro expansion. Thus the dereferencing of low-level functions and void pointer arguments could be avoided: the low-level functions are inlined, the function arguments are pointers of type-specific structures. Signed-off-by: Jozsef Kadlecsik Signed-off-by: Patrick McHardy --- include/linux/netfilter/ipset/ip_set_ahash.h | 1074 ++++++++++++++++++++++++++ include/linux/netfilter/ipset/ip_set_hash.h | 26 + 2 files changed, 1100 insertions(+) create mode 100644 include/linux/netfilter/ipset/ip_set_ahash.h create mode 100644 include/linux/netfilter/ipset/ip_set_hash.h (limited to 'include') diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h new file mode 100644 index 000000000000..ec9d9bea1e37 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_ahash.h @@ -0,0 +1,1074 @@ +#ifndef _IP_SET_AHASH_H +#define _IP_SET_AHASH_H + +#include +#include +#include + +/* Hashing which uses arrays to resolve clashing. The hash table is resized + * (doubled) when searching becomes too long. + * Internally jhash is used with the assumption that the size of the + * stored data is a multiple of sizeof(u32). If storage supports timeout, + * the timeout field must be the last one in the data structure - that field + * is ignored when computing the hash key. + * + * Readers and resizing + * + * Resizing can be triggered by userspace command only, and those + * are serialized by the nfnl mutex. 
During resizing the set is + * read-locked, so the only possible concurrent operations are + * the kernel side readers. Those must be protected by proper RCU locking. + */ + +/* Number of elements to store in an initial array block */ +#define AHASH_INIT_SIZE 4 +/* Max number of elements to store in an array block */ +#define AHASH_MAX_SIZE (3*4) + +/* A hash bucket */ +struct hbucket { + void *value; /* the array of the values */ + u8 size; /* size of the array */ + u8 pos; /* position of the first free entry */ +}; + +/* The hash table: the table size stored here in order to make resizing easy */ +struct htable { + u8 htable_bits; /* size of hash table == 2^htable_bits */ + struct hbucket bucket[0]; /* hashtable buckets */ +}; + +#define hbucket(h, i) &((h)->bucket[i]) + +/* Book-keeping of the prefixes added to the set */ +struct ip_set_hash_nets { + u8 cidr; /* the different cidr values in the set */ + u32 nets; /* number of elements per cidr */ +}; + +/* The generic ip_set hash structure */ +struct ip_set_hash { + struct htable *table; /* the hash table */ + u32 maxelem; /* max elements in the hash */ + u32 elements; /* current element (vs timeout) */ + u32 initval; /* random jhash init value */ + u32 timeout; /* timeout value, if enabled */ + struct timer_list gc; /* garbage collection when timeout enabled */ +#ifdef IP_SET_HASH_WITH_NETMASK + u8 netmask; /* netmask value for subnets to store */ +#endif +#ifdef IP_SET_HASH_WITH_NETS + struct ip_set_hash_nets nets[0]; /* book-keeping of prefixes */ +#endif +}; + +/* Compute htable_bits from the user input parameter hashsize */ +static u8 +htable_bits(u32 hashsize) +{ + /* Assume that hashsize == 2^htable_bits */ + u8 bits = fls(hashsize - 1); + if (jhash_size(bits) != hashsize) + /* Round up to the first 2^n value */ + bits = fls(hashsize); + + return bits; +} + +#ifdef IP_SET_HASH_WITH_NETS + +#define SET_HOST_MASK(family) (family == AF_INET ? 
32 : 128) + +/* Network cidr size book keeping when the hash stores different + * sized networks */ +static void +add_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask) +{ + u8 i; + + ++h->nets[cidr-1].nets; + + pr_debug("add_cidr added %u: %u\n", cidr, h->nets[cidr-1].nets); + + if (h->nets[cidr-1].nets > 1) + return; + + /* New cidr size */ + for (i = 0; i < host_mask && h->nets[i].cidr; i++) { + /* Add in increasing prefix order, so larger cidr first */ + if (h->nets[i].cidr < cidr) + swap(h->nets[i].cidr, cidr); + } + if (i < host_mask) + h->nets[i].cidr = cidr; +} + +static void +del_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask) +{ + u8 i; + + --h->nets[cidr-1].nets; + + pr_debug("del_cidr deleted %u: %u\n", cidr, h->nets[cidr-1].nets); + + if (h->nets[cidr-1].nets != 0) + return; + + /* All entries with this cidr size deleted, so cleanup h->cidr[] */ + for (i = 0; i < host_mask - 1 && h->nets[i].cidr; i++) { + if (h->nets[i].cidr == cidr) + h->nets[i].cidr = cidr = h->nets[i+1].cidr; + } + h->nets[i - 1].cidr = 0; +} +#endif + +/* Destroy the hashtable part of the set */ +static void +ahash_destroy(struct htable *t) +{ + struct hbucket *n; + u32 i; + + for (i = 0; i < jhash_size(t->htable_bits); i++) { + n = hbucket(t, i); + if (n->size) + /* FIXME: use slab cache */ + kfree(n->value); + } + + ip_set_free(t); +} + +/* Calculate the actual memory size of the set data */ +static size_t +ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 host_mask) +{ + u32 i; + struct htable *t = h->table; + size_t memsize = sizeof(*h) + + sizeof(*t) +#ifdef IP_SET_HASH_WITH_NETS + + sizeof(struct ip_set_hash_nets) * host_mask +#endif + + jhash_size(t->htable_bits) * sizeof(struct hbucket); + + for (i = 0; i < jhash_size(t->htable_bits); i++) + memsize += t->bucket[i].size * dsize; + + return memsize; +} + +/* Flush a hash type of set: destroy all elements */ +static void +ip_set_hash_flush(struct ip_set *set) +{ + struct ip_set_hash *h = set->data; + struct htable *t = h->table; + struct hbucket *n; + u32 i; + + for (i = 0; i < jhash_size(t->htable_bits); i++) { + n = hbucket(t, i); + if (n->size) { + n->size = n->pos = 0; + /* FIXME: use slab cache */ + kfree(n->value); + } + } +#ifdef IP_SET_HASH_WITH_NETS + memset(h->nets, 0, sizeof(struct ip_set_hash_nets) + * SET_HOST_MASK(set->family)); +#endif + h->elements = 0; +} + +/* Destroy a hash type of set */ +static void +ip_set_hash_destroy(struct ip_set *set) +{ + struct ip_set_hash *h = set->data; + + if (with_timeout(h->timeout)) + del_timer_sync(&h->gc); + + ahash_destroy(h->table); + kfree(h); + + set->data = NULL; +} + +#define HKEY(data, initval, htable_bits) \ +(jhash2((u32 *)(data), sizeof(struct type_pf_elem)/sizeof(u32), initval) \ + & jhash_mask(htable_bits)) + +#endif /* _IP_SET_AHASH_H */ + +#define CONCAT(a, b, c) a##b##c +#define TOKEN(a, b, c) CONCAT(a, b, c) + +/* Type/family dependent function prototypes */ + +#define type_pf_data_equal TOKEN(TYPE, PF, _data_equal) +#define type_pf_data_isnull TOKEN(TYPE, PF, _data_isnull) +#define type_pf_data_copy TOKEN(TYPE, PF, _data_copy) +#define type_pf_data_zero_out TOKEN(TYPE, PF, _data_zero_out) +#define type_pf_data_netmask TOKEN(TYPE, PF, _data_netmask) +#define type_pf_data_list TOKEN(TYPE, PF, _data_list) +#define type_pf_data_tlist TOKEN(TYPE, PF, _data_tlist) + +#define type_pf_elem TOKEN(TYPE, PF, _elem) +#define type_pf_telem TOKEN(TYPE, PF, _telem) +#define type_pf_data_timeout TOKEN(TYPE, PF, _data_timeout) +#define type_pf_data_expired TOKEN(TYPE, PF, 
_data_expired) +#define type_pf_data_timeout_set TOKEN(TYPE, PF, _data_timeout_set) + +#define type_pf_elem_add TOKEN(TYPE, PF, _elem_add) +#define type_pf_add TOKEN(TYPE, PF, _add) +#define type_pf_del TOKEN(TYPE, PF, _del) +#define type_pf_test_cidrs TOKEN(TYPE, PF, _test_cidrs) +#define type_pf_test TOKEN(TYPE, PF, _test) + +#define type_pf_elem_tadd TOKEN(TYPE, PF, _elem_tadd) +#define type_pf_del_telem TOKEN(TYPE, PF, _ahash_del_telem) +#define type_pf_expire TOKEN(TYPE, PF, _expire) +#define type_pf_tadd TOKEN(TYPE, PF, _tadd) +#define type_pf_tdel TOKEN(TYPE, PF, _tdel) +#define type_pf_ttest_cidrs TOKEN(TYPE, PF, _ahash_ttest_cidrs) +#define type_pf_ttest TOKEN(TYPE, PF, _ahash_ttest) + +#define type_pf_resize TOKEN(TYPE, PF, _resize) +#define type_pf_tresize TOKEN(TYPE, PF, _tresize) +#define type_pf_flush ip_set_hash_flush +#define type_pf_destroy ip_set_hash_destroy +#define type_pf_head TOKEN(TYPE, PF, _head) +#define type_pf_list TOKEN(TYPE, PF, _list) +#define type_pf_tlist TOKEN(TYPE, PF, _tlist) +#define type_pf_same_set TOKEN(TYPE, PF, _same_set) +#define type_pf_kadt TOKEN(TYPE, PF, _kadt) +#define type_pf_uadt TOKEN(TYPE, PF, _uadt) +#define type_pf_gc TOKEN(TYPE, PF, _gc) +#define type_pf_gc_init TOKEN(TYPE, PF, _gc_init) +#define type_pf_variant TOKEN(TYPE, PF, _variant) +#define type_pf_tvariant TOKEN(TYPE, PF, _tvariant) + +/* Flavour without timeout */ + +/* Get the ith element from the array block n */ +#define ahash_data(n, i) \ + ((struct type_pf_elem *)((n)->value) + (i)) + +/* Add an element to the hash table when resizing the set: + * we spare the maintenance of the internal counters. */ +static int +type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value) +{ + if (n->pos >= n->size) { + void *tmp; + + if (n->size >= AHASH_MAX_SIZE) + /* Trigger rehashing */ + return -EAGAIN; + + tmp = kzalloc((n->size + AHASH_INIT_SIZE) + * sizeof(struct type_pf_elem), + GFP_ATOMIC); + if (!tmp) + return -ENOMEM; + if (n->size) { + memcpy(tmp, n->value, + sizeof(struct type_pf_elem) * n->size); + kfree(n->value); + } + n->value = tmp; + n->size += AHASH_INIT_SIZE; + } + type_pf_data_copy(ahash_data(n, n->pos++), value); + return 0; +} + +/* Resize a hash: create a new hash table with doubling the hashsize + * and inserting the elements to it. Repeat until we succeed or + * fail due to memory pressures. 
*/ +static int +type_pf_resize(struct ip_set *set, bool retried) +{ + struct ip_set_hash *h = set->data; + struct htable *t, *orig = h->table; + u8 htable_bits = orig->htable_bits; + const struct type_pf_elem *data; + struct hbucket *n, *m; + u32 i, j; + int ret; + +retry: + ret = 0; + htable_bits++; + pr_debug("attempt to resize set %s from %u to %u, t %p\n", + set->name, orig->htable_bits, htable_bits, orig); + if (!htable_bits) + /* In case we have plenty of memory :-) */ + return -IPSET_ERR_HASH_FULL; + t = ip_set_alloc(sizeof(*t) + + jhash_size(htable_bits) * sizeof(struct hbucket)); + if (!t) + return -ENOMEM; + t->htable_bits = htable_bits; + + read_lock_bh(&set->lock); + for (i = 0; i < jhash_size(orig->htable_bits); i++) { + n = hbucket(orig, i); + for (j = 0; j < n->pos; j++) { + data = ahash_data(n, j); + m = hbucket(t, HKEY(data, h->initval, htable_bits)); + ret = type_pf_elem_add(m, data); + if (ret < 0) { + read_unlock_bh(&set->lock); + ahash_destroy(t); + if (ret == -EAGAIN) + goto retry; + return ret; + } + } + } + + rcu_assign_pointer(h->table, t); + read_unlock_bh(&set->lock); + + /* Give time to other readers of the set */ + synchronize_rcu_bh(); + + pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name, + orig->htable_bits, orig, t->htable_bits, t); + ahash_destroy(orig); + + return 0; +} + +/* Add an element to a hash and update the internal counters when succeeded, + * otherwise report the proper error code. */ +static int +type_pf_add(struct ip_set *set, void *value, u32 timeout) +{ + struct ip_set_hash *h = set->data; + struct htable *t; + const struct type_pf_elem *d = value; + struct hbucket *n; + int i, ret = 0; + u32 key; + + if (h->elements >= h->maxelem) + return -IPSET_ERR_HASH_FULL; + + rcu_read_lock_bh(); + t = rcu_dereference_bh(h->table); + key = HKEY(value, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) + if (type_pf_data_equal(ahash_data(n, i), d)) { + ret = -IPSET_ERR_EXIST; + goto out; + } + + ret = type_pf_elem_add(n, value); + if (ret != 0) + goto out; + +#ifdef IP_SET_HASH_WITH_NETS + add_cidr(h, d->cidr, HOST_MASK); +#endif + h->elements++; +out: + rcu_read_unlock_bh(); + return ret; +} + +/* Delete an element from the hash: swap it with the last element + * and free up space if possible. 
+ */ +static int +type_pf_del(struct ip_set *set, void *value, u32 timeout) +{ + struct ip_set_hash *h = set->data; + struct htable *t = h->table; + const struct type_pf_elem *d = value; + struct hbucket *n; + int i; + struct type_pf_elem *data; + u32 key; + + key = HKEY(value, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_data(n, i); + if (!type_pf_data_equal(data, d)) + continue; + if (i != n->pos - 1) + /* Not last one */ + type_pf_data_copy(data, ahash_data(n, n->pos - 1)); + + n->pos--; + h->elements--; +#ifdef IP_SET_HASH_WITH_NETS + del_cidr(h, d->cidr, HOST_MASK); +#endif + if (n->pos + AHASH_INIT_SIZE < n->size) { + void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) + * sizeof(struct type_pf_elem), + GFP_ATOMIC); + if (!tmp) + return 0; + n->size -= AHASH_INIT_SIZE; + memcpy(tmp, n->value, + n->size * sizeof(struct type_pf_elem)); + kfree(n->value); + n->value = tmp; + } + return 0; + } + + return -IPSET_ERR_EXIST; +} + +#ifdef IP_SET_HASH_WITH_NETS + +/* Special test function which takes into account the different network + * sizes added to the set */ +static int +type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout) +{ + struct ip_set_hash *h = set->data; + struct htable *t = h->table; + struct hbucket *n; + const struct type_pf_elem *data; + int i, j = 0; + u32 key; + u8 host_mask = SET_HOST_MASK(set->family); + + pr_debug("test by nets\n"); + for (; j < host_mask && h->nets[j].cidr; j++) { + type_pf_data_netmask(d, h->nets[j].cidr); + key = HKEY(d, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_data(n, i); + if (type_pf_data_equal(data, d)) + return 1; + } + } + return 0; +} +#endif + +/* Test whether the element is added to the set */ +static int +type_pf_test(struct ip_set *set, void *value, u32 timeout) +{ + struct ip_set_hash *h = set->data; + struct htable *t = h->table; + struct type_pf_elem *d = value; + struct hbucket *n; + const struct type_pf_elem *data; + int i; + u32 key; + +#ifdef IP_SET_HASH_WITH_NETS + /* If we test an IP address and not a network address, + * try all possible network sizes */ + if (d->cidr == SET_HOST_MASK(set->family)) + return type_pf_test_cidrs(set, d, timeout); +#endif + + key = HKEY(d, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_data(n, i); + if (type_pf_data_equal(data, d)) + return 1; + } + return 0; +} + +/* Reply a HEADER request: fill out the header part of the set */ +static int +type_pf_head(struct ip_set *set, struct sk_buff *skb) +{ + const struct ip_set_hash *h = set->data; + struct nlattr *nested; + size_t memsize; + + read_lock_bh(&set->lock); + memsize = ahash_memsize(h, with_timeout(h->timeout) + ? sizeof(struct type_pf_telem) + : sizeof(struct type_pf_elem), + set->family == AF_INET ? 
32 : 128); + read_unlock_bh(&set->lock); + + nested = ipset_nest_start(skb, IPSET_ATTR_DATA); + if (!nested) + goto nla_put_failure; + NLA_PUT_NET32(skb, IPSET_ATTR_HASHSIZE, + htonl(jhash_size(h->table->htable_bits))); + NLA_PUT_NET32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)); +#ifdef IP_SET_HASH_WITH_NETMASK + if (h->netmask != HOST_MASK) + NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask); +#endif + NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, + htonl(atomic_read(&set->ref) - 1)); + NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)); + if (with_timeout(h->timeout)) + NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout)); + ipset_nest_end(skb, nested); + + return 0; +nla_put_failure: + return -EMSGSIZE; +} + +/* Reply a LIST/SAVE request: dump the elements of the specified set */ +static int +type_pf_list(const struct ip_set *set, + struct sk_buff *skb, struct netlink_callback *cb) +{ + const struct ip_set_hash *h = set->data; + const struct htable *t = h->table; + struct nlattr *atd, *nested; + const struct hbucket *n; + const struct type_pf_elem *data; + u32 first = cb->args[2]; + /* We assume that one hash bucket fills into one page */ + void *incomplete; + int i; + + atd = ipset_nest_start(skb, IPSET_ATTR_ADT); + if (!atd) + return -EMSGSIZE; + pr_debug("list hash set %s\n", set->name); + for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) { + incomplete = skb_tail_pointer(skb); + n = hbucket(t, cb->args[2]); + pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n); + for (i = 0; i < n->pos; i++) { + data = ahash_data(n, i); + pr_debug("list hash %lu hbucket %p i %u, data %p\n", + cb->args[2], n, i, data); + nested = ipset_nest_start(skb, IPSET_ATTR_DATA); + if (!nested) { + if (cb->args[2] == first) { + nla_nest_cancel(skb, atd); + return -EMSGSIZE; + } else + goto nla_put_failure; + } + if (type_pf_data_list(skb, data)) + goto nla_put_failure; + ipset_nest_end(skb, nested); + } + } + ipset_nest_end(skb, atd); + /* Set listing finished */ + cb->args[2] = 0; + + return 0; + +nla_put_failure: + nlmsg_trim(skb, incomplete); + ipset_nest_end(skb, atd); + if (unlikely(first == cb->args[2])) { + pr_warning("Can't list set %s: one bucket does not fit into " + "a message. 
Please report it!\n", set->name); + cb->args[2] = 0; + return -EMSGSIZE; + } + return 0; +} + +static int +type_pf_kadt(struct ip_set *set, const struct sk_buff * skb, + enum ipset_adt adt, u8 pf, u8 dim, u8 flags); +static int +type_pf_uadt(struct ip_set *set, struct nlattr *tb[], + enum ipset_adt adt, u32 *lineno, u32 flags); + +static const struct ip_set_type_variant type_pf_variant = { + .kadt = type_pf_kadt, + .uadt = type_pf_uadt, + .adt = { + [IPSET_ADD] = type_pf_add, + [IPSET_DEL] = type_pf_del, + [IPSET_TEST] = type_pf_test, + }, + .destroy = type_pf_destroy, + .flush = type_pf_flush, + .head = type_pf_head, + .list = type_pf_list, + .resize = type_pf_resize, + .same_set = type_pf_same_set, +}; + +/* Flavour with timeout support */ + +#define ahash_tdata(n, i) \ + (struct type_pf_elem *)((struct type_pf_telem *)((n)->value) + (i)) + +static inline u32 +type_pf_data_timeout(const struct type_pf_elem *data) +{ + const struct type_pf_telem *tdata = + (const struct type_pf_telem *) data; + + return tdata->timeout; +} + +static inline bool +type_pf_data_expired(const struct type_pf_elem *data) +{ + const struct type_pf_telem *tdata = + (const struct type_pf_telem *) data; + + return ip_set_timeout_expired(tdata->timeout); +} + +static inline void +type_pf_data_timeout_set(struct type_pf_elem *data, u32 timeout) +{ + struct type_pf_telem *tdata = (struct type_pf_telem *) data; + + tdata->timeout = ip_set_timeout_set(timeout); +} + +static int +type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value, + u32 timeout) +{ + struct type_pf_elem *data; + + if (n->pos >= n->size) { + void *tmp; + + if (n->size >= AHASH_MAX_SIZE) + /* Trigger rehashing */ + return -EAGAIN; + + tmp = kzalloc((n->size + AHASH_INIT_SIZE) + * sizeof(struct type_pf_telem), + GFP_ATOMIC); + if (!tmp) + return -ENOMEM; + if (n->size) { + memcpy(tmp, n->value, + sizeof(struct type_pf_telem) * n->size); + kfree(n->value); + } + n->value = tmp; + n->size += AHASH_INIT_SIZE; + } + data = ahash_tdata(n, n->pos++); + type_pf_data_copy(data, value); + type_pf_data_timeout_set(data, timeout); + return 0; +} + +/* Delete expired elements from the hashtable */ +static void +type_pf_expire(struct ip_set_hash *h) +{ + struct htable *t = h->table; + struct hbucket *n; + struct type_pf_elem *data; + u32 i; + int j; + + for (i = 0; i < jhash_size(t->htable_bits); i++) { + n = hbucket(t, i); + for (j = 0; j < n->pos; j++) { + data = ahash_tdata(n, j); + if (type_pf_data_expired(data)) { + pr_debug("expired %u/%u\n", i, j); +#ifdef IP_SET_HASH_WITH_NETS + del_cidr(h, data->cidr, HOST_MASK); +#endif + if (j != n->pos - 1) + /* Not last one */ + type_pf_data_copy(data, + ahash_tdata(n, n->pos - 1)); + n->pos--; + h->elements--; + } + } + if (n->pos + AHASH_INIT_SIZE < n->size) { + void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) + * sizeof(struct type_pf_telem), + GFP_ATOMIC); + if (!tmp) + /* Still try to delete expired elements */ + continue; + n->size -= AHASH_INIT_SIZE; + memcpy(tmp, n->value, + n->size * sizeof(struct type_pf_telem)); + kfree(n->value); + n->value = tmp; + } + } +} + +static int +type_pf_tresize(struct ip_set *set, bool retried) +{ + struct ip_set_hash *h = set->data; + struct htable *t, *orig = h->table; + u8 htable_bits = orig->htable_bits; + const struct type_pf_elem *data; + struct hbucket *n, *m; + u32 i, j; + int ret; + + /* Try to cleanup once */ + if (!retried) { + i = h->elements; + write_lock_bh(&set->lock); + type_pf_expire(set->data); + write_unlock_bh(&set->lock); + if (h->elements < i) + 
return 0; + } + +retry: + ret = 0; + htable_bits++; + if (!htable_bits) + /* In case we have plenty of memory :-) */ + return -IPSET_ERR_HASH_FULL; + t = ip_set_alloc(sizeof(*t) + + jhash_size(htable_bits) * sizeof(struct hbucket)); + if (!t) + return -ENOMEM; + t->htable_bits = htable_bits; + + read_lock_bh(&set->lock); + for (i = 0; i < jhash_size(orig->htable_bits); i++) { + n = hbucket(orig, i); + for (j = 0; j < n->pos; j++) { + data = ahash_tdata(n, j); + m = hbucket(t, HKEY(data, h->initval, htable_bits)); + ret = type_pf_elem_tadd(m, data, + type_pf_data_timeout(data)); + if (ret < 0) { + read_unlock_bh(&set->lock); + ahash_destroy(t); + if (ret == -EAGAIN) + goto retry; + return ret; + } + } + } + + rcu_assign_pointer(h->table, t); + read_unlock_bh(&set->lock); + + /* Give time to other readers of the set */ + synchronize_rcu_bh(); + + ahash_destroy(orig); + + return 0; +} + +static int +type_pf_tadd(struct ip_set *set, void *value, u32 timeout) +{ + struct ip_set_hash *h = set->data; + struct htable *t = h->table; + const struct type_pf_elem *d = value; + struct hbucket *n; + struct type_pf_elem *data; + int ret = 0, i, j = AHASH_MAX_SIZE + 1; + u32 key; + + if (h->elements >= h->maxelem) + /* FIXME: when set is full, we slow down here */ + type_pf_expire(h); + if (h->elements >= h->maxelem) + return -IPSET_ERR_HASH_FULL; + + rcu_read_lock_bh(); + t = rcu_dereference_bh(h->table); + key = HKEY(d, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_tdata(n, i); + if (type_pf_data_equal(data, d)) { + if (type_pf_data_expired(data)) + j = i; + else { + ret = -IPSET_ERR_EXIST; + goto out; + } + } else if (j == AHASH_MAX_SIZE + 1 && + type_pf_data_expired(data)) + j = i; + } + if (j != AHASH_MAX_SIZE + 1) { + data = ahash_tdata(n, j); +#ifdef IP_SET_HASH_WITH_NETS + del_cidr(h, data->cidr, HOST_MASK); + add_cidr(h, d->cidr, HOST_MASK); +#endif + type_pf_data_copy(data, d); + type_pf_data_timeout_set(data, timeout); + goto out; + } + ret = type_pf_elem_tadd(n, d, timeout); + if (ret != 0) + goto out; + +#ifdef IP_SET_HASH_WITH_NETS + add_cidr(h, d->cidr, HOST_MASK); +#endif + h->elements++; +out: + rcu_read_unlock_bh(); + return ret; +} + +static int +type_pf_tdel(struct ip_set *set, void *value, u32 timeout) +{ + struct ip_set_hash *h = set->data; + struct htable *t = h->table; + const struct type_pf_elem *d = value; + struct hbucket *n; + int i, ret = 0; + struct type_pf_elem *data; + u32 key; + + key = HKEY(value, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_tdata(n, i); + if (!type_pf_data_equal(data, d)) + continue; + if (type_pf_data_expired(data)) + ret = -IPSET_ERR_EXIST; + if (i != n->pos - 1) + /* Not last one */ + type_pf_data_copy(data, ahash_tdata(n, n->pos - 1)); + + n->pos--; + h->elements--; +#ifdef IP_SET_HASH_WITH_NETS + del_cidr(h, d->cidr, HOST_MASK); +#endif + if (n->pos + AHASH_INIT_SIZE < n->size) { + void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) + * sizeof(struct type_pf_telem), + GFP_ATOMIC); + if (!tmp) + return 0; + n->size -= AHASH_INIT_SIZE; + memcpy(tmp, n->value, + n->size * sizeof(struct type_pf_telem)); + kfree(n->value); + n->value = tmp; + } + return 0; + } + + return -IPSET_ERR_EXIST; +} + +#ifdef IP_SET_HASH_WITH_NETS +static int +type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout) +{ + struct ip_set_hash *h = set->data; + struct htable *t = h->table; + struct type_pf_elem *data; + struct hbucket *n; + int i, j = 0; + u32 
key; + u8 host_mask = SET_HOST_MASK(set->family); + + for (; j < host_mask && h->nets[j].cidr; j++) { + type_pf_data_netmask(d, h->nets[j].cidr); + key = HKEY(d, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_tdata(n, i); + if (type_pf_data_equal(data, d)) + return !type_pf_data_expired(data); + } + } + return 0; +} +#endif + +static int +type_pf_ttest(struct ip_set *set, void *value, u32 timeout) +{ + struct ip_set_hash *h = set->data; + struct htable *t = h->table; + struct type_pf_elem *data, *d = value; + struct hbucket *n; + int i; + u32 key; + +#ifdef IP_SET_HASH_WITH_NETS + if (d->cidr == SET_HOST_MASK(set->family)) + return type_pf_ttest_cidrs(set, d, timeout); +#endif + key = HKEY(d, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_tdata(n, i); + if (type_pf_data_equal(data, d)) + return !type_pf_data_expired(data); + } + return 0; +} + +static int +type_pf_tlist(const struct ip_set *set, + struct sk_buff *skb, struct netlink_callback *cb) +{ + const struct ip_set_hash *h = set->data; + const struct htable *t = h->table; + struct nlattr *atd, *nested; + const struct hbucket *n; + const struct type_pf_elem *data; + u32 first = cb->args[2]; + /* We assume that one hash bucket fills into one page */ + void *incomplete; + int i; + + atd = ipset_nest_start(skb, IPSET_ATTR_ADT); + if (!atd) + return -EMSGSIZE; + for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) { + incomplete = skb_tail_pointer(skb); + n = hbucket(t, cb->args[2]); + for (i = 0; i < n->pos; i++) { + data = ahash_tdata(n, i); + pr_debug("list %p %u\n", n, i); + if (type_pf_data_expired(data)) + continue; + pr_debug("do list %p %u\n", n, i); + nested = ipset_nest_start(skb, IPSET_ATTR_DATA); + if (!nested) { + if (cb->args[2] == first) { + nla_nest_cancel(skb, atd); + return -EMSGSIZE; + } else + goto nla_put_failure; + } + if (type_pf_data_tlist(skb, data)) + goto nla_put_failure; + ipset_nest_end(skb, nested); + } + } + ipset_nest_end(skb, atd); + /* Set listing finished */ + cb->args[2] = 0; + + return 0; + +nla_put_failure: + nlmsg_trim(skb, incomplete); + ipset_nest_end(skb, atd); + if (unlikely(first == cb->args[2])) { + pr_warning("Can't list set %s: one bucket does not fit into " + "a message. 
Please report it!\n", set->name); + cb->args[2] = 0; + return -EMSGSIZE; + } + return 0; +} + +static const struct ip_set_type_variant type_pf_tvariant = { + .kadt = type_pf_kadt, + .uadt = type_pf_uadt, + .adt = { + [IPSET_ADD] = type_pf_tadd, + [IPSET_DEL] = type_pf_tdel, + [IPSET_TEST] = type_pf_ttest, + }, + .destroy = type_pf_destroy, + .flush = type_pf_flush, + .head = type_pf_head, + .list = type_pf_tlist, + .resize = type_pf_tresize, + .same_set = type_pf_same_set, +}; + +static void +type_pf_gc(unsigned long ul_set) +{ + struct ip_set *set = (struct ip_set *) ul_set; + struct ip_set_hash *h = set->data; + + pr_debug("called\n"); + write_lock_bh(&set->lock); + type_pf_expire(h); + write_unlock_bh(&set->lock); + + h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ; + add_timer(&h->gc); +} + +static void +type_pf_gc_init(struct ip_set *set) +{ + struct ip_set_hash *h = set->data; + + init_timer(&h->gc); + h->gc.data = (unsigned long) set; + h->gc.function = type_pf_gc; + h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ; + add_timer(&h->gc); + pr_debug("gc initialized, run in every %u\n", + IPSET_GC_PERIOD(h->timeout)); +} + +#undef type_pf_data_equal +#undef type_pf_data_isnull +#undef type_pf_data_copy +#undef type_pf_data_zero_out +#undef type_pf_data_list +#undef type_pf_data_tlist + +#undef type_pf_elem +#undef type_pf_telem +#undef type_pf_data_timeout +#undef type_pf_data_expired +#undef type_pf_data_netmask +#undef type_pf_data_timeout_set + +#undef type_pf_elem_add +#undef type_pf_add +#undef type_pf_del +#undef type_pf_test_cidrs +#undef type_pf_test + +#undef type_pf_elem_tadd +#undef type_pf_expire +#undef type_pf_tadd +#undef type_pf_tdel +#undef type_pf_ttest_cidrs +#undef type_pf_ttest + +#undef type_pf_resize +#undef type_pf_tresize +#undef type_pf_flush +#undef type_pf_destroy +#undef type_pf_head +#undef type_pf_list +#undef type_pf_tlist +#undef type_pf_same_set +#undef type_pf_kadt +#undef type_pf_uadt +#undef type_pf_gc +#undef type_pf_gc_init +#undef type_pf_variant +#undef type_pf_tvariant diff --git a/include/linux/netfilter/ipset/ip_set_hash.h b/include/linux/netfilter/ipset/ip_set_hash.h new file mode 100644 index 000000000000..b86f15c04524 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_hash.h @@ -0,0 +1,26 @@ +#ifndef __IP_SET_HASH_H +#define __IP_SET_HASH_H + +/* Hash type specific error codes */ +enum { + /* Hash is full */ + IPSET_ERR_HASH_FULL = IPSET_ERR_TYPE_SPECIFIC, + /* Null-valued element */ + IPSET_ERR_HASH_ELEM, + /* Invalid protocol */ + IPSET_ERR_INVALID_PROTO, + /* Protocol missing but must be specified */ + IPSET_ERR_MISSING_PROTO, +}; + +#ifdef __KERNEL__ + +#define IPSET_DEFAULT_HASHSIZE 1024 +#define IPSET_MIMINAL_HASHSIZE 64 +#define IPSET_DEFAULT_MAXELEM 65536 +#define IPSET_DEFAULT_PROBES 4 +#define IPSET_DEFAULT_RESIZE 100 + +#endif /* __KERNEL__ */ + +#endif /* __IP_SET_HASH_H */ -- cgit v1.2.3-11-g984f From f830837f0eed0f9e371b8fd65169365780814bb1 Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Tue, 1 Feb 2011 15:54:59 +0100 Subject: netfilter: ipset: list:set set type support The module implements the list:set type support in two flavours: without and with timeout. The sets have two sides: for userspace, they store the names of other (non-list:set) sets: one can add, delete and test set names. For the kernel, it forms an ordered union of the member sets: the member sets are tried in order when elements are added, deleted and tested, and the process stops at the first success.
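A conceptual sketch of that ordered union follows (plain userspace C with made-up member sets, not the kernel code): the member sets are consulted in list order and the first positive answer terminates the lookup.

/* Illustrative only -- stand-ins for member sets; the real code calls each
 * member set's own add/del/test function through the set's variant.
 */
#include <stdio.h>

struct member {
    const char *name;
    int (*test)(unsigned int ip);
};

static int test_lan(unsigned int ip)
{
    return (ip & 0xffff0000u) == 0xc0a80000u;   /* 192.168.0.0/16 */
}

static int test_dmz(unsigned int ip)
{
    return (ip & 0xffffff00u) == 0x0a000100u;   /* 10.0.1.0/24 */
}

static const struct member members[] = {
    { "lan", test_lan },    /* consulted first */
    { "dmz", test_dmz },    /* consulted only if "lan" did not match */
};

static int list_set_test(unsigned int ip)
{
    unsigned int i;

    for (i = 0; i < sizeof(members) / sizeof(members[0]); i++)
        if (members[i].test(ip))
            return 1;       /* stop at the first success */
    return 0;
}

int main(void)
{
    /* 192.168.0.1 is in the union, 8.8.8.8 is not */
    printf("%d %d\n", list_set_test(0xc0a80001u), list_set_test(0x08080808u));
    return 0;
}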
Signed-off-by: Jozsef Kadlecsik Signed-off-by: Patrick McHardy --- include/linux/netfilter/ipset/ip_set_list.h | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 include/linux/netfilter/ipset/ip_set_list.h (limited to 'include') diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h new file mode 100644 index 000000000000..40a63f302613 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_list.h @@ -0,0 +1,27 @@ +#ifndef __IP_SET_LIST_H +#define __IP_SET_LIST_H + +/* List type specific error codes */ +enum { + /* Set name to be added/deleted/tested does not exist. */ + IPSET_ERR_NAME = IPSET_ERR_TYPE_SPECIFIC, + /* list:set type is not permitted to add */ + IPSET_ERR_LOOP, + /* Missing reference set */ + IPSET_ERR_BEFORE, + /* Reference set does not exist */ + IPSET_ERR_NAMEREF, + /* Set is full */ + IPSET_ERR_LIST_FULL, + /* Reference set is not added to the set */ + IPSET_ERR_REF_EXIST, +}; + +#ifdef __KERNEL__ + +#define IP_SET_LIST_DEFAULT_SIZE 8 +#define IP_SET_LIST_MIN_SIZE 4 + +#endif /* __KERNEL__ */ + +#endif /* __IP_SET_LIST_H */ -- cgit v1.2.3-11-g984f From d956798d82d2d331c031301965d69e17a1a48a2b Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Tue, 1 Feb 2011 15:56:00 +0100 Subject: netfilter: xtables: "set" match and "SET" target support The patch adds the combined module of the "SET" target and "set" match to netfilter. Both the previous and the current revisions are supported. Signed-off-by: Jozsef Kadlecsik Signed-off-by: Patrick McHardy --- include/linux/netfilter/xt_set.h | 55 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 include/linux/netfilter/xt_set.h (limited to 'include') diff --git a/include/linux/netfilter/xt_set.h b/include/linux/netfilter/xt_set.h new file mode 100644 index 000000000000..69b2bd1fb818 --- /dev/null +++ b/include/linux/netfilter/xt_set.h @@ -0,0 +1,55 @@ +#ifndef _XT_SET_H +#define _XT_SET_H + +#include + +/* Revision 0 interface: backward compatible with netfilter/iptables */ + +/* + * Option flags for kernel operations (xt_set_info_v0) + */ +#define IPSET_SRC 0x01 /* Source match/add */ +#define IPSET_DST 0x02 /* Destination match/add */ +#define IPSET_MATCH_INV 0x04 /* Inverse matching */ + +struct xt_set_info_v0 { + ip_set_id_t index; + union { + __u32 flags[IPSET_DIM_MAX + 1]; + struct { + __u32 __flags[IPSET_DIM_MAX]; + __u8 dim; + __u8 flags; + } compat; + } u; +}; + +/* match and target infos */ +struct xt_set_info_match_v0 { + struct xt_set_info_v0 match_set; +}; + +struct xt_set_info_target_v0 { + struct xt_set_info_v0 add_set; + struct xt_set_info_v0 del_set; +}; + +/* Revision 1: current interface to netfilter/iptables */ + +struct xt_set_info { + ip_set_id_t index; + __u8 dim; + __u8 flags; +}; + +/* match and target infos */ +struct xt_set_info_match { + struct xt_set_info match_set; +}; + +struct xt_set_info_target { + struct xt_set_info add_set; + struct xt_set_info del_set; +}; + +#endif /*_XT_SET_H*/ -- cgit v1.2.3-11-g984f From a13676476e289ba03a23e27df130c7f33ab00e2f Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Tue, 1 Feb 2011 18:27:51 +0100 Subject: IPVS: Remove unused variables These variables are unused as a result of the recent netns work. 
Signed-off-by: Simon Horman Acked-by: Randy Dunlap Signed-off-by: Hans Schillstrom Tested-by: Hans Schillstrom Signed-off-by: Patrick McHardy --- include/net/ip_vs.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index b23bea62f708..5d75feadf4f4 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1109,8 +1109,6 @@ extern int ip_vs_icmp_xmit_v6 * we are loaded. Just set ip_vs_drop_rate to 'n' and * we start to drop 1/rate of the packets */ -extern int ip_vs_drop_rate; -extern int ip_vs_drop_counter; static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { -- cgit v1.2.3-11-g984f From e3e241b2769b27669d05f0a05083acd21b4faa2c Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 1 Feb 2011 18:52:42 +0100 Subject: netfilter: ipset: install ipset related header files Signed-off-by: Patrick McHardy --- include/linux/netfilter/Kbuild | 3 +++ include/linux/netfilter/ipset/Kbuild | 4 ++++ include/linux/netfilter/xt_set.h | 1 + 3 files changed, 8 insertions(+) create mode 100644 include/linux/netfilter/ipset/Kbuild (limited to 'include') diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild index 89c0d1e20d72..ba19544cce94 100644 --- a/include/linux/netfilter/Kbuild +++ b/include/linux/netfilter/Kbuild @@ -1,3 +1,5 @@ +header-y += ipset/ + header-y += nf_conntrack_common.h header-y += nf_conntrack_ftp.h header-y += nf_conntrack_sctp.h @@ -55,6 +57,7 @@ header-y += xt_quota.h header-y += xt_rateest.h header-y += xt_realm.h header-y += xt_recent.h +header-y += xt_set.h header-y += xt_sctp.h header-y += xt_socket.h header-y += xt_state.h diff --git a/include/linux/netfilter/ipset/Kbuild b/include/linux/netfilter/ipset/Kbuild new file mode 100644 index 000000000000..601fe71d34d5 --- /dev/null +++ b/include/linux/netfilter/ipset/Kbuild @@ -0,0 +1,4 @@ +header-y += ip_set.h +header-y += ip_set_bitmap.h +header-y += ip_set_hash.h +header-y += ip_set_list.h diff --git a/include/linux/netfilter/xt_set.h b/include/linux/netfilter/xt_set.h index 69b2bd1fb818..081f1ded2842 100644 --- a/include/linux/netfilter/xt_set.h +++ b/include/linux/netfilter/xt_set.h @@ -1,6 +1,7 @@ #ifndef _XT_SET_H #define _XT_SET_H +#include #include /* Revision 0 interface: backward compatible with netfilter/iptables */ -- cgit v1.2.3-11-g984f From 724bab476bcac9f7d0b5204cb06e346216d42166 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 2 Feb 2011 23:50:01 +0100 Subject: netfilter: ipset: fix linking with CONFIG_IPV6=n Add a dummy ip_set_get_ip6_port function that unconditionally returns false for CONFIG_IPV6=n and convert the real function to ipv6_skip_exthdr() to avoid pulling in the ip6_tables module when loading ipset. 
Signed-off-by: Patrick McHardy --- include/linux/netfilter/ipset/ip_set_getport.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include') diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h index 694c433298b8..3882a81a3b3c 100644 --- a/include/linux/netfilter/ipset/ip_set_getport.h +++ b/include/linux/netfilter/ipset/ip_set_getport.h @@ -3,8 +3,18 @@ extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src, __be16 *port, u8 *proto); + +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src, __be16 *port, u8 *proto); +#else +static inline bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src, + __be16 *port, u8 *proto) +{ + return false; +} +#endif + extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port); -- cgit v1.2.3-11-g984f From 9291747f118d6404e509747b85ff5f6dfec368d2 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Thu, 3 Feb 2011 00:05:43 +0100 Subject: netfilter: xtables: add device group match Add a new 'devgroup' match to match on the device group of the incoming and outgoing network device of a packet. Signed-off-by: Patrick McHardy --- include/linux/netfilter/Kbuild | 1 + include/linux/netfilter/xt_devgroup.h | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100644 include/linux/netfilter/xt_devgroup.h (limited to 'include') diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild index ba19544cce94..15e83bf3dd58 100644 --- a/include/linux/netfilter/Kbuild +++ b/include/linux/netfilter/Kbuild @@ -37,6 +37,7 @@ header-y += xt_connmark.h header-y += xt_conntrack.h header-y += xt_cpu.h header-y += xt_dccp.h +header-y += xt_devgroup.h header-y += xt_dscp.h header-y += xt_esp.h header-y += xt_hashlimit.h diff --git a/include/linux/netfilter/xt_devgroup.h b/include/linux/netfilter/xt_devgroup.h new file mode 100644 index 000000000000..1babde0ec900 --- /dev/null +++ b/include/linux/netfilter/xt_devgroup.h @@ -0,0 +1,21 @@ +#ifndef _XT_DEVGROUP_H +#define _XT_DEVGROUP_H + +#include + +enum xt_devgroup_flags { + XT_DEVGROUP_MATCH_SRC = 0x1, + XT_DEVGROUP_INVERT_SRC = 0x2, + XT_DEVGROUP_MATCH_DST = 0x4, + XT_DEVGROUP_INVERT_DST = 0x8, +}; + +struct xt_devgroup_info { + __u32 flags; + __u32 src_group; + __u32 src_mask; + __u32 dst_group; + __u32 dst_mask; +}; + +#endif /* _XT_DEVGROUP_H */ -- cgit v1.2.3-11-g984f
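To illustrate the semantics of the xt_devgroup_info flags above, here is a self-contained sketch of the masked compare-and-invert logic such a match performs (the helper, the local structure and the group values are illustrative, not the kernel implementation): a direction is checked only when its MATCH flag is set, the device's group id is compared under the configured mask, and the INVERT flag flips the outcome.

/* Illustrative userspace sketch -- not the kernel match module. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XT_DEVGROUP_MATCH_SRC  0x1
#define XT_DEVGROUP_INVERT_SRC 0x2
#define XT_DEVGROUP_MATCH_DST  0x4
#define XT_DEVGROUP_INVERT_DST 0x8

struct devgroup_info {          /* mirrors the fields of xt_devgroup_info */
    uint32_t flags, src_group, src_mask, dst_group, dst_mask;
};

static bool devgroup_match(const struct devgroup_info *i,
                           uint32_t in_group, uint32_t out_group)
{
    bool inv_src = i->flags & XT_DEVGROUP_INVERT_SRC;
    bool inv_dst = i->flags & XT_DEVGROUP_INVERT_DST;

    /* mismatch on the masked bits, XOR'ed with the invert flag */
    if ((i->flags & XT_DEVGROUP_MATCH_SRC) &&
        ((((in_group ^ i->src_group) & i->src_mask) != 0) ^ inv_src))
        return false;
    if ((i->flags & XT_DEVGROUP_MATCH_DST) &&
        ((((out_group ^ i->dst_group) & i->dst_mask) != 0) ^ inv_dst))
        return false;
    return true;
}

int main(void)
{
    struct devgroup_info i = {
        .flags = XT_DEVGROUP_MATCH_SRC,
        .src_group = 1, .src_mask = 0xffffffffu,
    };

    /* an incoming device in group 1 matches, one in group 2 does not */
    printf("%d %d\n", devgroup_match(&i, 1, 0), devgroup_match(&i, 2, 0));
    return 0;
}

The mask makes it possible to match a whole class of device groups with a single rule instead of enumerating individual group ids.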