From a0265d28b3a5877b5b8edd14eb12a2ccb60ab1f3 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 17 Apr 2014 13:45:03 +0800 Subject: net: Add __dev_forward_skb This patch adds the helper __dev_forward_skb which is identical to dev_forward_skb except that it doesn't actually inject the skb into the stack. This is useful where we wish to have finer control over how the packet is injected, e.g., via netif_rx_ni or netif_receive_skb. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller --- net/core/dev.c | 42 ++++++++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 18 deletions(-) (limited to 'net/core') diff --git a/net/core/dev.c b/net/core/dev.c index d2c8a06b3a98..11d70e3afefa 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1661,6 +1661,29 @@ bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb) } EXPORT_SYMBOL_GPL(is_skb_forwardable); +int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) +{ + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { + if (skb_copy_ubufs(skb, GFP_ATOMIC)) { + atomic_long_inc(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } + } + + if (unlikely(!is_skb_forwardable(dev, skb))) { + atomic_long_inc(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } + + skb_scrub_packet(skb, true); + skb->protocol = eth_type_trans(skb, dev); + + return 0; +} +EXPORT_SYMBOL_GPL(__dev_forward_skb); + /** * dev_forward_skb - loopback an skb to another netif * @@ -1681,24 +1704,7 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable); */ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) { - if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { - if (skb_copy_ubufs(skb, GFP_ATOMIC)) { - atomic_long_inc(&dev->rx_dropped); - kfree_skb(skb); - return NET_RX_DROP; - } - } - - if (unlikely(!is_skb_forwardable(dev, skb))) { - atomic_long_inc(&dev->rx_dropped); - kfree_skb(skb); - return NET_RX_DROP; - } - - skb_scrub_packet(skb, true); - skb->protocol = eth_type_trans(skb, dev); - - return netif_rx_internal(skb); + return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); } EXPORT_SYMBOL_GPL(dev_forward_skb); -- cgit v1.2.3-59-g8ed1b From 3de0b592394d17b2c41a261a6a493a521213f299 Mon Sep 17 00:00:00 2001 From: Venkata Duvvuru Date: Mon, 21 Apr 2014 15:37:59 +0530 Subject: ethtool: Support for configurable RSS hash key This ethtool patch primarily copies the ioctl command data structures from/to the User space and invokes the driver hook. Signed-off-by: Venkat Duvvuru Signed-off-by: David S. Miller --- include/linux/ethtool.h | 13 +++ include/uapi/linux/ethtool.h | 32 +++++++ net/core/ethtool.c | 221 ++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 252 insertions(+), 14 deletions(-) (limited to 'net/core') diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 0a114d05f68d..212f537fc686 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -154,13 +154,23 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) * @reset: Reset (part of) the device, as specified by a bitmask of * flags from &enum ethtool_reset_flags. Returns a negative * error code or zero. + * @get_rxfh_key_size: Get the size of the RX flow hash key. + * Returns zero if not supported for this specific device. * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. * Returns zero if not supported for this specific device. * @get_rxfh_indir: Get the contents of the RX flow hash indirection table. * Will not be called if @get_rxfh_indir_size returns zero. 
+ * @get_rxfh: Get the contents of the RX flow hash indirection table and hash + * key. + * Will not be called if @get_rxfh_indir_size and @get_rxfh_key_size + * returns zero. * Returns a negative error code or zero. * @set_rxfh_indir: Set the contents of the RX flow hash indirection table. * Will not be called if @get_rxfh_indir_size returns zero. + * @set_rxfh: Set the contents of the RX flow hash indirection table and + * hash key. + * Will not be called if @get_rxfh_indir_size and @get_rxfh_key_size + * returns zero. * Returns a negative error code or zero. * @get_channels: Get number of channels. * @set_channels: Set number of channels. Returns a negative error code or @@ -232,7 +242,10 @@ struct ethtool_ops { int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); + u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); + int (*get_rxfh)(struct net_device *, u32 *, u8 *); + int (*set_rxfh)(struct net_device *, u32 *, u8 *); int (*get_rxfh_indir)(struct net_device *, u32 *); int (*set_rxfh_indir)(struct net_device *, const u32 *); void (*get_channels)(struct net_device *, struct ethtool_channels *); diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index fd161e91b6d7..d47d31d6fa0e 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -846,6 +846,35 @@ struct ethtool_rxfh_indir { __u32 ring_index[0]; }; +/** + * struct ethtool_rxfh - command to get/set RX flow hash indir or/and hash key. + * @cmd: Specific command number - %ETHTOOL_GRSSH or %ETHTOOL_SRSSH + * @rss_context: RSS context identifier. + * @indir_size: On entry, the array size of the user buffer, which may be zero. + * On return from %ETHTOOL_GRSSH, the array size of the hardware + * indirection table. + * @key_size: On entry, the array size of the user buffer in bytes, + * which may be zero. + * On return from %ETHTOOL_GRSSH, the size of the RSS hash key. + * @rsvd: Reserved for future extensions. + * @rss_config: RX ring/queue index for each hash value i.e., indirection table + * of size @indir_size followed by hash key of size @key_size. + * + * For %ETHTOOL_GRSSH, a @indir_size and key_size of zero means that only the + * size should be returned. For %ETHTOOL_SRSSH, a @indir_size of 0xDEADBEEF + * means that indir table setting is not requested and a @indir_size of zero + * means the indir table should be reset to default values. This last feature + * is not supported by the original implementations. + */ +struct ethtool_rxfh { + __u32 cmd; + __u32 rss_context; + __u32 indir_size; + __u32 key_size; + __u32 rsvd[2]; + __u32 rss_config[0]; +}; + /** * struct ethtool_rx_ntuple_flow_spec - specification for RX flow filter * @flow_type: Type of match to perform, e.g. 
%TCP_V4_FLOW @@ -1118,6 +1147,9 @@ enum ethtool_sfeatures_retval_bits { #define ETHTOOL_GEEE 0x00000044 /* Get EEE settings */ #define ETHTOOL_SEEE 0x00000045 /* Set EEE settings */ +#define ETHTOOL_GRSSH 0x00000046 /* Get RX flow hash configuration */ +#define ETHTOOL_SRSSH 0x00000047 /* Set RX flow hash configuration */ + /* compatibility with older code */ #define SPARC_ETH_GSET ETHTOOL_GSET #define SPARC_ETH_SSET ETHTOOL_SSET diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 640ba0e5831c..1d72786ef866 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -557,6 +557,23 @@ err_out: return ret; } +static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr, + struct ethtool_rxnfc *rx_rings, + u32 size) +{ + int ret = 0, i; + + if (copy_from_user(indir, useraddr, size * sizeof(indir[0]))) + ret = -EFAULT; + + /* Validate ring indices */ + for (i = 0; i < size; i++) { + if (indir[i] >= rx_rings->data) + ret = -EINVAL; + } + return ret; +} + static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, void __user *useraddr) { @@ -613,6 +630,7 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev, u32 *indir; const struct ethtool_ops *ops = dev->ethtool_ops; int ret; + u32 ringidx_offset = offsetof(struct ethtool_rxfh_indir, ring_index[0]); if (!ops->get_rxfh_indir_size || !ops->set_rxfh_indir || !ops->get_rxnfc) @@ -643,28 +661,196 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev, for (i = 0; i < dev_size; i++) indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data); } else { - if (copy_from_user(indir, - useraddr + - offsetof(struct ethtool_rxfh_indir, - ring_index[0]), - dev_size * sizeof(indir[0]))) { + ret = ethtool_copy_validate_indir(indir, + useraddr + ringidx_offset, + &rx_rings, + dev_size); + if (ret) + goto out; + } + + ret = ops->set_rxfh_indir(dev, indir); + +out: + kfree(indir); + return ret; +} + +static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev, + void __user *useraddr) +{ + int ret; + const struct ethtool_ops *ops = dev->ethtool_ops; + u32 user_indir_size = 0, user_key_size = 0; + u32 dev_indir_size = 0, dev_key_size = 0; + u32 total_size; + u32 indir_offset, indir_bytes; + u32 key_offset; + u32 *indir = NULL; + u8 *hkey = NULL; + u8 *rss_config; + + if (!(dev->ethtool_ops->get_rxfh_indir_size || + dev->ethtool_ops->get_rxfh_key_size) || + !dev->ethtool_ops->get_rxfh) + return -EOPNOTSUPP; + + if (ops->get_rxfh_indir_size) + dev_indir_size = ops->get_rxfh_indir_size(dev); + + indir_offset = offsetof(struct ethtool_rxfh, indir_size); + + if (copy_from_user(&user_indir_size, + useraddr + indir_offset, + sizeof(user_indir_size))) + return -EFAULT; + + if (copy_to_user(useraddr + indir_offset, + &dev_indir_size, sizeof(dev_indir_size))) + return -EFAULT; + + if (ops->get_rxfh_key_size) + dev_key_size = ops->get_rxfh_key_size(dev); + + if ((dev_key_size + dev_indir_size) == 0) + return -EOPNOTSUPP; + + key_offset = offsetof(struct ethtool_rxfh, key_size); + + if (copy_from_user(&user_key_size, + useraddr + key_offset, + sizeof(user_key_size))) + return -EFAULT; + + if (copy_to_user(useraddr + key_offset, + &dev_key_size, sizeof(dev_key_size))) + return -EFAULT; + + /* If the user buffer size is 0, this is just a query for the + * device table size and key size. Otherwise, if the User size is + * not equal to device table size or key size it's an error. 
+ */ + if (!user_indir_size && !user_key_size) + return 0; + + if ((user_indir_size && (user_indir_size != dev_indir_size)) || + (user_key_size && (user_key_size != dev_key_size))) + return -EINVAL; + + indir_bytes = user_indir_size * sizeof(indir[0]); + total_size = indir_bytes + user_key_size; + rss_config = kzalloc(total_size, GFP_USER); + if (!rss_config) + return -ENOMEM; + + if (user_indir_size) + indir = (u32 *)rss_config; + + if (user_key_size) + hkey = rss_config + indir_bytes; + + ret = dev->ethtool_ops->get_rxfh(dev, indir, hkey); + if (!ret) { + if (copy_to_user(useraddr + + offsetof(struct ethtool_rxfh, rss_config[0]), + rss_config, total_size)) ret = -EFAULT; + } + + kfree(rss_config); + + return ret; +} + +static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, + void __user *useraddr) +{ + int ret; + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_rxnfc rx_rings; + u32 user_indir_size = 0, dev_indir_size = 0, i; + u32 user_key_size = 0, dev_key_size = 0; + u32 *indir = NULL, indir_bytes = 0; + u8 *hkey = NULL; + u8 *rss_config; + u32 indir_offset, key_offset; + u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]); + + if (!(ops->get_rxfh_indir_size || ops->get_rxfh_key_size) || + !ops->get_rxnfc || !ops->set_rxfh) + return -EOPNOTSUPP; + + if (ops->get_rxfh_indir_size) + dev_indir_size = ops->get_rxfh_indir_size(dev); + + indir_offset = offsetof(struct ethtool_rxfh, indir_size); + if (copy_from_user(&user_indir_size, + useraddr + indir_offset, + sizeof(user_indir_size))) + return -EFAULT; + + if (ops->get_rxfh_key_size) + dev_key_size = dev->ethtool_ops->get_rxfh_key_size(dev); + + if ((dev_key_size + dev_indir_size) == 0) + return -EOPNOTSUPP; + + key_offset = offsetof(struct ethtool_rxfh, key_size); + if (copy_from_user(&user_key_size, + useraddr + key_offset, + sizeof(user_key_size))) + return -EFAULT; + + /* If either indir or hash key is valid, proceed further. + */ + if ((user_indir_size && ((user_indir_size != 0xDEADBEEF) && + user_indir_size != dev_indir_size)) || + (user_key_size && (user_key_size != dev_key_size))) + return -EINVAL; + + if (user_indir_size != 0xDEADBEEF) + indir_bytes = dev_indir_size * sizeof(indir[0]); + + rss_config = kzalloc(indir_bytes + user_key_size, GFP_USER); + if (!rss_config) + return -ENOMEM; + + rx_rings.cmd = ETHTOOL_GRXRINGS; + ret = ops->get_rxnfc(dev, &rx_rings, NULL); + if (ret) + goto out; + + /* user_indir_size == 0 means reset the indir table to default. + * user_indir_size == 0xDEADBEEF means indir setting is not requested. 
+ */ + if (user_indir_size && user_indir_size != 0xDEADBEEF) { + indir = (u32 *)rss_config; + ret = ethtool_copy_validate_indir(indir, + useraddr + rss_cfg_offset, + &rx_rings, + user_indir_size); + if (ret) goto out; - } + } else if (user_indir_size == 0) { + indir = (u32 *)rss_config; + for (i = 0; i < dev_indir_size; i++) + indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data); + } - /* Validate ring indices */ - for (i = 0; i < dev_size; i++) { - if (indir[i] >= rx_rings.data) { - ret = -EINVAL; - goto out; - } + if (user_key_size) { + hkey = rss_config + indir_bytes; + if (copy_from_user(hkey, + useraddr + rss_cfg_offset + indir_bytes, + user_key_size)) { + ret = -EFAULT; + goto out; } } - ret = ops->set_rxfh_indir(dev, indir); + ret = ops->set_rxfh(dev, indir, hkey); out: - kfree(indir); + kfree(rss_config); return ret; } @@ -1491,6 +1677,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GRXCLSRULE: case ETHTOOL_GRXCLSRLALL: case ETHTOOL_GRXFHINDIR: + case ETHTOOL_GRSSH: case ETHTOOL_GFEATURES: case ETHTOOL_GCHANNELS: case ETHTOOL_GET_TS_INFO: @@ -1628,6 +1815,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_SRXFHINDIR: rc = ethtool_set_rxfh_indir(dev, useraddr); break; + case ETHTOOL_GRSSH: + rc = ethtool_get_rxfh(dev, useraddr); + break; + case ETHTOOL_SRSSH: + rc = ethtool_set_rxfh(dev, useraddr); + break; case ETHTOOL_GFEATURES: rc = ethtool_get_features(dev, useraddr); break; -- cgit v1.2.3-59-g8ed1b From 4cd3675ebf74d7f559038ded6aa8088e4099a83d Mon Sep 17 00:00:00 2001 From: Chema Gonzalez Date: Mon, 21 Apr 2014 09:21:24 -0700 Subject: filter: added BPF random opcode Added a new ancillary load (bpf call in eBPF parlance) that produces a 32-bit random number. We are implementing it as an ancillary load (instead of an ISA opcode) because (a) it is simpler, (b) allows easy JITing, and (c) seems more in line with generic ISAs that do not have "get a random number" as a instruction, but as an OS call. The main use for this ancillary load is to perform random packet sampling. Signed-off-by: Chema Gonzalez Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- Documentation/networking/filter.txt | 13 +++++++++++++ include/linux/filter.h | 1 + include/uapi/linux/filter.h | 3 ++- net/core/filter.c | 12 ++++++++++++ tools/net/bpf_exp.l | 1 + tools/net/bpf_exp.y | 11 ++++++++++- 6 files changed, 39 insertions(+), 2 deletions(-) (limited to 'net/core') diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index 81f940f4e884..82e1cb0b3da8 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -281,6 +281,7 @@ Possible BPF extensions are shown in the following table: cpu raw_smp_processor_id() vlan_tci vlan_tx_tag_get(skb) vlan_pr vlan_tx_tag_present(skb) + rand prandom_u32() These extensions can also be prefixed with '#'. 
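From user space, the new 'rand' extension is consumed through the normal classic BPF attach path. A minimal sketch of a 1-in-4 packet sampler (the raw-socket setup, includes and sampling ratio here are illustrative assumptions, not something the patch itself adds) could look like:

#include <sys/socket.h>
#include <linux/filter.h>

/* Attach a classic BPF program that accepts roughly 1 in 4 packets:
 *   ld rand; mod #4; jneq #1, drop; ret #-1; drop: ret #0
 * 'sock' is assumed to be an already-open packet or datagram socket.
 */
static int attach_random_sampler(int sock)
{
	struct sock_filter insns[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_RANDOM),
		BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 4),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 1, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* accept packet */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop packet */
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	return setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}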
Examples for low-level BPF: @@ -308,6 +309,18 @@ Examples for low-level BPF: ret #-1 drop: ret #0 +** icmp random packet sampling, 1 in 4 + ldh [12] + jne #0x800, drop + ldb [23] + jneq #1, drop + # get a random uint32 number + ld rand + mod #4 + jneq #1, drop + ret #-1 + drop: ret #0 + ** SECCOMP filter example: ld [4] /* offsetof(struct seccomp_data, arch) */ diff --git a/include/linux/filter.h b/include/linux/filter.h index 024fd03e5d18..759abf78dd61 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -223,6 +223,7 @@ enum { BPF_S_ANC_VLAN_TAG, BPF_S_ANC_VLAN_TAG_PRESENT, BPF_S_ANC_PAY_OFFSET, + BPF_S_ANC_RANDOM, }; #endif /* __LINUX_FILTER_H__ */ diff --git a/include/uapi/linux/filter.h b/include/uapi/linux/filter.h index 8eb9ccaa5b48..253b4d42cf2b 100644 --- a/include/uapi/linux/filter.h +++ b/include/uapi/linux/filter.h @@ -130,7 +130,8 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */ #define SKF_AD_VLAN_TAG 44 #define SKF_AD_VLAN_TAG_PRESENT 48 #define SKF_AD_PAY_OFFSET 52 -#define SKF_AD_MAX 56 +#define SKF_AD_RANDOM 56 +#define SKF_AD_MAX 60 #define SKF_NET_OFF (-0x100000) #define SKF_LL_OFF (-0x200000) diff --git a/net/core/filter.c b/net/core/filter.c index cd58614660cf..78a636e60a0b 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -643,6 +643,12 @@ static u64 __get_raw_cpu_id(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) return raw_smp_processor_id(); } +/* note that this only generates 32-bit random numbers */ +static u64 __get_random_u32(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) +{ + return (u64)prandom_u32(); +} + /* Register mappings for user programs. */ #define A_REG 0 #define X_REG 7 @@ -779,6 +785,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp, case SKF_AD_OFF + SKF_AD_NLATTR: case SKF_AD_OFF + SKF_AD_NLATTR_NEST: case SKF_AD_OFF + SKF_AD_CPU: + case SKF_AD_OFF + SKF_AD_RANDOM: /* arg1 = ctx */ insn->code = BPF_ALU64 | BPF_MOV | BPF_X; insn->a_reg = ARG1_REG; @@ -812,6 +819,9 @@ static bool convert_bpf_extensions(struct sock_filter *fp, case SKF_AD_OFF + SKF_AD_CPU: insn->imm = __get_raw_cpu_id - __bpf_call_base; break; + case SKF_AD_OFF + SKF_AD_RANDOM: + insn->imm = __get_random_u32 - __bpf_call_base; + break; } break; @@ -1362,6 +1372,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen) ANCILLARY(VLAN_TAG); ANCILLARY(VLAN_TAG_PRESENT); ANCILLARY(PAY_OFFSET); + ANCILLARY(RANDOM); } /* ancillary operation unknown or unsupported */ @@ -1746,6 +1757,7 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to) [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS, [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS, [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS, + [BPF_S_ANC_RANDOM] = BPF_LD|BPF_B|BPF_ABS, [BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN, [BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND, [BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND, diff --git a/tools/net/bpf_exp.l b/tools/net/bpf_exp.l index bf7be77ddd62..833a96611da6 100644 --- a/tools/net/bpf_exp.l +++ b/tools/net/bpf_exp.l @@ -92,6 +92,7 @@ extern void yyerror(const char *str); "#"?("cpu") { return K_CPU; } "#"?("vlan_tci") { return K_VLANT; } "#"?("vlan_pr") { return K_VLANP; } +"#"?("rand") { return K_RAND; } ":" { return ':'; } "," { return ','; } diff --git a/tools/net/bpf_exp.y b/tools/net/bpf_exp.y index d15efc989ef5..e6306c51c26f 100644 --- a/tools/net/bpf_exp.y +++ b/tools/net/bpf_exp.y @@ -56,7 +56,7 @@ static void bpf_set_jmp_label(char *label, enum jmp_type type); %token OP_LDXI %token K_PKT_LEN K_PROTO K_TYPE K_NLATTR K_NLATTR_NEST K_MARK K_QUEUE 
K_HATYPE -%token K_RXHASH K_CPU K_IFIDX K_VLANT K_VLANP K_POFF +%token K_RXHASH K_CPU K_IFIDX K_VLANT K_VLANP K_POFF K_RAND %token ':' ',' '[' ']' '(' ')' 'x' 'a' '+' 'M' '*' '&' '#' '%' @@ -164,6 +164,9 @@ ldb | OP_LDB K_POFF { bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_PAY_OFFSET); } + | OP_LDB K_RAND { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_RANDOM); } ; ldh @@ -212,6 +215,9 @@ ldh | OP_LDH K_POFF { bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_PAY_OFFSET); } + | OP_LDH K_RAND { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_RANDOM); } ; ldi @@ -265,6 +271,9 @@ ld | OP_LD K_POFF { bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_PAY_OFFSET); } + | OP_LD K_RAND { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_RANDOM); } | OP_LD 'M' '[' number ']' { bpf_set_curr_instr(BPF_LD | BPF_MEM, 0, 0, $4); } | OP_LD '[' 'x' '+' number ']' { -- cgit v1.2.3-59-g8ed1b From 1818ce4dc59a71a53faa33a7ca050ca9c406bf66 Mon Sep 17 00:00:00 2001 From: xiao jin Date: Fri, 25 Apr 2014 08:50:54 +0800 Subject: net_namespace: trivial cleanup Do not initialize net_kill_list twice. list_replace_init() already takes care of initializing net_kill_list. We don't need to initialize it with LIST_HEAD() beforehand. Signed-off-by: xiao jin Reviewed-by: David Cohen Signed-off-by: David S. Miller --- net/core/net_namespace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/core') diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 81d3a9a08453..05e949d48204 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -273,7 +273,7 @@ static void cleanup_net(struct work_struct *work) { const struct pernet_operations *ops; struct net *net, *tmp; - LIST_HEAD(net_kill_list); + struct list_head net_kill_list; LIST_HEAD(net_exit_list); /* Atomically snapshot the list of namespaces to cleanup */ -- cgit v1.2.3-59-g8ed1b From 266a16468432a77f2e1395b3de5df3ca699b1a9a Mon Sep 17 00:00:00 2001 From: Jean Sacren Date: Sun, 27 Apr 2014 02:20:38 -0600 Subject: ethtool: exit the loop when invalid index occurs The commit 3de0b592394d ("ethtool: Support for configurable RSS hash key") introduced a new function ethtool_copy_validate_indir() with full iteration of the loop to validate the ring indices, which could be an overkill. To minimize the impact, we ought to exit the loop as soon as the invalid index occurs for the very first time. The remaining loop simply doesn't serve any more purpose. Signed-off-by: Jean Sacren Cc: Venkata Duvvuru Signed-off-by: David S. Miller --- net/core/ethtool.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'net/core') diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 1d72786ef866..aa8978ac47d2 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -568,8 +568,10 @@ static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr, /* Validate ring indices */ for (i = 0; i < size; i++) { - if (indir[i] >= rx_rings->data) + if (indir[i] >= rx_rings->data) { ret = -EINVAL; + break; + } } return ret; } -- cgit v1.2.3-59-g8ed1b From 5bcfedf06f7fdf9efcf65dc11198e9012f7530f4 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 1 May 2014 18:34:18 +0200 Subject: net: filter: simplify label names from jump-table This patch simplifies label naming for the BPF jump-table. 
When we define labels via DL(), we just concatenate/textify the combination of instruction opcode which consists of the class, subclass, word size, target register and so on. Each time we leave BPF_ prefix intact, so that e.g. the preprocessor generates a label BPF_ALU_BPF_ADD_BPF_X for DL(BPF_ALU, BPF_ADD, BPF_X) whereas a label name of ALU_ADD_X is much more easy to grasp. Pure cleanup only. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- include/linux/filter.h | 3 + net/core/filter.c | 308 ++++++++++++++++++++++++------------------------- 2 files changed, 157 insertions(+), 154 deletions(-) (limited to 'net/core') diff --git a/include/linux/filter.h b/include/linux/filter.h index 759abf78dd61..b042d1db127f 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -37,6 +37,9 @@ #define BPF_CALL 0x80 /* function call */ #define BPF_EXIT 0x90 /* function return */ +/* Placeholder/dummy for 0 */ +#define BPF_0 0 + /* BPF has 10 general purpose 64-bit registers and stack frame. */ #define MAX_BPF_REG 11 diff --git a/net/core/filter.c b/net/core/filter.c index 7c4db3dd3d1e..a1784e95b049 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -156,94 +156,94 @@ unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn) static const void *jumptable[256] = { [0 ... 255] = &&default_label, /* Now overwrite non-defaults ... */ -#define DL(A, B, C) [A|B|C] = &&A##_##B##_##C - DL(BPF_ALU, BPF_ADD, BPF_X), - DL(BPF_ALU, BPF_ADD, BPF_K), - DL(BPF_ALU, BPF_SUB, BPF_X), - DL(BPF_ALU, BPF_SUB, BPF_K), - DL(BPF_ALU, BPF_AND, BPF_X), - DL(BPF_ALU, BPF_AND, BPF_K), - DL(BPF_ALU, BPF_OR, BPF_X), - DL(BPF_ALU, BPF_OR, BPF_K), - DL(BPF_ALU, BPF_LSH, BPF_X), - DL(BPF_ALU, BPF_LSH, BPF_K), - DL(BPF_ALU, BPF_RSH, BPF_X), - DL(BPF_ALU, BPF_RSH, BPF_K), - DL(BPF_ALU, BPF_XOR, BPF_X), - DL(BPF_ALU, BPF_XOR, BPF_K), - DL(BPF_ALU, BPF_MUL, BPF_X), - DL(BPF_ALU, BPF_MUL, BPF_K), - DL(BPF_ALU, BPF_MOV, BPF_X), - DL(BPF_ALU, BPF_MOV, BPF_K), - DL(BPF_ALU, BPF_DIV, BPF_X), - DL(BPF_ALU, BPF_DIV, BPF_K), - DL(BPF_ALU, BPF_MOD, BPF_X), - DL(BPF_ALU, BPF_MOD, BPF_K), - DL(BPF_ALU, BPF_NEG, 0), - DL(BPF_ALU, BPF_END, BPF_TO_BE), - DL(BPF_ALU, BPF_END, BPF_TO_LE), - DL(BPF_ALU64, BPF_ADD, BPF_X), - DL(BPF_ALU64, BPF_ADD, BPF_K), - DL(BPF_ALU64, BPF_SUB, BPF_X), - DL(BPF_ALU64, BPF_SUB, BPF_K), - DL(BPF_ALU64, BPF_AND, BPF_X), - DL(BPF_ALU64, BPF_AND, BPF_K), - DL(BPF_ALU64, BPF_OR, BPF_X), - DL(BPF_ALU64, BPF_OR, BPF_K), - DL(BPF_ALU64, BPF_LSH, BPF_X), - DL(BPF_ALU64, BPF_LSH, BPF_K), - DL(BPF_ALU64, BPF_RSH, BPF_X), - DL(BPF_ALU64, BPF_RSH, BPF_K), - DL(BPF_ALU64, BPF_XOR, BPF_X), - DL(BPF_ALU64, BPF_XOR, BPF_K), - DL(BPF_ALU64, BPF_MUL, BPF_X), - DL(BPF_ALU64, BPF_MUL, BPF_K), - DL(BPF_ALU64, BPF_MOV, BPF_X), - DL(BPF_ALU64, BPF_MOV, BPF_K), - DL(BPF_ALU64, BPF_ARSH, BPF_X), - DL(BPF_ALU64, BPF_ARSH, BPF_K), - DL(BPF_ALU64, BPF_DIV, BPF_X), - DL(BPF_ALU64, BPF_DIV, BPF_K), - DL(BPF_ALU64, BPF_MOD, BPF_X), - DL(BPF_ALU64, BPF_MOD, BPF_K), - DL(BPF_ALU64, BPF_NEG, 0), - DL(BPF_JMP, BPF_CALL, 0), - DL(BPF_JMP, BPF_JA, 0), - DL(BPF_JMP, BPF_JEQ, BPF_X), - DL(BPF_JMP, BPF_JEQ, BPF_K), - DL(BPF_JMP, BPF_JNE, BPF_X), - DL(BPF_JMP, BPF_JNE, BPF_K), - DL(BPF_JMP, BPF_JGT, BPF_X), - DL(BPF_JMP, BPF_JGT, BPF_K), - DL(BPF_JMP, BPF_JGE, BPF_X), - DL(BPF_JMP, BPF_JGE, BPF_K), - DL(BPF_JMP, BPF_JSGT, BPF_X), - DL(BPF_JMP, BPF_JSGT, BPF_K), - DL(BPF_JMP, BPF_JSGE, BPF_X), - DL(BPF_JMP, BPF_JSGE, BPF_K), - DL(BPF_JMP, BPF_JSET, BPF_X), - DL(BPF_JMP, BPF_JSET, 
BPF_K), - DL(BPF_JMP, BPF_EXIT, 0), - DL(BPF_STX, BPF_MEM, BPF_B), - DL(BPF_STX, BPF_MEM, BPF_H), - DL(BPF_STX, BPF_MEM, BPF_W), - DL(BPF_STX, BPF_MEM, BPF_DW), - DL(BPF_STX, BPF_XADD, BPF_W), - DL(BPF_STX, BPF_XADD, BPF_DW), - DL(BPF_ST, BPF_MEM, BPF_B), - DL(BPF_ST, BPF_MEM, BPF_H), - DL(BPF_ST, BPF_MEM, BPF_W), - DL(BPF_ST, BPF_MEM, BPF_DW), - DL(BPF_LDX, BPF_MEM, BPF_B), - DL(BPF_LDX, BPF_MEM, BPF_H), - DL(BPF_LDX, BPF_MEM, BPF_W), - DL(BPF_LDX, BPF_MEM, BPF_DW), - DL(BPF_LD, BPF_ABS, BPF_W), - DL(BPF_LD, BPF_ABS, BPF_H), - DL(BPF_LD, BPF_ABS, BPF_B), - DL(BPF_LD, BPF_IND, BPF_W), - DL(BPF_LD, BPF_IND, BPF_H), - DL(BPF_LD, BPF_IND, BPF_B), +#define DL(A, B, C) [BPF_##A|BPF_##B|BPF_##C] = &&A##_##B##_##C + DL(ALU, ADD, X), + DL(ALU, ADD, K), + DL(ALU, SUB, X), + DL(ALU, SUB, K), + DL(ALU, AND, X), + DL(ALU, AND, K), + DL(ALU, OR, X), + DL(ALU, OR, K), + DL(ALU, LSH, X), + DL(ALU, LSH, K), + DL(ALU, RSH, X), + DL(ALU, RSH, K), + DL(ALU, XOR, X), + DL(ALU, XOR, K), + DL(ALU, MUL, X), + DL(ALU, MUL, K), + DL(ALU, MOV, X), + DL(ALU, MOV, K), + DL(ALU, DIV, X), + DL(ALU, DIV, K), + DL(ALU, MOD, X), + DL(ALU, MOD, K), + DL(ALU, NEG, 0), + DL(ALU, END, TO_BE), + DL(ALU, END, TO_LE), + DL(ALU64, ADD, X), + DL(ALU64, ADD, K), + DL(ALU64, SUB, X), + DL(ALU64, SUB, K), + DL(ALU64, AND, X), + DL(ALU64, AND, K), + DL(ALU64, OR, X), + DL(ALU64, OR, K), + DL(ALU64, LSH, X), + DL(ALU64, LSH, K), + DL(ALU64, RSH, X), + DL(ALU64, RSH, K), + DL(ALU64, XOR, X), + DL(ALU64, XOR, K), + DL(ALU64, MUL, X), + DL(ALU64, MUL, K), + DL(ALU64, MOV, X), + DL(ALU64, MOV, K), + DL(ALU64, ARSH, X), + DL(ALU64, ARSH, K), + DL(ALU64, DIV, X), + DL(ALU64, DIV, K), + DL(ALU64, MOD, X), + DL(ALU64, MOD, K), + DL(ALU64, NEG, 0), + DL(JMP, CALL, 0), + DL(JMP, JA, 0), + DL(JMP, JEQ, X), + DL(JMP, JEQ, K), + DL(JMP, JNE, X), + DL(JMP, JNE, K), + DL(JMP, JGT, X), + DL(JMP, JGT, K), + DL(JMP, JGE, X), + DL(JMP, JGE, K), + DL(JMP, JSGT, X), + DL(JMP, JSGT, K), + DL(JMP, JSGE, X), + DL(JMP, JSGE, K), + DL(JMP, JSET, X), + DL(JMP, JSET, K), + DL(JMP, EXIT, 0), + DL(STX, MEM, B), + DL(STX, MEM, H), + DL(STX, MEM, W), + DL(STX, MEM, DW), + DL(STX, XADD, W), + DL(STX, XADD, DW), + DL(ST, MEM, B), + DL(ST, MEM, H), + DL(ST, MEM, W), + DL(ST, MEM, DW), + DL(LDX, MEM, B), + DL(LDX, MEM, H), + DL(LDX, MEM, W), + DL(LDX, MEM, DW), + DL(LD, ABS, W), + DL(LD, ABS, H), + DL(LD, ABS, B), + DL(LD, IND, W), + DL(LD, IND, H), + DL(LD, IND, B), #undef DL }; @@ -257,93 +257,93 @@ select_insn: /* ALU */ #define ALU(OPCODE, OP) \ - BPF_ALU64_##OPCODE##_BPF_X: \ + ALU64_##OPCODE##_X: \ A = A OP X; \ CONT; \ - BPF_ALU_##OPCODE##_BPF_X: \ + ALU_##OPCODE##_X: \ A = (u32) A OP (u32) X; \ CONT; \ - BPF_ALU64_##OPCODE##_BPF_K: \ + ALU64_##OPCODE##_K: \ A = A OP K; \ CONT; \ - BPF_ALU_##OPCODE##_BPF_K: \ + ALU_##OPCODE##_K: \ A = (u32) A OP (u32) K; \ CONT; - ALU(BPF_ADD, +) - ALU(BPF_SUB, -) - ALU(BPF_AND, &) - ALU(BPF_OR, |) - ALU(BPF_LSH, <<) - ALU(BPF_RSH, >>) - ALU(BPF_XOR, ^) - ALU(BPF_MUL, *) + ALU(ADD, +) + ALU(SUB, -) + ALU(AND, &) + ALU(OR, |) + ALU(LSH, <<) + ALU(RSH, >>) + ALU(XOR, ^) + ALU(MUL, *) #undef ALU - BPF_ALU_BPF_NEG_0: + ALU_NEG_0: A = (u32) -A; CONT; - BPF_ALU64_BPF_NEG_0: + ALU64_NEG_0: A = -A; CONT; - BPF_ALU_BPF_MOV_BPF_X: + ALU_MOV_X: A = (u32) X; CONT; - BPF_ALU_BPF_MOV_BPF_K: + ALU_MOV_K: A = (u32) K; CONT; - BPF_ALU64_BPF_MOV_BPF_X: + ALU64_MOV_X: A = X; CONT; - BPF_ALU64_BPF_MOV_BPF_K: + ALU64_MOV_K: A = K; CONT; - BPF_ALU64_BPF_ARSH_BPF_X: + ALU64_ARSH_X: (*(s64 *) &A) >>= X; CONT; - BPF_ALU64_BPF_ARSH_BPF_K: + ALU64_ARSH_K: 
(*(s64 *) &A) >>= K; CONT; - BPF_ALU64_BPF_MOD_BPF_X: + ALU64_MOD_X: if (unlikely(X == 0)) return 0; tmp = A; A = do_div(tmp, X); CONT; - BPF_ALU_BPF_MOD_BPF_X: + ALU_MOD_X: if (unlikely(X == 0)) return 0; tmp = (u32) A; A = do_div(tmp, (u32) X); CONT; - BPF_ALU64_BPF_MOD_BPF_K: + ALU64_MOD_K: tmp = A; A = do_div(tmp, K); CONT; - BPF_ALU_BPF_MOD_BPF_K: + ALU_MOD_K: tmp = (u32) A; A = do_div(tmp, (u32) K); CONT; - BPF_ALU64_BPF_DIV_BPF_X: + ALU64_DIV_X: if (unlikely(X == 0)) return 0; do_div(A, X); CONT; - BPF_ALU_BPF_DIV_BPF_X: + ALU_DIV_X: if (unlikely(X == 0)) return 0; tmp = (u32) A; do_div(tmp, (u32) X); A = (u32) tmp; CONT; - BPF_ALU64_BPF_DIV_BPF_K: + ALU64_DIV_K: do_div(A, K); CONT; - BPF_ALU_BPF_DIV_BPF_K: + ALU_DIV_K: tmp = (u32) A; do_div(tmp, (u32) K); A = (u32) tmp; CONT; - BPF_ALU_BPF_END_BPF_TO_BE: + ALU_END_TO_BE: switch (K) { case 16: A = (__force u16) cpu_to_be16(A); @@ -356,7 +356,7 @@ select_insn: break; } CONT; - BPF_ALU_BPF_END_BPF_TO_LE: + ALU_END_TO_LE: switch (K) { case 16: A = (__force u16) cpu_to_le16(A); @@ -371,7 +371,7 @@ select_insn: CONT; /* CALL */ - BPF_JMP_BPF_CALL_0: + JMP_CALL_0: /* Function call scratches R1-R5 registers, preserves R6-R9, * and stores return value into R0. */ @@ -380,122 +380,122 @@ select_insn: CONT; /* JMP */ - BPF_JMP_BPF_JA_0: + JMP_JA_0: insn += insn->off; CONT; - BPF_JMP_BPF_JEQ_BPF_X: + JMP_JEQ_X: if (A == X) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JEQ_BPF_K: + JMP_JEQ_K: if (A == K) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JNE_BPF_X: + JMP_JNE_X: if (A != X) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JNE_BPF_K: + JMP_JNE_K: if (A != K) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JGT_BPF_X: + JMP_JGT_X: if (A > X) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JGT_BPF_K: + JMP_JGT_K: if (A > K) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JGE_BPF_X: + JMP_JGE_X: if (A >= X) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JGE_BPF_K: + JMP_JGE_K: if (A >= K) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JSGT_BPF_X: - if (((s64)A) > ((s64)X)) { + JMP_JSGT_X: + if (((s64) A) > ((s64) X)) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JSGT_BPF_K: - if (((s64)A) > ((s64)K)) { + JMP_JSGT_K: + if (((s64) A) > ((s64) K)) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JSGE_BPF_X: - if (((s64)A) >= ((s64)X)) { + JMP_JSGE_X: + if (((s64) A) >= ((s64) X)) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JSGE_BPF_K: - if (((s64)A) >= ((s64)K)) { + JMP_JSGE_K: + if (((s64) A) >= ((s64) K)) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JSET_BPF_X: + JMP_JSET_X: if (A & X) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JSET_BPF_K: + JMP_JSET_K: if (A & K) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_EXIT_0: + JMP_EXIT_0: return R0; /* STX and ST and LDX*/ #define LDST(SIZEOP, SIZE) \ - BPF_STX_BPF_MEM_##SIZEOP: \ + STX_MEM_##SIZEOP: \ *(SIZE *)(unsigned long) (A + insn->off) = X; \ CONT; \ - BPF_ST_BPF_MEM_##SIZEOP: \ + ST_MEM_##SIZEOP: \ *(SIZE *)(unsigned long) (A + insn->off) = K; \ CONT; \ - BPF_LDX_BPF_MEM_##SIZEOP: \ + LDX_MEM_##SIZEOP: \ A = *(SIZE *)(unsigned long) (X + insn->off); \ CONT; - LDST(BPF_B, u8) - LDST(BPF_H, u16) - LDST(BPF_W, u32) - LDST(BPF_DW, u64) + LDST(B, u8) + LDST(H, u16) + LDST(W, u32) + LDST(DW, u64) #undef LDST - BPF_STX_BPF_XADD_BPF_W: /* lock xadd *(u32 *)(A + insn->off) += X */ + STX_XADD_W: /* lock xadd *(u32 *)(A + insn->off) += X */ atomic_add((u32) X, (atomic_t *)(unsigned long) (A + insn->off)); CONT; 
- BPF_STX_BPF_XADD_BPF_DW: /* lock xadd *(u64 *)(A + insn->off) += X */ + STX_XADD_DW: /* lock xadd *(u64 *)(A + insn->off) += X */ atomic64_add((u64) X, (atomic64_t *)(unsigned long) (A + insn->off)); CONT; - BPF_LD_BPF_ABS_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */ + LD_ABS_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */ off = K; load_word: /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only @@ -524,7 +524,7 @@ load_word: CONT; } return 0; - BPF_LD_BPF_ABS_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */ + LD_ABS_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */ off = K; load_half: ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp); @@ -533,7 +533,7 @@ load_half: CONT; } return 0; - BPF_LD_BPF_ABS_BPF_B: /* R0 = *(u8 *) (ctx + K) */ + LD_ABS_B: /* R0 = *(u8 *) (ctx + K) */ off = K; load_byte: ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp); @@ -542,13 +542,13 @@ load_byte: CONT; } return 0; - BPF_LD_BPF_IND_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */ + LD_IND_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */ off = K + X; goto load_word; - BPF_LD_BPF_IND_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */ + LD_IND_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */ off = K + X; goto load_half; - BPF_LD_BPF_IND_BPF_B: /* R0 = *(u8 *) (skb->data + X + K) */ + LD_IND_B: /* R0 = *(u8 *) (skb->data + X + K) */ off = K + X; goto load_byte; -- cgit v1.2.3-59-g8ed1b From 30743837dd204d2b04fd4e9d3db78cc7b118c81a Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 1 May 2014 18:34:19 +0200 Subject: net: filter: make register naming more comprehensible The current code is a bit hard to parse on which registers can be used, how they are mapped and all play together. It makes much more sense to define this a bit more clearly so that the code is a bit more intuitive. This patch cleans this up, and makes naming a bit more consistent among the code. This also allows for moving some of the defines into the header file. Clearing of A and X registers in __sk_run_filter() do not get a particular register name assigned as they have not an 'official' function, but rather just result from the concrete initial mapping of old BPF programs. Since for BPF helper functions for BPF_CALL we already use small letters, so be consistent here as well. No functional changes. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- include/linux/filter.h | 44 ++++++++-- net/core/filter.c | 215 +++++++++++++++++++++++++------------------------ 2 files changed, 145 insertions(+), 114 deletions(-) (limited to 'net/core') diff --git a/include/linux/filter.h b/include/linux/filter.h index b042d1db127f..ed1efab10b8f 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -40,16 +40,47 @@ /* Placeholder/dummy for 0 */ #define BPF_0 0 +/* Register numbers */ +enum { + BPF_REG_0 = 0, + BPF_REG_1, + BPF_REG_2, + BPF_REG_3, + BPF_REG_4, + BPF_REG_5, + BPF_REG_6, + BPF_REG_7, + BPF_REG_8, + BPF_REG_9, + BPF_REG_10, + __MAX_BPF_REG, +}; + /* BPF has 10 general purpose 64-bit registers and stack frame. */ -#define MAX_BPF_REG 11 +#define MAX_BPF_REG __MAX_BPF_REG + +/* ArgX, context and stack frame pointer register positions. Note, + * Arg1, Arg2, Arg3, etc are used as argument mappings of function + * calls in BPF_CALL instruction. 
+ */ +#define BPF_REG_ARG1 BPF_REG_1 +#define BPF_REG_ARG2 BPF_REG_2 +#define BPF_REG_ARG3 BPF_REG_3 +#define BPF_REG_ARG4 BPF_REG_4 +#define BPF_REG_ARG5 BPF_REG_5 +#define BPF_REG_CTX BPF_REG_6 +#define BPF_REG_FP BPF_REG_10 + +/* Additional register mappings for converted user programs. */ +#define BPF_REG_A BPF_REG_0 +#define BPF_REG_X BPF_REG_7 +#define BPF_REG_TMP BPF_REG_8 /* BPF program can access up to 512 bytes of stack space. */ #define MAX_BPF_STACK 512 -/* Arg1, context and stack frame pointer register positions. */ -#define ARG1_REG 1 -#define CTX_REG 6 -#define FP_REG 10 +/* Macro to invoke filter function. */ +#define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) struct sock_filter_int { __u8 code; /* opcode */ @@ -100,9 +131,6 @@ static inline unsigned int sk_filter_size(unsigned int proglen) #define sk_filter_proglen(fprog) \ (fprog->len * sizeof(fprog->filter[0])) -#define SK_RUN_FILTER(filter, ctx) \ - (*filter->bpf_func)(ctx, filter->insnsi) - int sk_filter(struct sock *sk, struct sk_buff *skb); u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx, diff --git a/net/core/filter.c b/net/core/filter.c index a1784e95b049..c93ca8d66f37 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -45,6 +45,27 @@ #include #include +/* Registers */ +#define R0 regs[BPF_REG_0] +#define R1 regs[BPF_REG_1] +#define R2 regs[BPF_REG_2] +#define R3 regs[BPF_REG_3] +#define R4 regs[BPF_REG_4] +#define R5 regs[BPF_REG_5] +#define R6 regs[BPF_REG_6] +#define R7 regs[BPF_REG_7] +#define R8 regs[BPF_REG_8] +#define R9 regs[BPF_REG_9] +#define R10 regs[BPF_REG_10] + +/* Named registers */ +#define A regs[insn->a_reg] +#define X regs[insn->x_reg] +#define FP regs[BPF_REG_FP] +#define ARG1 regs[BPF_REG_ARG1] +#define CTX regs[BPF_REG_CTX] +#define K insn->imm + /* No hurry in this branch * * Exported for the bpf jit load helper. @@ -122,13 +143,6 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) return 0; } -/* Register mappings for user programs. */ -#define A_REG 0 -#define X_REG 7 -#define TMP_REG 8 -#define ARG2_REG 2 -#define ARG3_REG 3 - /** * __sk_run_filter - run a filter on a given context * @ctx: buffer to run the filter on @@ -142,17 +156,6 @@ unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn) { u64 stack[MAX_BPF_STACK / sizeof(u64)]; u64 regs[MAX_BPF_REG], tmp; - void *ptr; - int off; - -#define K insn->imm -#define A regs[insn->a_reg] -#define X regs[insn->x_reg] -#define R0 regs[0] - -#define CONT ({insn++; goto select_insn; }) -#define CONT_JMP ({insn++; goto select_insn; }) - static const void *jumptable[256] = { [0 ... 255] = &&default_label, /* Now overwrite non-defaults ... */ @@ -246,11 +249,18 @@ unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn) DL(LD, IND, B), #undef DL }; + void *ptr; + int off; - regs[FP_REG] = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; - regs[ARG1_REG] = (u64) (unsigned long) ctx; - regs[A_REG] = 0; - regs[X_REG] = 0; +#define CONT ({ insn++; goto select_insn; }) +#define CONT_JMP ({ insn++; goto select_insn; }) + + FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; + ARG1 = (u64) (unsigned long) ctx; + + /* Register for user BPF programs need to be reset first. */ + regs[BPF_REG_A] = 0; + regs[BPF_REG_X] = 0; select_insn: goto *jumptable[insn->code]; @@ -375,8 +385,7 @@ select_insn: /* Function call scratches R1-R5 registers, preserves R6-R9, * and stores return value into R0. 
*/ - R0 = (__bpf_call_base + insn->imm)(regs[1], regs[2], regs[3], - regs[4], regs[5]); + R0 = (__bpf_call_base + insn->imm)(R1, R2, R3, R4, R5); CONT; /* JMP */ @@ -500,7 +509,7 @@ select_insn: load_word: /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only * appearing in the programs where ctx == skb. All programs - * keep 'ctx' in regs[CTX_REG] == R6, sk_convert_filter() + * keep 'ctx' in regs[BPF_REG_CTX] == R6, sk_convert_filter() * saves it in R6, internal BPF verifier will check that * R6 == ctx. * @@ -556,13 +565,6 @@ load_byte: /* If we ever reach this, we have a bug somewhere. */ WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code); return 0; -#undef CONT_JMP -#undef CONT - -#undef R0 -#undef X -#undef A -#undef K } u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx, @@ -594,14 +596,14 @@ static unsigned int pkt_type_offset(void) return -1; } -static u64 __skb_get_pay_offset(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) +static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) { struct sk_buff *skb = (struct sk_buff *)(long) ctx; return __skb_get_poff(skb); } -static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) +static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) { struct sk_buff *skb = (struct sk_buff *)(long) ctx; struct nlattr *nla; @@ -612,17 +614,17 @@ static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) if (skb->len < sizeof(struct nlattr)) return 0; - if (A > skb->len - sizeof(struct nlattr)) + if (a > skb->len - sizeof(struct nlattr)) return 0; - nla = nla_find((struct nlattr *) &skb->data[A], skb->len - A, X); + nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x); if (nla) return (void *) nla - (void *) skb->data; return 0; } -static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) +static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) { struct sk_buff *skb = (struct sk_buff *)(long) ctx; struct nlattr *nla; @@ -633,27 +635,27 @@ static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) if (skb->len < sizeof(struct nlattr)) return 0; - if (A > skb->len - sizeof(struct nlattr)) + if (a > skb->len - sizeof(struct nlattr)) return 0; - nla = (struct nlattr *) &skb->data[A]; - if (nla->nla_len > skb->len - A) + nla = (struct nlattr *) &skb->data[a]; + if (nla->nla_len > skb->len - a) return 0; - nla = nla_find_nested(nla, X); + nla = nla_find_nested(nla, x); if (nla) return (void *) nla - (void *) skb->data; return 0; } -static u64 __get_raw_cpu_id(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) +static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) { return raw_smp_processor_id(); } /* note that this only generates 32-bit random numbers */ -static u64 __get_random_u32(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) +static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) { return (u64)prandom_u32(); } @@ -668,28 +670,28 @@ static bool convert_bpf_extensions(struct sock_filter *fp, BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); insn->code = BPF_LDX | BPF_MEM | BPF_H; - insn->a_reg = A_REG; - insn->x_reg = CTX_REG; + insn->a_reg = BPF_REG_A; + insn->x_reg = BPF_REG_CTX; insn->off = offsetof(struct sk_buff, protocol); insn++; /* A = ntohs(A) [emitting a nop or swap16] */ insn->code = BPF_ALU | BPF_END | BPF_FROM_BE; - insn->a_reg = A_REG; + insn->a_reg = BPF_REG_A; insn->imm = 16; break; case SKF_AD_OFF + SKF_AD_PKTTYPE: insn->code = BPF_LDX | BPF_MEM | BPF_B; - insn->a_reg = A_REG; - insn->x_reg = CTX_REG; + insn->a_reg = BPF_REG_A; + 
insn->x_reg = BPF_REG_CTX; insn->off = pkt_type_offset(); if (insn->off < 0) return false; insn++; insn->code = BPF_ALU | BPF_AND | BPF_K; - insn->a_reg = A_REG; + insn->a_reg = BPF_REG_A; insn->imm = PKT_TYPE_MAX; break; @@ -699,13 +701,13 @@ static bool convert_bpf_extensions(struct sock_filter *fp, insn->code = BPF_LDX | BPF_MEM | BPF_DW; else insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->a_reg = TMP_REG; - insn->x_reg = CTX_REG; + insn->a_reg = BPF_REG_TMP; + insn->x_reg = BPF_REG_CTX; insn->off = offsetof(struct sk_buff, dev); insn++; insn->code = BPF_JMP | BPF_JNE | BPF_K; - insn->a_reg = TMP_REG; + insn->a_reg = BPF_REG_TMP; insn->imm = 0; insn->off = 1; insn++; @@ -716,8 +718,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp, BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); - insn->a_reg = A_REG; - insn->x_reg = TMP_REG; + insn->a_reg = BPF_REG_A; + insn->x_reg = BPF_REG_TMP; if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) { insn->code = BPF_LDX | BPF_MEM | BPF_W; @@ -732,8 +734,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp, BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->a_reg = A_REG; - insn->x_reg = CTX_REG; + insn->a_reg = BPF_REG_A; + insn->x_reg = BPF_REG_CTX; insn->off = offsetof(struct sk_buff, mark); break; @@ -741,8 +743,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp, BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->a_reg = A_REG; - insn->x_reg = CTX_REG; + insn->a_reg = BPF_REG_A; + insn->x_reg = BPF_REG_CTX; insn->off = offsetof(struct sk_buff, hash); break; @@ -750,8 +752,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp, BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); insn->code = BPF_LDX | BPF_MEM | BPF_H; - insn->a_reg = A_REG; - insn->x_reg = CTX_REG; + insn->a_reg = BPF_REG_A; + insn->x_reg = BPF_REG_CTX; insn->off = offsetof(struct sk_buff, queue_mapping); break; @@ -760,8 +762,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp, BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); insn->code = BPF_LDX | BPF_MEM | BPF_H; - insn->a_reg = A_REG; - insn->x_reg = CTX_REG; + insn->a_reg = BPF_REG_A; + insn->x_reg = BPF_REG_CTX; insn->off = offsetof(struct sk_buff, vlan_tci); insn++; @@ -769,16 +771,16 @@ static bool convert_bpf_extensions(struct sock_filter *fp, if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) { insn->code = BPF_ALU | BPF_AND | BPF_K; - insn->a_reg = A_REG; + insn->a_reg = BPF_REG_A; insn->imm = ~VLAN_TAG_PRESENT; } else { insn->code = BPF_ALU | BPF_RSH | BPF_K; - insn->a_reg = A_REG; + insn->a_reg = BPF_REG_A; insn->imm = 12; insn++; insn->code = BPF_ALU | BPF_AND | BPF_K; - insn->a_reg = A_REG; + insn->a_reg = BPF_REG_A; insn->imm = 1; } break; @@ -790,20 +792,20 @@ static bool convert_bpf_extensions(struct sock_filter *fp, case SKF_AD_OFF + SKF_AD_RANDOM: /* arg1 = ctx */ insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = ARG1_REG; - insn->x_reg = CTX_REG; + insn->a_reg = BPF_REG_ARG1; + insn->x_reg = BPF_REG_CTX; insn++; /* arg2 = A */ insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = ARG2_REG; - insn->x_reg = A_REG; + insn->a_reg = BPF_REG_ARG2; + insn->x_reg = BPF_REG_A; insn++; /* arg3 = X */ insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = ARG3_REG; - insn->x_reg = X_REG; + insn->a_reg = BPF_REG_ARG3; + insn->x_reg = BPF_REG_X; insn++; /* Emit call(ctx, arg2=A, 
arg3=X) */ @@ -829,8 +831,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp, case SKF_AD_OFF + SKF_AD_ALU_XOR_X: insn->code = BPF_ALU | BPF_XOR | BPF_X; - insn->a_reg = A_REG; - insn->x_reg = X_REG; + insn->a_reg = BPF_REG_A; + insn->x_reg = BPF_REG_X; break; default: @@ -880,7 +882,7 @@ int sk_convert_filter(struct sock_filter *prog, int len, u8 bpf_src; BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK); - BUILD_BUG_ON(FP_REG + 1 != MAX_BPF_REG); + BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); if (len <= 0 || len >= BPF_MAXINSNS) return -EINVAL; @@ -897,8 +899,8 @@ do_pass: if (new_insn) { new_insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - new_insn->a_reg = CTX_REG; - new_insn->x_reg = ARG1_REG; + new_insn->a_reg = BPF_REG_CTX; + new_insn->x_reg = BPF_REG_ARG1; } new_insn++; @@ -948,8 +950,8 @@ do_pass: break; insn->code = fp->code; - insn->a_reg = A_REG; - insn->x_reg = X_REG; + insn->a_reg = BPF_REG_A; + insn->x_reg = BPF_REG_X; insn->imm = fp->k; break; @@ -983,16 +985,16 @@ do_pass: * in compare insn. */ insn->code = BPF_ALU | BPF_MOV | BPF_K; - insn->a_reg = TMP_REG; + insn->a_reg = BPF_REG_TMP; insn->imm = fp->k; insn++; - insn->a_reg = A_REG; - insn->x_reg = TMP_REG; + insn->a_reg = BPF_REG_A; + insn->x_reg = BPF_REG_TMP; bpf_src = BPF_X; } else { - insn->a_reg = A_REG; - insn->x_reg = X_REG; + insn->a_reg = BPF_REG_A; + insn->x_reg = BPF_REG_X; insn->imm = fp->k; bpf_src = BPF_SRC(fp->code); } @@ -1027,33 +1029,33 @@ do_pass: /* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */ case BPF_LDX | BPF_MSH | BPF_B: insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = TMP_REG; - insn->x_reg = A_REG; + insn->a_reg = BPF_REG_TMP; + insn->x_reg = BPF_REG_A; insn++; insn->code = BPF_LD | BPF_ABS | BPF_B; - insn->a_reg = A_REG; + insn->a_reg = BPF_REG_A; insn->imm = fp->k; insn++; insn->code = BPF_ALU | BPF_AND | BPF_K; - insn->a_reg = A_REG; + insn->a_reg = BPF_REG_A; insn->imm = 0xf; insn++; insn->code = BPF_ALU | BPF_LSH | BPF_K; - insn->a_reg = A_REG; + insn->a_reg = BPF_REG_A; insn->imm = 2; insn++; insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = X_REG; - insn->x_reg = A_REG; + insn->a_reg = BPF_REG_X; + insn->x_reg = BPF_REG_A; insn++; insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = A_REG; - insn->x_reg = TMP_REG; + insn->a_reg = BPF_REG_A; + insn->x_reg = BPF_REG_TMP; break; /* RET_K, RET_A are remaped into 2 insns. */ @@ -1063,7 +1065,7 @@ do_pass: (BPF_RVAL(fp->code) == BPF_K ? BPF_K : BPF_X); insn->a_reg = 0; - insn->x_reg = A_REG; + insn->x_reg = BPF_REG_A; insn->imm = fp->k; insn++; @@ -1074,8 +1076,9 @@ do_pass: case BPF_ST: case BPF_STX: insn->code = BPF_STX | BPF_MEM | BPF_W; - insn->a_reg = FP_REG; - insn->x_reg = fp->code == BPF_ST ? A_REG : X_REG; + insn->a_reg = BPF_REG_FP; + insn->x_reg = fp->code == BPF_ST ? + BPF_REG_A : BPF_REG_X; insn->off = -(BPF_MEMWORDS - fp->k) * 4; break; @@ -1084,8 +1087,8 @@ do_pass: case BPF_LDX | BPF_MEM: insn->code = BPF_LDX | BPF_MEM | BPF_W; insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? - A_REG : X_REG; - insn->x_reg = FP_REG; + BPF_REG_A : BPF_REG_X; + insn->x_reg = BPF_REG_FP; insn->off = -(BPF_MEMWORDS - fp->k) * 4; break; @@ -1094,22 +1097,22 @@ do_pass: case BPF_LDX | BPF_IMM: insn->code = BPF_ALU | BPF_MOV | BPF_K; insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? 
- A_REG : X_REG; + BPF_REG_A : BPF_REG_X; insn->imm = fp->k; break; /* X = A */ case BPF_MISC | BPF_TAX: insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = X_REG; - insn->x_reg = A_REG; + insn->a_reg = BPF_REG_X; + insn->x_reg = BPF_REG_A; break; /* A = X */ case BPF_MISC | BPF_TXA: insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = A_REG; - insn->x_reg = X_REG; + insn->a_reg = BPF_REG_A; + insn->x_reg = BPF_REG_X; break; /* A = skb->len or X = skb->len */ @@ -1117,16 +1120,16 @@ do_pass: case BPF_LDX | BPF_W | BPF_LEN: insn->code = BPF_LDX | BPF_MEM | BPF_W; insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? - A_REG : X_REG; - insn->x_reg = CTX_REG; + BPF_REG_A : BPF_REG_X; + insn->x_reg = BPF_REG_CTX; insn->off = offsetof(struct sk_buff, len); break; /* access seccomp_data fields */ case BPF_LDX | BPF_ABS | BPF_W: insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->a_reg = A_REG; - insn->x_reg = CTX_REG; + insn->a_reg = BPF_REG_A; + insn->x_reg = BPF_REG_CTX; insn->off = fp->k; break; -- cgit v1.2.3-59-g8ed1b From eb9672f4a14bb7058c44efcc31c89737a7724d2c Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 1 May 2014 18:34:20 +0200 Subject: net: filter: misc/various cleanups This contains only some minor misc cleanpus. We can spare us the extra variable declaration in __skb_get_pay_offset(), the cast in __get_random_u32() is rather unnecessary and in __sk_migrate_realloc() we can remove the memcpy() and do a direct assignment of the structs. Latter was suggested by Fengguang Wu found with coccinelle. Also, remaining pointer casts of long should be unsigned long instead. Suggested-by: Fengguang Wu Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- net/core/filter.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'net/core') diff --git a/net/core/filter.c b/net/core/filter.c index c93ca8d66f37..eb020a7d6f55 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -78,9 +78,9 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns ptr = skb_network_header(skb) + k - SKF_NET_OFF; else if (k >= SKF_LL_OFF) ptr = skb_mac_header(skb) + k - SKF_LL_OFF; - if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) return ptr; + return NULL; } @@ -89,6 +89,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k, { if (k >= 0) return skb_header_pointer(skb, k, size, buffer); + return bpf_internal_load_pointer_neg_helper(skb, k, size); } @@ -598,14 +599,12 @@ static unsigned int pkt_type_offset(void) static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) { - struct sk_buff *skb = (struct sk_buff *)(long) ctx; - - return __skb_get_poff(skb); + return __skb_get_poff((struct sk_buff *)(unsigned long) ctx); } static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) { - struct sk_buff *skb = (struct sk_buff *)(long) ctx; + struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx; struct nlattr *nla; if (skb_is_nonlinear(skb)) @@ -626,7 +625,7 @@ static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) { - struct sk_buff *skb = (struct sk_buff *)(long) ctx; + struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx; struct nlattr *nla; if (skb_is_nonlinear(skb)) @@ -657,7 +656,7 @@ static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) /* note that this only generates 32-bit random numbers */ static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, 
u64 r5) { - return (u64)prandom_u32(); + return prandom_u32(); } static bool convert_bpf_extensions(struct sock_filter *fp, @@ -1475,7 +1474,7 @@ static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp, fp_new = sock_kmalloc(sk, len, GFP_KERNEL); if (fp_new) { - memcpy(fp_new, fp, sizeof(struct sk_filter)); + *fp_new = *fp; /* As we're kepping orig_prog in fp_new along, * we need to make sure we're not evicting it * from the old fp. -- cgit v1.2.3-59-g8ed1b From 56bfa7ee7c8892f1aa61797e4b8fec84b31d29b3 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Thu, 1 May 2014 11:40:30 -0700 Subject: unregister_netdevice : move RTM_DELLINK to until after ndo_uninit This patch fixes ordering of rtnl notifications during unregister_netdevice by moving RTM_DELLINK notification to until after ndo_uninit. The problem was seen with unregistering bond netdevices. bond ndo_uninit callback generates a few RTM_NEWLINK notifications for NETDEV_CHANGEADDR and NETDEV_FEAT_CHANGE. This is seen mostly when the bond is deleted with slaves still enslaved to the bond. During unregister netdevice (rollback_registered_many to be specific) bond ndo_uninit is called after RTM_DELLINK notification goes out. This results in userspace seeing RTM_DELLINK followed by a couple of RTM_NEWLINK's. In userspace problem was seen with libnl. libnl cache deletes the bond when it sees RTM_DELLINK and re-adds the bond with the following RTM_NEWLINK. Resulting in a stale bond entry in libnl cache when the kernel has already deleted the bond. This patch has been tested for bond, bridges and vlan devices. Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- net/core/dev.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'net/core') diff --git a/net/core/dev.c b/net/core/dev.c index 11d70e3afefa..fe0b9cd69cb6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5606,10 +5606,6 @@ static void rollback_registered_many(struct list_head *head) */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); - if (!dev->rtnl_link_ops || - dev->rtnl_link_state == RTNL_LINK_INITIALIZED) - rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL); - /* * Flush the unicast and multicast chains */ @@ -5619,6 +5615,10 @@ static void rollback_registered_many(struct list_head *head) if (dev->netdev_ops->ndo_uninit) dev->netdev_ops->ndo_uninit(dev); + if (!dev->rtnl_link_ops || + dev->rtnl_link_state == RTNL_LINK_INITIALIZED) + rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL); + /* Notifier chain MUST detach us all upper devices. */ WARN_ON(netdev_has_any_upper_dev(dev)); -- cgit v1.2.3-59-g8ed1b From 9739eef13c926645fbf88bcb77e66442fa75d688 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 8 May 2014 14:10:51 -0700 Subject: net: filter: make BPF conversion more readable Introduce BPF helper macros to define instructions (similar to old BPF_STMT/BPF_JUMP macros) Use them while converting classic BPF to internal and in BPF testsuite later. Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller --- include/linux/filter.h | 51 ++++++++++++++++++ net/core/filter.c | 142 +++++++++++++++++-------------------------------- 2 files changed, 101 insertions(+), 92 deletions(-) (limited to 'net/core') diff --git a/include/linux/filter.h b/include/linux/filter.h index ed1efab10b8f..4457b383961c 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -79,6 +79,57 @@ enum { /* BPF program can access up to 512 bytes of stack space. 
*/ #define MAX_BPF_STACK 512 +/* bpf_add|sub|...: a += x, bpf_mov: a = x */ +#define BPF_ALU64_REG(op, a, x) \ + ((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_X, a, x, 0, 0}) +#define BPF_ALU32_REG(op, a, x) \ + ((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_X, a, x, 0, 0}) + +/* bpf_add|sub|...: a += imm, bpf_mov: a = imm */ +#define BPF_ALU64_IMM(op, a, imm) \ + ((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_K, a, 0, 0, imm}) +#define BPF_ALU32_IMM(op, a, imm) \ + ((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_K, a, 0, 0, imm}) + +/* R0 = *(uint *) (skb->data + off) */ +#define BPF_LD_ABS(size, off) \ + ((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_ABS, 0, 0, 0, off}) + +/* R0 = *(uint *) (skb->data + x + off) */ +#define BPF_LD_IND(size, x, off) \ + ((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_IND, 0, x, 0, off}) + +/* a = *(uint *) (x + off) */ +#define BPF_LDX_MEM(sz, a, x, off) \ + ((struct sock_filter_int) {BPF_LDX|BPF_SIZE(sz)|BPF_MEM, a, x, off, 0}) + +/* if (a 'op' x) goto pc+off */ +#define BPF_JMP_REG(op, a, x, off) \ + ((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_X, a, x, off, 0}) + +/* if (a 'op' imm) goto pc+off */ +#define BPF_JMP_IMM(op, a, imm, off) \ + ((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_K, a, 0, off, imm}) + +#define BPF_EXIT_INSN() \ + ((struct sock_filter_int) {BPF_JMP|BPF_EXIT, 0, 0, 0, 0}) + +static inline int size_to_bpf(int size) +{ + switch (size) { + case 1: + return BPF_B; + case 2: + return BPF_H; + case 4: + return BPF_W; + case 8: + return BPF_DW; + default: + return -EINVAL; + } +} + /* Macro to invoke filter function. */ #define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) diff --git a/net/core/filter.c b/net/core/filter.c index eb020a7d6f55..9aaa05ad8fe3 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -668,10 +668,9 @@ static bool convert_bpf_extensions(struct sock_filter *fp, case SKF_AD_OFF + SKF_AD_PROTOCOL: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); - insn->code = BPF_LDX | BPF_MEM | BPF_H; - insn->a_reg = BPF_REG_A; - insn->x_reg = BPF_REG_CTX; - insn->off = offsetof(struct sk_buff, protocol); + /* A = *(u16 *) (ctx + offsetof(protocol)) */ + *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, + offsetof(struct sk_buff, protocol)); insn++; /* A = ntohs(A) [emitting a nop or swap16] */ @@ -681,37 +680,27 @@ static bool convert_bpf_extensions(struct sock_filter *fp, break; case SKF_AD_OFF + SKF_AD_PKTTYPE: - insn->code = BPF_LDX | BPF_MEM | BPF_B; - insn->a_reg = BPF_REG_A; - insn->x_reg = BPF_REG_CTX; - insn->off = pkt_type_offset(); + *insn = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX, + pkt_type_offset()); if (insn->off < 0) return false; insn++; - insn->code = BPF_ALU | BPF_AND | BPF_K; - insn->a_reg = BPF_REG_A; - insn->imm = PKT_TYPE_MAX; + *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX); break; case SKF_AD_OFF + SKF_AD_IFINDEX: case SKF_AD_OFF + SKF_AD_HATYPE: - if (FIELD_SIZEOF(struct sk_buff, dev) == 8) - insn->code = BPF_LDX | BPF_MEM | BPF_DW; - else - insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->a_reg = BPF_REG_TMP; - insn->x_reg = BPF_REG_CTX; - insn->off = offsetof(struct sk_buff, dev); + *insn = BPF_LDX_MEM(size_to_bpf(FIELD_SIZEOF(struct sk_buff, dev)), + BPF_REG_TMP, BPF_REG_CTX, + offsetof(struct sk_buff, dev)); insn++; - insn->code = BPF_JMP | BPF_JNE | BPF_K; - insn->a_reg = BPF_REG_TMP; - insn->imm = 0; - insn->off = 1; + /* if (tmp != 0) goto pc+1 */ + *insn = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1); insn++; - insn->code = 
BPF_JMP | BPF_EXIT; + *insn = BPF_EXIT_INSN(); insn++; BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); @@ -732,55 +721,45 @@ static bool convert_bpf_extensions(struct sock_filter *fp, case SKF_AD_OFF + SKF_AD_MARK: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); - insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->a_reg = BPF_REG_A; - insn->x_reg = BPF_REG_CTX; - insn->off = offsetof(struct sk_buff, mark); + *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, + offsetof(struct sk_buff, mark)); break; case SKF_AD_OFF + SKF_AD_RXHASH: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); - insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->a_reg = BPF_REG_A; - insn->x_reg = BPF_REG_CTX; - insn->off = offsetof(struct sk_buff, hash); + *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, + offsetof(struct sk_buff, hash)); break; case SKF_AD_OFF + SKF_AD_QUEUE: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); - insn->code = BPF_LDX | BPF_MEM | BPF_H; - insn->a_reg = BPF_REG_A; - insn->x_reg = BPF_REG_CTX; - insn->off = offsetof(struct sk_buff, queue_mapping); + *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, + offsetof(struct sk_buff, queue_mapping)); break; case SKF_AD_OFF + SKF_AD_VLAN_TAG: case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); - insn->code = BPF_LDX | BPF_MEM | BPF_H; - insn->a_reg = BPF_REG_A; - insn->x_reg = BPF_REG_CTX; - insn->off = offsetof(struct sk_buff, vlan_tci); + /* A = *(u16 *) (ctx + offsetof(vlan_tci)) */ + *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, + offsetof(struct sk_buff, vlan_tci)); insn++; BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) { - insn->code = BPF_ALU | BPF_AND | BPF_K; - insn->a_reg = BPF_REG_A; - insn->imm = ~VLAN_TAG_PRESENT; + *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, + ~VLAN_TAG_PRESENT); } else { - insn->code = BPF_ALU | BPF_RSH | BPF_K; - insn->a_reg = BPF_REG_A; - insn->imm = 12; + /* A >>= 12 */ + *insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12); insn++; - insn->code = BPF_ALU | BPF_AND | BPF_K; - insn->a_reg = BPF_REG_A; - insn->imm = 1; + /* A &= 1 */ + *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1); } break; @@ -790,21 +769,15 @@ static bool convert_bpf_extensions(struct sock_filter *fp, case SKF_AD_OFF + SKF_AD_CPU: case SKF_AD_OFF + SKF_AD_RANDOM: /* arg1 = ctx */ - insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = BPF_REG_ARG1; - insn->x_reg = BPF_REG_CTX; + *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG1, BPF_REG_CTX); insn++; /* arg2 = A */ - insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = BPF_REG_ARG2; - insn->x_reg = BPF_REG_A; + *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG2, BPF_REG_A); insn++; /* arg3 = X */ - insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = BPF_REG_ARG3; - insn->x_reg = BPF_REG_X; + *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG3, BPF_REG_X); insn++; /* Emit call(ctx, arg2=A, arg3=X) */ @@ -829,9 +802,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp, break; case SKF_AD_OFF + SKF_AD_ALU_XOR_X: - insn->code = BPF_ALU | BPF_XOR | BPF_X; - insn->a_reg = BPF_REG_A; - insn->x_reg = BPF_REG_X; + /* A ^= X */ + *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X); break; default: @@ -897,9 +869,7 @@ do_pass: fp = prog; if (new_insn) { - new_insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - new_insn->a_reg = BPF_REG_CTX; - new_insn->x_reg = BPF_REG_ARG1; + *new_insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_CTX, BPF_REG_ARG1); } new_insn++; @@ -1027,34 +997,28 @@ do_pass: /* ldxb 4 * 
([14] & 0xf) is remaped into 6 insns. */ case BPF_LDX | BPF_MSH | BPF_B: - insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = BPF_REG_TMP; - insn->x_reg = BPF_REG_A; + /* tmp = A */ + *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_TMP, BPF_REG_A); insn++; - insn->code = BPF_LD | BPF_ABS | BPF_B; - insn->a_reg = BPF_REG_A; - insn->imm = fp->k; + /* A = R0 = *(u8 *) (skb->data + K) */ + *insn = BPF_LD_ABS(BPF_B, fp->k); insn++; - insn->code = BPF_ALU | BPF_AND | BPF_K; - insn->a_reg = BPF_REG_A; - insn->imm = 0xf; + /* A &= 0xf */ + *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf); insn++; - insn->code = BPF_ALU | BPF_LSH | BPF_K; - insn->a_reg = BPF_REG_A; - insn->imm = 2; + /* A <<= 2 */ + *insn = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2); insn++; - insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = BPF_REG_X; - insn->x_reg = BPF_REG_A; + /* X = A */ + *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A); insn++; - insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = BPF_REG_A; - insn->x_reg = BPF_REG_TMP; + /* A = tmp */ + *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_TMP); break; /* RET_K, RET_A are remaped into 2 insns. */ @@ -1068,7 +1032,7 @@ do_pass: insn->imm = fp->k; insn++; - insn->code = BPF_JMP | BPF_EXIT; + *insn = BPF_EXIT_INSN(); break; /* Store to stack. */ @@ -1102,16 +1066,12 @@ do_pass: /* X = A */ case BPF_MISC | BPF_TAX: - insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = BPF_REG_X; - insn->x_reg = BPF_REG_A; + *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A); break; /* A = X */ case BPF_MISC | BPF_TXA: - insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = BPF_REG_A; - insn->x_reg = BPF_REG_X; + *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_X); break; /* A = skb->len or X = skb->len */ @@ -1126,10 +1086,8 @@ do_pass: /* access seccomp_data fields */ case BPF_LDX | BPF_ABS | BPF_W: - insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->a_reg = BPF_REG_A; - insn->x_reg = BPF_REG_CTX; - insn->off = fp->k; + /* A = *(u32 *) (ctx + K) */ + *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k); break; default: -- cgit v1.2.3-59-g8ed1b From 60ff746739bf805a912484643c720b6124826140 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Sun, 4 May 2014 16:39:18 -0700 Subject: net: rename local_df to ignore_df MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As suggested by several people, rename local_df to ignore_df, since it means "ignore df bit if it is set". Cc: Maciej Żenczykowski Cc: Florian Westphal Cc: David S. Miller Cc: Eric Dumazet Signed-off-by: Cong Wang Acked-by: Maciej Żenczykowski Signed-off-by: David S. 
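
To illustrate the semantics behind the rename, here is ip_may_fragment() from net/ipv4/ip_forward.c as it reads after this patch (excerpted from the diff below; the explanatory comment is added here and is not part of the patch):

	static bool ip_may_fragment(const struct sk_buff *skb)
	{
		/* Fragmenting locally is allowed when the DF bit is clear,
		 * or when the skb explicitly asks us to ignore DF
		 * (the flag formerly known as skb->local_df).
		 */
		return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
		       skb->ignore_df;
	}
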
Miller --- include/linux/skbuff.h | 4 ++-- include/net/ip.h | 6 +++--- include/net/ip6_route.h | 2 +- net/core/skbuff.c | 4 ++-- net/ipv4/ip_forward.c | 2 +- net/ipv4/ip_output.c | 12 ++++++------ net/ipv4/netfilter/nf_defrag_ipv4.c | 2 +- net/ipv4/xfrm4_output.c | 2 +- net/ipv6/ip6_output.c | 12 ++++++------ net/ipv6/netfilter/nf_conntrack_reasm.c | 2 +- net/ipv6/xfrm6_output.c | 6 +++--- net/l2tp/l2tp_core.c | 2 +- net/netfilter/ipvs/ip_vs_xmit.c | 20 ++++++++++---------- net/openvswitch/vport-gre.c | 2 +- net/openvswitch/vport-vxlan.c | 2 +- net/sctp/ipv6.c | 2 +- net/sctp/output.c | 2 +- 17 files changed, 42 insertions(+), 42 deletions(-) (limited to 'net/core') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 3ca0dda5a42e..7a9beeb1c458 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -426,7 +426,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, * @csum_start: Offset from skb->head where checksumming should start * @csum_offset: Offset from csum_start where checksum should be stored * @priority: Packet queueing priority - * @local_df: allow local fragmentation + * @ignore_df: allow local fragmentation * @cloned: Head may be cloned (check refcnt to be sure) * @ip_summed: Driver fed us an IP checksum * @nohdr: Payload reference only, must not modify header @@ -514,7 +514,7 @@ struct sk_buff { }; __u32 priority; kmemcheck_bitfield_begin(flags1); - __u8 local_df:1, + __u8 ignore_df:1, cloned:1, ip_summed:2, nohdr:1, diff --git a/include/net/ip.h b/include/net/ip.h index 16146b667ddb..55752985c144 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -269,7 +269,7 @@ static inline bool ip_sk_use_pmtu(const struct sock *sk) return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE; } -static inline bool ip_sk_local_df(const struct sock *sk) +static inline bool ip_sk_ignore_df(const struct sock *sk) { return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO || inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT; @@ -304,7 +304,7 @@ static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, s { struct iphdr *iph = ip_hdr(skb); - if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) { + if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) { /* This is only to work around buggy Windows95/2000 * VJ compression implementations. 
If the ID field * does not change, they drop every other packet in @@ -320,7 +320,7 @@ static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *d { struct iphdr *iph = ip_hdr(skb); - if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) { + if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) { if (sk && inet_sk(sk)->inet_daddr) { iph->id = htons(inet_sk(sk)->inet_id); inet_sk(sk)->inet_id += 1 + more; diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 6c4f5eac98e7..38e41e4d0998 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -185,7 +185,7 @@ static inline bool ip6_sk_accept_pmtu(const struct sock *sk) inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT; } -static inline bool ip6_sk_local_df(const struct sock *sk) +static inline bool ip6_sk_ignore_df(const struct sock *sk) { return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO || inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 1b62343f5837..3d74530ae82b 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -694,7 +694,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) #endif memcpy(new->cb, old->cb, sizeof(old->cb)); new->csum = old->csum; - new->local_df = old->local_df; + new->ignore_df = old->ignore_df; new->pkt_type = old->pkt_type; new->ip_summed = old->ip_summed; skb_copy_queue_mapping(new, old); @@ -3913,7 +3913,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) skb->tstamp.tv64 = 0; skb->pkt_type = PACKET_HOST; skb->skb_iif = 0; - skb->local_df = 0; + skb->ignore_df = 0; skb_dst_drop(skb); skb->mark = 0; secpath_reset(skb); diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 6f111e48e11c..3a83ce5efa80 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -42,7 +42,7 @@ static bool ip_may_fragment(const struct sk_buff *skb) { return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) || - skb->local_df; + skb->ignore_df; } static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index a52f50187b54..6aa4380fde1a 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -415,7 +415,7 @@ packet_routed: skb_reset_network_header(skb); iph = ip_hdr(skb); *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); - if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df) + if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df) iph->frag_off = htons(IP_DF); else iph->frag_off = 0; @@ -501,7 +501,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) iph = ip_hdr(skb); mtu = ip_skb_dst_mtu(skb); - if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->local_df) || + if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) || (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size > mtu))) { IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS); @@ -866,7 +866,7 @@ static int __ip_append_data(struct sock *sk, fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0); maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; - maxnonfragsize = ip_sk_local_df(sk) ? 0xFFFF : mtu; + maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu; if (cork->length + length > maxnonfragsize - fragheaderlen) { ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, @@ -1189,7 +1189,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, fragheaderlen = sizeof(struct iphdr) + (opt ? 
opt->optlen : 0); maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; - maxnonfragsize = ip_sk_local_df(sk) ? 0xFFFF : mtu; + maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu; if (cork->length + size > maxnonfragsize - fragheaderlen) { ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, @@ -1350,10 +1350,10 @@ struct sk_buff *__ip_make_skb(struct sock *sk, * to fragment the frame generated here. No matter, what transforms * how transforms change size of the packet, it will come out. */ - skb->local_df = ip_sk_local_df(sk); + skb->ignore_df = ip_sk_ignore_df(sk); /* DF bit is set when we want to see DF on outgoing frames. - * If local_df is set too, we still allow to fragment this frame + * If ignore_df is set too, we still allow to fragment this frame * locally. */ if (inet->pmtudisc == IP_PMTUDISC_DO || inet->pmtudisc == IP_PMTUDISC_PROBE || diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c index f40f321b41fc..b8f6381c7d0b 100644 --- a/net/ipv4/netfilter/nf_defrag_ipv4.c +++ b/net/ipv4/netfilter/nf_defrag_ipv4.c @@ -34,7 +34,7 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) if (!err) { ip_send_check(ip_hdr(skb)); - skb->local_df = 1; + skb->ignore_df = 1; } return err; diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 40e701f2e1e0..8e8c018d9d2d 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -25,7 +25,7 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb) if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE) goto out; - if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) + if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df) goto out; mtu = dst_mtu(skb_dst(skb)); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 31a38bde69ef..ab0cc57f779c 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -219,7 +219,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, skb->mark = sk->sk_mark; mtu = dst_mtu(dst); - if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) { + if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) { IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUT, skb->len); return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, @@ -347,11 +347,11 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) if (skb->len <= mtu) return false; - /* ipv6 conntrack defrag sets max_frag_size + local_df */ + /* ipv6 conntrack defrag sets max_frag_size + ignore_df */ if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu) return true; - if (skb->local_df) + if (skb->ignore_df) return false; if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) @@ -559,7 +559,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) /* We must not fragment if the socket is set to force MTU discovery * or if the skb it not generated by a local socket. */ - if (unlikely(!skb->local_df && skb->len > mtu) || + if (unlikely(!skb->ignore_df && skb->len > mtu) || (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) { if (skb->sk && dst_allfrag(skb_dst(skb))) @@ -1234,7 +1234,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, sizeof(struct frag_hdr) : 0) + rt->rt6i_nfheader_len; - if (ip6_sk_local_df(sk)) + if (ip6_sk_ignore_df(sk)) maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN; else maxnonfragsize = mtu; @@ -1544,7 +1544,7 @@ int ip6_push_pending_frames(struct sock *sk) } /* Allow local fragmentation. 
*/ - skb->local_df = ip6_sk_local_df(sk); + skb->ignore_df = ip6_sk_ignore_df(sk); *final_dst = fl6->daddr; __skb_pull(skb, skb_network_header_len(skb)); diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 767ab8da8218..0d5279fd852a 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -451,7 +451,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev) } sub_frag_mem_limit(&fq->q, head->truesize); - head->local_df = 1; + head->ignore_df = 1; head->next = NULL; head->dev = dev; head->tstamp = fq->q.stamp; diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 19ef329bdbf8..f47c8b153dd3 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -78,7 +78,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb) if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; - if (!skb->local_df && skb->len > mtu) { + if (!skb->ignore_df && skb->len > mtu) { skb->dev = dst->dev; if (xfrm6_local_dontfrag(skb)) @@ -120,7 +120,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) #endif skb->protocol = htons(ETH_P_IPV6); - skb->local_df = 1; + skb->ignore_df = 1; return x->outer_mode->output2(x, skb); } @@ -150,7 +150,7 @@ static int __xfrm6_output(struct sk_buff *skb) if (skb->len > mtu && xfrm6_local_dontfrag(skb)) { xfrm6_local_rxpmtu(skb, mtu); return -EMSGSIZE; - } else if (!skb->local_df && skb->len > mtu && skb->sk) { + } else if (!skb->ignore_df && skb->len > mtu && skb->sk) { xfrm_local_error(skb, mtu); return -EMSGSIZE; } diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index aa1a9d44c107..ed0716a075ba 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1073,7 +1073,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, } /* Queue the packet to IP for output */ - skb->local_df = 1; + skb->ignore_df = 1; #if IS_ENABLED(CONFIG_IPV6) if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped) error = inet6_csk_xmit(tunnel->sock, skb, NULL); diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index c47444e4cf8c..487b55e04337 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -562,7 +562,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_send_check(iph); /* Another hack: avoid icmp_send in ip_fragment */ - skb->local_df = 1; + skb->ignore_df = 1; ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0); rcu_read_unlock(); @@ -590,7 +590,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, goto tx_error; /* Another hack: avoid icmp_send in ip_fragment */ - skb->local_df = 1; + skb->ignore_df = 1; ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0); rcu_read_unlock(); @@ -684,7 +684,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, MTU problem. */ /* Another hack: avoid icmp_send in ip_fragment */ - skb->local_df = 1; + skb->ignore_df = 1; rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local); rcu_read_unlock(); @@ -774,7 +774,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, MTU problem. 
*/ /* Another hack: avoid icmp_send in ip_fragment */ - skb->local_df = 1; + skb->ignore_df = 1; rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local); rcu_read_unlock(); @@ -886,7 +886,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_select_ident(skb, &rt->dst, NULL); /* Another hack: avoid icmp_send in ip_fragment */ - skb->local_df = 1; + skb->ignore_df = 1; ret = ip_vs_tunnel_xmit_prepare(skb, cp); if (ret == NF_ACCEPT) @@ -974,7 +974,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, iph->hop_limit = old_iph->hop_limit; /* Another hack: avoid icmp_send in ip_fragment */ - skb->local_df = 1; + skb->ignore_df = 1; ret = ip_vs_tunnel_xmit_prepare(skb, cp); if (ret == NF_ACCEPT) @@ -1023,7 +1023,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_send_check(ip_hdr(skb)); /* Another hack: avoid icmp_send in ip_fragment */ - skb->local_df = 1; + skb->ignore_df = 1; ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0); rcu_read_unlock(); @@ -1060,7 +1060,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, } /* Another hack: avoid icmp_send in ip_fragment */ - skb->local_df = 1; + skb->ignore_df = 1; ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0); rcu_read_unlock(); @@ -1157,7 +1157,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_nat_icmp(skb, pp, cp, 0); /* Another hack: avoid icmp_send in ip_fragment */ - skb->local_df = 1; + skb->ignore_df = 1; rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local); rcu_read_unlock(); @@ -1249,7 +1249,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_nat_icmp_v6(skb, pp, cp, 0); /* Another hack: avoid icmp_send in ip_fragment */ - skb->local_df = 1; + skb->ignore_df = 1; rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local); rcu_read_unlock(); diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index ebb6e2442554..0856f014d7a9 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c @@ -172,7 +172,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; - skb->local_df = 1; + skb->ignore_df = 1; return iptunnel_xmit(skb->sk, rt, skb, fl.saddr, OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE, diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c index 21cceb3bdf78..a93efa3f64c3 100644 --- a/net/openvswitch/vport-vxlan.c +++ b/net/openvswitch/vport-vxlan.c @@ -170,7 +170,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb) df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? 
htons(IP_DF) : 0; - skb->local_df = 1; + skb->ignore_df = 1; inet_get_local_port_range(net, &port_min, &port_max); src_port = vxlan_src_port(port_min, port_max, skb); diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 2b1738ef9394..4dc5d9e08311 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -216,7 +216,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) IP6_ECN_flow_xmit(sk, fl6->flowlabel); if (!(transport->param_flags & SPP_PMTUD_ENABLE)) - skb->local_df = 1; + skb->ignore_df = 1; SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); diff --git a/net/sctp/output.c b/net/sctp/output.c index 0f4d15fc2627..01ab8e0723f0 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -591,7 +591,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) pr_debug("***sctp_transmit_packet*** skb->len:%d\n", nskb->len); - nskb->local_df = packet->ipfragok; + nskb->ignore_df = packet->ipfragok; tp->af_specific->sctp_xmit(nskb, tp); out: -- cgit v1.2.3-59-g8ed1b From 1268e253a81e504bc5d5cb7f887dbd538984f137 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 13 May 2014 13:13:33 -0400 Subject: net: filter: Fix redefinition warnings on x86-64. Do not collide with the x86-64 PTRACE user API namespace. net/core/filter.c:57:0: warning: "R8" redefined [enabled by default] arch/x86/include/uapi/asm/ptrace-abi.h:38:0: note: this is the location of the previous definition Fix by adding a BPF_ prefix to the register macros. Reported-by: Randy Dunlap Signed-off-by: David S. Miller --- net/core/filter.c | 71 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 37 insertions(+), 34 deletions(-) (limited to 'net/core') diff --git a/net/core/filter.c b/net/core/filter.c index 9aaa05ad8fe3..c442a0d7d0f7 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -46,17 +46,17 @@ #include /* Registers */ -#define R0 regs[BPF_REG_0] -#define R1 regs[BPF_REG_1] -#define R2 regs[BPF_REG_2] -#define R3 regs[BPF_REG_3] -#define R4 regs[BPF_REG_4] -#define R5 regs[BPF_REG_5] -#define R6 regs[BPF_REG_6] -#define R7 regs[BPF_REG_7] -#define R8 regs[BPF_REG_8] -#define R9 regs[BPF_REG_9] -#define R10 regs[BPF_REG_10] +#define BPF_R0 regs[BPF_REG_0] +#define BPF_R1 regs[BPF_REG_1] +#define BPF_R2 regs[BPF_REG_2] +#define BPF_R3 regs[BPF_REG_3] +#define BPF_R4 regs[BPF_REG_4] +#define BPF_R5 regs[BPF_REG_5] +#define BPF_R6 regs[BPF_REG_6] +#define BPF_R7 regs[BPF_REG_7] +#define BPF_R8 regs[BPF_REG_8] +#define BPF_R9 regs[BPF_REG_9] +#define BPF_R10 regs[BPF_REG_10] /* Named registers */ #define A regs[insn->a_reg] @@ -383,10 +383,12 @@ select_insn: /* CALL */ JMP_CALL_0: - /* Function call scratches R1-R5 registers, preserves R6-R9, - * and stores return value into R0. + /* Function call scratches BPF_R1-BPF_R5 registers, + * preserves BPF_R6-BPF_R9, and stores return value + * into BPF_R0. */ - R0 = (__bpf_call_base + insn->imm)(R1, R2, R3, R4, R5); + BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3, + BPF_R4, BPF_R5); CONT; /* JMP */ @@ -478,7 +480,7 @@ select_insn: } CONT; JMP_EXIT_0: - return R0; + return BPF_R0; /* STX and ST and LDX*/ #define LDST(SIZEOP, SIZE) \ @@ -505,18 +507,19 @@ select_insn: atomic64_add((u64) X, (atomic64_t *)(unsigned long) (A + insn->off)); CONT; - LD_ABS_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */ + LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + K)) */ off = K; load_word: - /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only - * appearing in the programs where ctx == skb. 
All programs - * keep 'ctx' in regs[BPF_REG_CTX] == R6, sk_convert_filter() - * saves it in R6, internal BPF verifier will check that - * R6 == ctx. + /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are + * only appearing in the programs where ctx == + * skb. All programs keep 'ctx' in regs[BPF_REG_CTX] + * == BPF_R6, sk_convert_filter() saves it in BPF_R6, + * internal BPF verifier will check that BPF_R6 == + * ctx. * - * BPF_ABS and BPF_IND are wrappers of function calls, so - * they scratch R1-R5 registers, preserve R6-R9, and store - * return value into R0. + * BPF_ABS and BPF_IND are wrappers of function calls, + * so they scratch BPF_R1-BPF_R5 registers, preserve + * BPF_R6-BPF_R9, and store return value into BPF_R0. * * Implicit input: * ctx @@ -526,39 +529,39 @@ load_word: * K == 32-bit immediate * * Output: - * R0 - 8/16/32-bit skb data converted to cpu endianness + * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness */ ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp); if (likely(ptr != NULL)) { - R0 = get_unaligned_be32(ptr); + BPF_R0 = get_unaligned_be32(ptr); CONT; } return 0; - LD_ABS_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */ + LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */ off = K; load_half: ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp); if (likely(ptr != NULL)) { - R0 = get_unaligned_be16(ptr); + BPF_R0 = get_unaligned_be16(ptr); CONT; } return 0; - LD_ABS_B: /* R0 = *(u8 *) (ctx + K) */ + LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */ off = K; load_byte: ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp); if (likely(ptr != NULL)) { - R0 = *(u8 *)ptr; + BPF_R0 = *(u8 *)ptr; CONT; } return 0; - LD_IND_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */ + LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */ off = K + X; goto load_word; - LD_IND_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */ + LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + X + K)) */ off = K + X; goto load_half; - LD_IND_B: /* R0 = *(u8 *) (skb->data + X + K) */ + LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + X + K) */ off = K + X; goto load_byte; @@ -1001,7 +1004,7 @@ do_pass: *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_TMP, BPF_REG_A); insn++; - /* A = R0 = *(u8 *) (skb->data + K) */ + /* A = BPF_R0 = *(u8 *) (skb->data + K) */ *insn = BPF_LD_ABS(BPF_B, fp->k); insn++; -- cgit v1.2.3-59-g8ed1b From 0f49ff07025e8d6cd25bce5b623881739e04926e Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Sat, 10 May 2014 22:23:28 +0200 Subject: net: ptp: mark filter as __initdata sk_unattached_filter_create() will copy the filter's instructions so we don't need to have the master copy hanging around after initialization. Signed-off-by: Mathias Krause Acked-by: Alexei Starovoitov Signed-off-by: David S. 
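
A minimal sketch of the pattern this change relies on; example_filter is a hypothetical name used only for illustration, the real array is ptp_filter[] in ptp_classifier_init() in the diff below:

	/* Master copy may live in init memory: sk_unattached_filter_create()
	 * copies the instructions, so nothing references this array once
	 * init memory has been freed.
	 */
	static struct sock_filter example_filter[] __initdata = {
		{ 0x28, 0, 0, 0x0000000c },	/* ldh [12] (ethertype) */
		{ 0x06, 0, 0, 0x00000000 },	/* ret #0               */
	};
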
Miller --- net/core/ptp_classifier.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/core') diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c index eaba0f68f860..37d86157b76e 100644 --- a/net/core/ptp_classifier.c +++ b/net/core/ptp_classifier.c @@ -88,7 +88,7 @@ EXPORT_SYMBOL_GPL(ptp_classify_raw); void __init ptp_classifier_init(void) { - static struct sock_filter ptp_filter[] = { + static struct sock_filter ptp_filter[] __initdata = { { 0x28, 0, 0, 0x0000000c }, { 0x15, 0, 12, 0x00000800 }, { 0x30, 0, 0, 0x00000017 }, -- cgit v1.2.3-59-g8ed1b From 622582786c9e041d0bd52bde201787adeab249f8 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 13 May 2014 19:50:46 -0700 Subject: net: filter: x86: internal BPF JIT Maps all internal BPF instructions into x86_64 instructions. This patch replaces original BPF x64 JIT with internal BPF x64 JIT. sysctl net.core.bpf_jit_enable is reused as on/off switch. Performance: 1. old BPF JIT and internal BPF JIT generate equivalent x86_64 code. No performance difference is observed for filters that were JIT-able before Example assembler code for BPF filter "tcpdump port 22" original BPF -> old JIT: original BPF -> internal BPF -> new JIT: 0: push %rbp 0: push %rbp 1: mov %rsp,%rbp 1: mov %rsp,%rbp 4: sub $0x60,%rsp 4: sub $0x228,%rsp 8: mov %rbx,-0x8(%rbp) b: mov %rbx,-0x228(%rbp) // prologue 12: mov %r13,-0x220(%rbp) 19: mov %r14,-0x218(%rbp) 20: mov %r15,-0x210(%rbp) 27: xor %eax,%eax // clear A c: xor %ebx,%ebx 29: xor %r13,%r13 // clear X e: mov 0x68(%rdi),%r9d 2c: mov 0x68(%rdi),%r9d 12: sub 0x6c(%rdi),%r9d 30: sub 0x6c(%rdi),%r9d 16: mov 0xd8(%rdi),%r8 34: mov 0xd8(%rdi),%r10 3b: mov %rdi,%rbx 1d: mov $0xc,%esi 3e: mov $0xc,%esi 22: callq 0xffffffffe1021e15 43: callq 0xffffffffe102bd75 27: cmp $0x86dd,%eax 48: cmp $0x86dd,%rax 2c: jne 0x0000000000000069 4f: jne 0x000000000000009a 2e: mov $0x14,%esi 51: mov $0x14,%esi 33: callq 0xffffffffe1021e31 56: callq 0xffffffffe102bd91 38: cmp $0x84,%eax 5b: cmp $0x84,%rax 3d: je 0x0000000000000049 62: je 0x0000000000000074 3f: cmp $0x6,%eax 64: cmp $0x6,%rax 42: je 0x0000000000000049 68: je 0x0000000000000074 44: cmp $0x11,%eax 6a: cmp $0x11,%rax 47: jne 0x00000000000000c6 6e: jne 0x0000000000000117 49: mov $0x36,%esi 74: mov $0x36,%esi 4e: callq 0xffffffffe1021e15 79: callq 0xffffffffe102bd75 53: cmp $0x16,%eax 7e: cmp $0x16,%rax 56: je 0x00000000000000bf 82: je 0x0000000000000110 58: mov $0x38,%esi 88: mov $0x38,%esi 5d: callq 0xffffffffe1021e15 8d: callq 0xffffffffe102bd75 62: cmp $0x16,%eax 92: cmp $0x16,%rax 65: je 0x00000000000000bf 96: je 0x0000000000000110 67: jmp 0x00000000000000c6 98: jmp 0x0000000000000117 69: cmp $0x800,%eax 9a: cmp $0x800,%rax 6e: jne 0x00000000000000c6 a1: jne 0x0000000000000117 70: mov $0x17,%esi a3: mov $0x17,%esi 75: callq 0xffffffffe1021e31 a8: callq 0xffffffffe102bd91 7a: cmp $0x84,%eax ad: cmp $0x84,%rax 7f: je 0x000000000000008b b4: je 0x00000000000000c2 81: cmp $0x6,%eax b6: cmp $0x6,%rax 84: je 0x000000000000008b ba: je 0x00000000000000c2 86: cmp $0x11,%eax bc: cmp $0x11,%rax 89: jne 0x00000000000000c6 c0: jne 0x0000000000000117 8b: mov $0x14,%esi c2: mov $0x14,%esi 90: callq 0xffffffffe1021e15 c7: callq 0xffffffffe102bd75 95: test $0x1fff,%ax cc: test $0x1fff,%rax 99: jne 0x00000000000000c6 d3: jne 0x0000000000000117 d5: mov %rax,%r14 9b: mov $0xe,%esi d8: mov $0xe,%esi a0: callq 0xffffffffe1021e44 dd: callq 0xffffffffe102bd91 // MSH e2: and $0xf,%eax e5: shl $0x2,%eax e8: mov %rax,%r13 eb: mov %r14,%rax ee: mov %r13,%rsi a5: 
lea 0xe(%rbx),%esi f1: add $0xe,%esi a8: callq 0xffffffffe1021e0d f4: callq 0xffffffffe102bd6d ad: cmp $0x16,%eax f9: cmp $0x16,%rax b0: je 0x00000000000000bf fd: je 0x0000000000000110 ff: mov %r13,%rsi b2: lea 0x10(%rbx),%esi 102: add $0x10,%esi b5: callq 0xffffffffe1021e0d 105: callq 0xffffffffe102bd6d ba: cmp $0x16,%eax 10a: cmp $0x16,%rax bd: jne 0x00000000000000c6 10e: jne 0x0000000000000117 bf: mov $0xffff,%eax 110: mov $0xffff,%eax c4: jmp 0x00000000000000c8 115: jmp 0x000000000000011c c6: xor %eax,%eax 117: mov $0x0,%eax c8: mov -0x8(%rbp),%rbx 11c: mov -0x228(%rbp),%rbx // epilogue cc: leaveq 123: mov -0x220(%rbp),%r13 cd: retq 12a: mov -0x218(%rbp),%r14 131: mov -0x210(%rbp),%r15 138: leaveq 139: retq On fully cached SKBs both JITed functions take 12 nsec to execute. BPF interpreter executes the program in 30 nsec. The difference in generated assembler is due to the following: Old BPF imlements LDX_MSH instruction via sk_load_byte_msh() helper function inside bpf_jit.S. New JIT removes the helper and does it explicitly, so ldx_msh cost is the same for both JITs, but generated code looks longer. New JIT has 4 registers to save, so prologue/epilogue are larger, but the cost is within noise on x64. Old JIT checks whether first insn clears A and if not emits 'xor %eax,%eax'. New JIT clears %rax unconditionally. 2. old BPF JIT doesn't support ANC_NLATTR, ANC_PAY_OFFSET, ANC_RANDOM extensions. New JIT supports all BPF extensions. Performance of such filters improves 2-4 times depending on a filter. The longer the filter the higher performance gain. Synthetic benchmarks with many ancillary loads see 20x speedup which seems to be the maximum gain from JIT Notes: . net.core.bpf_jit_enable=2 + tools/net/bpf_jit_disasm is still functional and can be used to see generated assembler . there are two jit_compile() functions and code flow for classic filters is: sk_attach_filter() - load classic BPF bpf_jit_compile() - try to JIT from classic BPF sk_convert_filter() - convert classic to internal bpf_int_jit_compile() - JIT from internal BPF seccomp and tracing filters will just call bpf_int_jit_compile() Signed-off-by: Alexei Starovoitov Signed-off-by: David S. 
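
To make the instruction mapping concrete, one worked example of what the new JIT emits for a single internal BPF instruction; the byte values follow from the reg2hex[] table and the add_2mod()/add_2reg() helpers added to arch/x86/net/bpf_jit_comp.c below. This is an illustration only, not part of the patch:

	/* internal BPF insn: BPF_ALU64 | BPF_ADD | BPF_X, a_reg = R1, x_reg = R2
	 * R1 maps to rdi and R2 maps to rsi (x86-64 calling convention)
	 */
	EMIT1(add_2mod(0x48, a_reg, x_reg));		/* 0x48  (REX.W prefix)  */
	EMIT2(0x01, add_2reg(0xC0, a_reg, x_reg));	/* 0x01 0xf7             */
	/* emitted bytes: 48 01 f7  ==  add %rsi,%rdi  (a_reg += x_reg, 64-bit) */
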
Miller --- arch/x86/net/bpf_jit.S | 77 +-- arch/x86/net/bpf_jit_comp.c | 1314 +++++++++++++++++++++++-------------------- include/linux/filter.h | 3 + net/core/filter.c | 9 +- 4 files changed, 748 insertions(+), 655 deletions(-) (limited to 'net/core') diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S index 01495755701b..6440221ced0d 100644 --- a/arch/x86/net/bpf_jit.S +++ b/arch/x86/net/bpf_jit.S @@ -12,13 +12,16 @@ /* * Calling convention : - * rdi : skb pointer + * rbx : skb pointer (callee saved) * esi : offset of byte(s) to fetch in skb (can be scratched) - * r8 : copy of skb->data + * r10 : copy of skb->data * r9d : hlen = skb->len - skb->data_len */ -#define SKBDATA %r8 +#define SKBDATA %r10 #define SKF_MAX_NEG_OFF $(-0x200000) /* SKF_LL_OFF from filter.h */ +#define MAX_BPF_STACK (512 /* from filter.h */ + \ + 32 /* space for rbx,r13,r14,r15 */ + \ + 8 /* space for skb_copy_bits */) sk_load_word: .globl sk_load_word @@ -68,53 +71,31 @@ sk_load_byte_positive_offset: movzbl (SKBDATA,%rsi),%eax ret -/** - * sk_load_byte_msh - BPF_S_LDX_B_MSH helper - * - * Implements BPF_S_LDX_B_MSH : ldxb 4*([offset]&0xf) - * Must preserve A accumulator (%eax) - * Inputs : %esi is the offset value - */ -sk_load_byte_msh: - .globl sk_load_byte_msh - test %esi,%esi - js bpf_slow_path_byte_msh_neg - -sk_load_byte_msh_positive_offset: - .globl sk_load_byte_msh_positive_offset - cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte_msh */ - jle bpf_slow_path_byte_msh - movzbl (SKBDATA,%rsi),%ebx - and $15,%bl - shl $2,%bl - ret - /* rsi contains offset and can be scratched */ #define bpf_slow_path_common(LEN) \ - push %rdi; /* save skb */ \ + mov %rbx, %rdi; /* arg1 == skb */ \ push %r9; \ push SKBDATA; \ /* rsi already has offset */ \ mov $LEN,%ecx; /* len */ \ - lea -12(%rbp),%rdx; \ + lea - MAX_BPF_STACK + 32(%rbp),%rdx; \ call skb_copy_bits; \ test %eax,%eax; \ pop SKBDATA; \ - pop %r9; \ - pop %rdi + pop %r9; bpf_slow_path_word: bpf_slow_path_common(4) js bpf_error - mov -12(%rbp),%eax + mov - MAX_BPF_STACK + 32(%rbp),%eax bswap %eax ret bpf_slow_path_half: bpf_slow_path_common(2) js bpf_error - mov -12(%rbp),%ax + mov - MAX_BPF_STACK + 32(%rbp),%ax rol $8,%ax movzwl %ax,%eax ret @@ -122,21 +103,11 @@ bpf_slow_path_half: bpf_slow_path_byte: bpf_slow_path_common(1) js bpf_error - movzbl -12(%rbp),%eax - ret - -bpf_slow_path_byte_msh: - xchg %eax,%ebx /* dont lose A , X is about to be scratched */ - bpf_slow_path_common(1) - js bpf_error - movzbl -12(%rbp),%eax - and $15,%al - shl $2,%al - xchg %eax,%ebx + movzbl - MAX_BPF_STACK + 32(%rbp),%eax ret #define sk_negative_common(SIZE) \ - push %rdi; /* save skb */ \ + mov %rbx, %rdi; /* arg1 == skb */ \ push %r9; \ push SKBDATA; \ /* rsi already has offset */ \ @@ -145,10 +116,8 @@ bpf_slow_path_byte_msh: test %rax,%rax; \ pop SKBDATA; \ pop %r9; \ - pop %rdi; \ jz bpf_error - bpf_slow_path_word_neg: cmp SKF_MAX_NEG_OFF, %esi /* test range */ jl bpf_error /* offset lower -> error */ @@ -179,22 +148,12 @@ sk_load_byte_negative_offset: movzbl (%rax), %eax ret -bpf_slow_path_byte_msh_neg: - cmp SKF_MAX_NEG_OFF, %esi - jl bpf_error -sk_load_byte_msh_negative_offset: - .globl sk_load_byte_msh_negative_offset - xchg %eax,%ebx /* dont lose A , X is about to be scratched */ - sk_negative_common(1) - movzbl (%rax),%eax - and $15,%al - shl $2,%al - xchg %eax,%ebx - ret - bpf_error: # force a return 0 from jit handler - xor %eax,%eax - mov -8(%rbp),%rbx + xor %eax,%eax + mov - MAX_BPF_STACK(%rbp),%rbx + mov - MAX_BPF_STACK + 8(%rbp),%r13 + mov - 
MAX_BPF_STACK + 16(%rbp),%r14 + mov - MAX_BPF_STACK + 24(%rbp),%r15 leaveq ret diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index c5fa7c9cb665..92aef8fdac2f 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1,6 +1,7 @@ /* bpf_jit_comp.c : BPF JIT compiler * * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com) + * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -14,28 +15,16 @@ #include #include -/* - * Conventions : - * EAX : BPF A accumulator - * EBX : BPF X accumulator - * RDI : pointer to skb (first argument given to JIT function) - * RBP : frame pointer (even if CONFIG_FRAME_POINTER=n) - * ECX,EDX,ESI : scratch registers - * r9d : skb->len - skb->data_len (headlen) - * r8 : skb->data - * -8(RBP) : saved RBX value - * -16(RBP)..-80(RBP) : BPF_MEMWORDS values - */ int bpf_jit_enable __read_mostly; /* * assembly code in arch/x86/net/bpf_jit.S */ -extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[]; +extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[]; -extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[]; +extern u8 sk_load_byte_positive_offset[]; extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[]; -extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[]; +extern u8 sk_load_byte_negative_offset[]; static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) { @@ -56,30 +45,44 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) -#define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0) - -#define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */ -#define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */ +#define EMIT1_off32(b1, off) \ + do {EMIT1(b1); EMIT(off, 4); } while (0) +#define EMIT2_off32(b1, b2, off) \ + do {EMIT2(b1, b2); EMIT(off, 4); } while (0) +#define EMIT3_off32(b1, b2, b3, off) \ + do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0) +#define EMIT4_off32(b1, b2, b3, b4, off) \ + do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) static inline bool is_imm8(int value) { return value <= 127 && value >= -128; } -static inline bool is_near(int offset) +static inline bool is_simm32(s64 value) { - return offset <= 127 && offset >= -128; + return value == (s64) (s32) value; } -#define EMIT_JMP(offset) \ -do { \ - if (offset) { \ - if (is_near(offset)) \ - EMIT2(0xeb, offset); /* jmp .+off8 */ \ - else \ - EMIT1_off32(0xe9, offset); /* jmp .+off32 */ \ - } \ -} while (0) +/* mov A, X */ +#define EMIT_mov(A, X) \ + do {if (A != X) \ + EMIT3(add_2mod(0x48, A, X), 0x89, add_2reg(0xC0, A, X)); \ + } while (0) + +static int bpf_size_to_x86_bytes(int bpf_size) +{ + if (bpf_size == BPF_W) + return 4; + else if (bpf_size == BPF_H) + return 2; + else if (bpf_size == BPF_B) + return 1; + else if (bpf_size == BPF_DW) + return 4; /* imm32 */ + else + return 0; +} /* list of x86 cond jumps opcodes (. + s8) * Add 0x10 (and an extra 0x0f) to generate far jumps (. 
+ s32) @@ -90,27 +93,8 @@ do { \ #define X86_JNE 0x75 #define X86_JBE 0x76 #define X86_JA 0x77 - -#define EMIT_COND_JMP(op, offset) \ -do { \ - if (is_near(offset)) \ - EMIT2(op, offset); /* jxx .+off8 */ \ - else { \ - EMIT2(0x0f, op + 0x10); \ - EMIT(offset, 4); /* jxx .+off32 */ \ - } \ -} while (0) - -#define COND_SEL(CODE, TOP, FOP) \ - case CODE: \ - t_op = TOP; \ - f_op = FOP; \ - goto cond_branch - - -#define SEEN_DATAREF 1 /* might call external helpers */ -#define SEEN_XREG 2 /* ebx is used */ -#define SEEN_MEM 4 /* use mem[] for temporary storage */ +#define X86_JGE 0x7D +#define X86_JG 0x7F static inline void bpf_flush_icache(void *start, void *end) { @@ -125,26 +109,6 @@ static inline void bpf_flush_icache(void *start, void *end) #define CHOOSE_LOAD_FUNC(K, func) \ ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) -/* Helper to find the offset of pkt_type in sk_buff - * We want to make sure its still a 3bit field starting at a byte boundary. - */ -#define PKT_TYPE_MAX 7 -static int pkt_type_offset(void) -{ - struct sk_buff skb_probe = { - .pkt_type = ~0, - }; - char *ct = (char *)&skb_probe; - unsigned int off; - - for (off = 0; off < sizeof(struct sk_buff); off++) { - if (ct[off] == PKT_TYPE_MAX) - return off; - } - pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n"); - return -1; -} - struct bpf_binary_header { unsigned int pages; /* Note : for security reasons, bpf code will follow a randomly @@ -178,546 +142,715 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen, return header; } +/* pick a register outside of BPF range for JIT internal work */ +#define AUX_REG (MAX_BPF_REG + 1) + +/* the following table maps BPF registers to x64 registers. + * x64 register r12 is unused, since if used as base address register + * in load/store instructions, it always needs an extra byte of encoding + */ +static const int reg2hex[] = { + [BPF_REG_0] = 0, /* rax */ + [BPF_REG_1] = 7, /* rdi */ + [BPF_REG_2] = 6, /* rsi */ + [BPF_REG_3] = 2, /* rdx */ + [BPF_REG_4] = 1, /* rcx */ + [BPF_REG_5] = 0, /* r8 */ + [BPF_REG_6] = 3, /* rbx callee saved */ + [BPF_REG_7] = 5, /* r13 callee saved */ + [BPF_REG_8] = 6, /* r14 callee saved */ + [BPF_REG_9] = 7, /* r15 callee saved */ + [BPF_REG_FP] = 5, /* rbp readonly */ + [AUX_REG] = 3, /* r11 temp register */ +}; + +/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15 + * which need extra byte of encoding. 
+ * rax,rcx,...,rbp have simpler encoding + */ +static inline bool is_ereg(u32 reg) +{ + if (reg == BPF_REG_5 || reg == AUX_REG || + (reg >= BPF_REG_7 && reg <= BPF_REG_9)) + return true; + else + return false; +} + +/* add modifiers if 'reg' maps to x64 registers r8..r15 */ +static inline u8 add_1mod(u8 byte, u32 reg) +{ + if (is_ereg(reg)) + byte |= 1; + return byte; +} + +static inline u8 add_2mod(u8 byte, u32 r1, u32 r2) +{ + if (is_ereg(r1)) + byte |= 1; + if (is_ereg(r2)) + byte |= 4; + return byte; +} + +/* encode dest register 'a_reg' into x64 opcode 'byte' */ +static inline u8 add_1reg(u8 byte, u32 a_reg) +{ + return byte + reg2hex[a_reg]; +} + +/* encode dest 'a_reg' and src 'x_reg' registers into x64 opcode 'byte' */ +static inline u8 add_2reg(u8 byte, u32 a_reg, u32 x_reg) +{ + return byte + reg2hex[a_reg] + (reg2hex[x_reg] << 3); +} + struct jit_context { unsigned int cleanup_addr; /* epilogue code offset */ - int pc_ret0; /* bpf index of first RET #0 instruction (if any) */ - u8 seen; + bool seen_ld_abs; }; static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, int oldproglen, struct jit_context *ctx) { - const struct sock_filter *filter = bpf_prog->insns; - int flen = bpf_prog->len; + struct sock_filter_int *insn = bpf_prog->insnsi; + int insn_cnt = bpf_prog->len; u8 temp[64]; - u8 *prog; - int ilen, i, proglen; - int t_offset, f_offset; - u8 t_op, f_op, seen = 0; - u8 *func; - unsigned int cleanup_addr = ctx->cleanup_addr; - u8 seen_or_pass0 = ctx->seen; - - /* no prologue/epilogue for trivial filters (RET something) */ - proglen = 0; - prog = temp; + int i; + int proglen = 0; + u8 *prog = temp; + int stacksize = MAX_BPF_STACK + + 32 /* space for rbx, r13, r14, r15 */ + + 8 /* space for skb_copy_bits() buffer */; - if (seen_or_pass0) { - EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */ - EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */ - /* note : must save %rbx in case bpf_error is hit */ - if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF)) - EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */ - if (seen_or_pass0 & SEEN_XREG) - CLEAR_X(); /* make sure we dont leek kernel memory */ - - /* - * If this filter needs to access skb data, - * loads r9 and r8 with : - * r9 = skb->len - skb->data_len - * r8 = skb->data - */ - if (seen_or_pass0 & SEEN_DATAREF) { - if (offsetof(struct sk_buff, len) <= 127) - /* mov off8(%rdi),%r9d */ - EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len)); - else { - /* mov off32(%rdi),%r9d */ - EMIT3(0x44, 0x8b, 0x8f); - EMIT(offsetof(struct sk_buff, len), 4); - } - if (is_imm8(offsetof(struct sk_buff, data_len))) - /* sub off8(%rdi),%r9d */ - EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len)); - else { - EMIT3(0x44, 0x2b, 0x8f); - EMIT(offsetof(struct sk_buff, data_len), 4); - } + EMIT1(0x55); /* push rbp */ + EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */ - if (is_imm8(offsetof(struct sk_buff, data))) - /* mov off8(%rdi),%r8 */ - EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data)); - else { - /* mov off32(%rdi),%r8 */ - EMIT3(0x4c, 0x8b, 0x87); - EMIT(offsetof(struct sk_buff, data), 4); - } + /* sub rsp, stacksize */ + EMIT3_off32(0x48, 0x81, 0xEC, stacksize); + + /* all classic BPF filters use R6(rbx) save it */ + + /* mov qword ptr [rbp-X],rbx */ + EMIT3_off32(0x48, 0x89, 0x9D, -stacksize); + + /* sk_convert_filter() maps classic BPF register X to R7 and uses R8 + * as temporary, so all tcpdump filters need to spill/fill R7(r13) and + * R8(r14). 
R9(r15) spill could be made conditional, but there is only + * one 'bpf_error' return path out of helper functions inside bpf_jit.S + * The overhead of extra spill is negligible for any filter other + * than synthetic ones. Therefore not worth adding complexity. + */ + + /* mov qword ptr [rbp-X],r13 */ + EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8); + /* mov qword ptr [rbp-X],r14 */ + EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16); + /* mov qword ptr [rbp-X],r15 */ + EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24); + + /* clear A and X registers */ + EMIT2(0x31, 0xc0); /* xor eax, eax */ + EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */ + + if (ctx->seen_ld_abs) { + /* r9d : skb->len - skb->data_len (headlen) + * r10 : skb->data + */ + if (is_imm8(offsetof(struct sk_buff, len))) + /* mov %r9d, off8(%rdi) */ + EMIT4(0x44, 0x8b, 0x4f, + offsetof(struct sk_buff, len)); + else + /* mov %r9d, off32(%rdi) */ + EMIT3_off32(0x44, 0x8b, 0x8f, + offsetof(struct sk_buff, len)); + + if (is_imm8(offsetof(struct sk_buff, data_len))) + /* sub %r9d, off8(%rdi) */ + EMIT4(0x44, 0x2b, 0x4f, + offsetof(struct sk_buff, data_len)); + else + EMIT3_off32(0x44, 0x2b, 0x8f, + offsetof(struct sk_buff, data_len)); + + if (is_imm8(offsetof(struct sk_buff, data))) + /* mov %r10, off8(%rdi) */ + EMIT4(0x4c, 0x8b, 0x57, + offsetof(struct sk_buff, data)); + else + /* mov %r10, off32(%rdi) */ + EMIT3_off32(0x4c, 0x8b, 0x97, + offsetof(struct sk_buff, data)); + } + + for (i = 0; i < insn_cnt; i++, insn++) { + const s32 K = insn->imm; + u32 a_reg = insn->a_reg; + u32 x_reg = insn->x_reg; + u8 b1 = 0, b2 = 0, b3 = 0; + s64 jmp_offset; + u8 jmp_cond; + int ilen; + u8 *func; + + switch (insn->code) { + /* ALU */ + case BPF_ALU | BPF_ADD | BPF_X: + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU | BPF_XOR | BPF_X: + case BPF_ALU64 | BPF_ADD | BPF_X: + case BPF_ALU64 | BPF_SUB | BPF_X: + case BPF_ALU64 | BPF_AND | BPF_X: + case BPF_ALU64 | BPF_OR | BPF_X: + case BPF_ALU64 | BPF_XOR | BPF_X: + switch (BPF_OP(insn->code)) { + case BPF_ADD: b2 = 0x01; break; + case BPF_SUB: b2 = 0x29; break; + case BPF_AND: b2 = 0x21; break; + case BPF_OR: b2 = 0x09; break; + case BPF_XOR: b2 = 0x31; break; } - } + if (BPF_CLASS(insn->code) == BPF_ALU64) + EMIT1(add_2mod(0x48, a_reg, x_reg)); + else if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT1(add_2mod(0x40, a_reg, x_reg)); + EMIT2(b2, add_2reg(0xC0, a_reg, x_reg)); + break; - switch (filter[0].code) { - case BPF_S_RET_K: - case BPF_S_LD_W_LEN: - case BPF_S_ANC_PROTOCOL: - case BPF_S_ANC_IFINDEX: - case BPF_S_ANC_MARK: - case BPF_S_ANC_RXHASH: - case BPF_S_ANC_CPU: - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: - case BPF_S_ANC_QUEUE: - case BPF_S_ANC_PKTTYPE: - case BPF_S_LD_W_ABS: - case BPF_S_LD_H_ABS: - case BPF_S_LD_B_ABS: - /* first instruction sets A register (or is RET 'constant') */ + /* mov A, X */ + case BPF_ALU64 | BPF_MOV | BPF_X: + EMIT_mov(a_reg, x_reg); break; - default: - /* make sure we dont leak kernel information to user */ - CLEAR_A(); /* A = 0 */ - } - for (i = 0; i < flen; i++) { - unsigned int K = filter[i].k; + /* mov32 A, X */ + case BPF_ALU | BPF_MOV | BPF_X: + if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT1(add_2mod(0x40, a_reg, x_reg)); + EMIT2(0x89, add_2reg(0xC0, a_reg, x_reg)); + break; - switch (filter[i].code) { - case BPF_S_ALU_ADD_X: /* A += X; */ - seen |= SEEN_XREG; - EMIT2(0x01, 0xd8); /* add %ebx,%eax */ - break; - case BPF_S_ALU_ADD_K: /* A += K; */ - if (!K) - break; - if 
(is_imm8(K)) - EMIT3(0x83, 0xc0, K); /* add imm8,%eax */ - else - EMIT1_off32(0x05, K); /* add imm32,%eax */ - break; - case BPF_S_ALU_SUB_X: /* A -= X; */ - seen |= SEEN_XREG; - EMIT2(0x29, 0xd8); /* sub %ebx,%eax */ - break; - case BPF_S_ALU_SUB_K: /* A -= K */ - if (!K) - break; - if (is_imm8(K)) - EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */ - else - EMIT1_off32(0x2d, K); /* sub imm32,%eax */ - break; - case BPF_S_ALU_MUL_X: /* A *= X; */ - seen |= SEEN_XREG; - EMIT3(0x0f, 0xaf, 0xc3); /* imul %ebx,%eax */ - break; - case BPF_S_ALU_MUL_K: /* A *= K */ - if (is_imm8(K)) - EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */ - else { - EMIT2(0x69, 0xc0); /* imul imm32,%eax */ - EMIT(K, 4); - } - break; - case BPF_S_ALU_DIV_X: /* A /= X; */ - seen |= SEEN_XREG; - EMIT2(0x85, 0xdb); /* test %ebx,%ebx */ - if (ctx->pc_ret0 > 0) { - /* addrs[pc_ret0 - 1] is start address of target - * (addrs[i] - 4) is the address following this jmp - * ("xor %edx,%edx; div %ebx" being 4 bytes long) - */ - EMIT_COND_JMP(X86_JE, addrs[ctx->pc_ret0 - 1] - - (addrs[i] - 4)); - } else { - EMIT_COND_JMP(X86_JNE, 2 + 5); - CLEAR_A(); - EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */ - } - EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */ - break; - case BPF_S_ALU_MOD_X: /* A %= X; */ - seen |= SEEN_XREG; - EMIT2(0x85, 0xdb); /* test %ebx,%ebx */ - if (ctx->pc_ret0 > 0) { - /* addrs[pc_ret0 - 1] is start address of target - * (addrs[i] - 6) is the address following this jmp - * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long) - */ - EMIT_COND_JMP(X86_JE, addrs[ctx->pc_ret0 - 1] - - (addrs[i] - 6)); - } else { - EMIT_COND_JMP(X86_JNE, 2 + 5); - CLEAR_A(); - EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */ - } - EMIT2(0x31, 0xd2); /* xor %edx,%edx */ - EMIT2(0xf7, 0xf3); /* div %ebx */ - EMIT2(0x89, 0xd0); /* mov %edx,%eax */ - break; - case BPF_S_ALU_MOD_K: /* A %= K; */ - if (K == 1) { - CLEAR_A(); - break; - } - EMIT2(0x31, 0xd2); /* xor %edx,%edx */ - EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ - EMIT2(0xf7, 0xf1); /* div %ecx */ - EMIT2(0x89, 0xd0); /* mov %edx,%eax */ - break; - case BPF_S_ALU_DIV_K: /* A /= K */ - if (K == 1) - break; - EMIT2(0x31, 0xd2); /* xor %edx,%edx */ - EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ - EMIT2(0xf7, 0xf1); /* div %ecx */ - break; - case BPF_S_ALU_AND_X: - seen |= SEEN_XREG; - EMIT2(0x21, 0xd8); /* and %ebx,%eax */ - break; - case BPF_S_ALU_AND_K: - if (K >= 0xFFFFFF00) { - EMIT2(0x24, K & 0xFF); /* and imm8,%al */ - } else if (K >= 0xFFFF0000) { - EMIT2(0x66, 0x25); /* and imm16,%ax */ - EMIT(K, 2); - } else { - EMIT1_off32(0x25, K); /* and imm32,%eax */ - } - break; - case BPF_S_ALU_OR_X: - seen |= SEEN_XREG; - EMIT2(0x09, 0xd8); /* or %ebx,%eax */ - break; - case BPF_S_ALU_OR_K: - if (is_imm8(K)) - EMIT3(0x83, 0xc8, K); /* or imm8,%eax */ - else - EMIT1_off32(0x0d, K); /* or imm32,%eax */ - break; - case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */ - case BPF_S_ALU_XOR_X: - seen |= SEEN_XREG; - EMIT2(0x31, 0xd8); /* xor %ebx,%eax */ - break; - case BPF_S_ALU_XOR_K: /* A ^= K; */ - if (K == 0) - break; - if (is_imm8(K)) - EMIT3(0x83, 0xf0, K); /* xor imm8,%eax */ - else - EMIT1_off32(0x35, K); /* xor imm32,%eax */ - break; - case BPF_S_ALU_LSH_X: /* A <<= X; */ - seen |= SEEN_XREG; - EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */ - break; - case BPF_S_ALU_LSH_K: - if (K == 0) - break; - else if (K == 1) - EMIT2(0xd1, 0xe0); /* shl %eax */ - else - EMIT3(0xc1, 0xe0, K); - break; - case BPF_S_ALU_RSH_X: /* A >>= 
X; */ - seen |= SEEN_XREG; - EMIT4(0x89, 0xd9, 0xd3, 0xe8); /* mov %ebx,%ecx; shr %cl,%eax */ - break; - case BPF_S_ALU_RSH_K: /* A >>= K; */ - if (K == 0) - break; - else if (K == 1) - EMIT2(0xd1, 0xe8); /* shr %eax */ - else - EMIT3(0xc1, 0xe8, K); - break; - case BPF_S_ALU_NEG: - EMIT2(0xf7, 0xd8); /* neg %eax */ - break; - case BPF_S_RET_K: - if (!K) { - if (ctx->pc_ret0 == -1) - ctx->pc_ret0 = i; - CLEAR_A(); - } else { - EMIT1_off32(0xb8, K); /* mov $imm32,%eax */ - } - /* fallinto */ - case BPF_S_RET_A: - if (seen_or_pass0) { - if (i != flen - 1) { - EMIT_JMP(cleanup_addr - addrs[i]); - break; - } - if (seen_or_pass0 & SEEN_XREG) - EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */ - EMIT1(0xc9); /* leaveq */ - } - EMIT1(0xc3); /* ret */ - break; - case BPF_S_MISC_TAX: /* X = A */ - seen |= SEEN_XREG; - EMIT2(0x89, 0xc3); /* mov %eax,%ebx */ - break; - case BPF_S_MISC_TXA: /* A = X */ - seen |= SEEN_XREG; - EMIT2(0x89, 0xd8); /* mov %ebx,%eax */ - break; - case BPF_S_LD_IMM: /* A = K */ - if (!K) - CLEAR_A(); - else - EMIT1_off32(0xb8, K); /* mov $imm32,%eax */ + /* neg A */ + case BPF_ALU | BPF_NEG: + case BPF_ALU64 | BPF_NEG: + if (BPF_CLASS(insn->code) == BPF_ALU64) + EMIT1(add_1mod(0x48, a_reg)); + else if (is_ereg(a_reg)) + EMIT1(add_1mod(0x40, a_reg)); + EMIT2(0xF7, add_1reg(0xD8, a_reg)); + break; + + case BPF_ALU | BPF_ADD | BPF_K: + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_ADD | BPF_K: + case BPF_ALU64 | BPF_SUB | BPF_K: + case BPF_ALU64 | BPF_AND | BPF_K: + case BPF_ALU64 | BPF_OR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_K: + if (BPF_CLASS(insn->code) == BPF_ALU64) + EMIT1(add_1mod(0x48, a_reg)); + else if (is_ereg(a_reg)) + EMIT1(add_1mod(0x40, a_reg)); + + switch (BPF_OP(insn->code)) { + case BPF_ADD: b3 = 0xC0; break; + case BPF_SUB: b3 = 0xE8; break; + case BPF_AND: b3 = 0xE0; break; + case BPF_OR: b3 = 0xC8; break; + case BPF_XOR: b3 = 0xF0; break; + } + + if (is_imm8(K)) + EMIT3(0x83, add_1reg(b3, a_reg), K); + else + EMIT2_off32(0x81, add_1reg(b3, a_reg), K); + break; + + case BPF_ALU64 | BPF_MOV | BPF_K: + /* optimization: if imm32 is positive, + * use 'mov eax, imm32' (which zero-extends imm32) + * to save 2 bytes + */ + if (K < 0) { + /* 'mov rax, imm32' sign extends imm32 */ + b1 = add_1mod(0x48, a_reg); + b2 = 0xC7; + b3 = 0xC0; + EMIT3_off32(b1, b2, add_1reg(b3, a_reg), K); break; - case BPF_S_LDX_IMM: /* X = K */ - seen |= SEEN_XREG; - if (!K) - CLEAR_X(); + } + + case BPF_ALU | BPF_MOV | BPF_K: + /* mov %eax, imm32 */ + if (is_ereg(a_reg)) + EMIT1(add_1mod(0x40, a_reg)); + EMIT1_off32(add_1reg(0xB8, a_reg), K); + break; + + /* A %= X, A /= X, A %= K, A /= K */ + case BPF_ALU | BPF_MOD | BPF_X: + case BPF_ALU | BPF_DIV | BPF_X: + case BPF_ALU | BPF_MOD | BPF_K: + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU64 | BPF_MOD | BPF_X: + case BPF_ALU64 | BPF_DIV | BPF_X: + case BPF_ALU64 | BPF_MOD | BPF_K: + case BPF_ALU64 | BPF_DIV | BPF_K: + EMIT1(0x50); /* push rax */ + EMIT1(0x52); /* push rdx */ + + if (BPF_SRC(insn->code) == BPF_X) + /* mov r11, X */ + EMIT_mov(AUX_REG, x_reg); + else + /* mov r11, K */ + EMIT3_off32(0x49, 0xC7, 0xC3, K); + + /* mov rax, A */ + EMIT_mov(BPF_REG_0, a_reg); + + /* xor edx, edx + * equivalent to 'xor rdx, rdx', but one byte less + */ + EMIT2(0x31, 0xd2); + + if (BPF_SRC(insn->code) == BPF_X) { + /* if (X == 0) return 0 */ + + /* cmp r11, 0 */ + EMIT4(0x49, 0x83, 0xFB, 0x00); + + /* jne .+9 (skip over pop, pop, 
xor and jmp) */ + EMIT2(X86_JNE, 1 + 1 + 2 + 5); + EMIT1(0x5A); /* pop rdx */ + EMIT1(0x58); /* pop rax */ + EMIT2(0x31, 0xc0); /* xor eax, eax */ + + /* jmp cleanup_addr + * addrs[i] - 11, because there are 11 bytes + * after this insn: div, mov, pop, pop, mov + */ + jmp_offset = ctx->cleanup_addr - (addrs[i] - 11); + EMIT1_off32(0xE9, jmp_offset); + } + + if (BPF_CLASS(insn->code) == BPF_ALU64) + /* div r11 */ + EMIT3(0x49, 0xF7, 0xF3); + else + /* div r11d */ + EMIT3(0x41, 0xF7, 0xF3); + + if (BPF_OP(insn->code) == BPF_MOD) + /* mov r11, rdx */ + EMIT3(0x49, 0x89, 0xD3); + else + /* mov r11, rax */ + EMIT3(0x49, 0x89, 0xC3); + + EMIT1(0x5A); /* pop rdx */ + EMIT1(0x58); /* pop rax */ + + /* mov A, r11 */ + EMIT_mov(a_reg, AUX_REG); + break; + + case BPF_ALU | BPF_MUL | BPF_K: + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU64 | BPF_MUL | BPF_K: + case BPF_ALU64 | BPF_MUL | BPF_X: + EMIT1(0x50); /* push rax */ + EMIT1(0x52); /* push rdx */ + + /* mov r11, A */ + EMIT_mov(AUX_REG, a_reg); + + if (BPF_SRC(insn->code) == BPF_X) + /* mov rax, X */ + EMIT_mov(BPF_REG_0, x_reg); + else + /* mov rax, K */ + EMIT3_off32(0x48, 0xC7, 0xC0, K); + + if (BPF_CLASS(insn->code) == BPF_ALU64) + EMIT1(add_1mod(0x48, AUX_REG)); + else if (is_ereg(AUX_REG)) + EMIT1(add_1mod(0x40, AUX_REG)); + /* mul(q) r11 */ + EMIT2(0xF7, add_1reg(0xE0, AUX_REG)); + + /* mov r11, rax */ + EMIT_mov(AUX_REG, BPF_REG_0); + + EMIT1(0x5A); /* pop rdx */ + EMIT1(0x58); /* pop rax */ + + /* mov A, r11 */ + EMIT_mov(a_reg, AUX_REG); + break; + + /* shifts */ + case BPF_ALU | BPF_LSH | BPF_K: + case BPF_ALU | BPF_RSH | BPF_K: + case BPF_ALU | BPF_ARSH | BPF_K: + case BPF_ALU64 | BPF_LSH | BPF_K: + case BPF_ALU64 | BPF_RSH | BPF_K: + case BPF_ALU64 | BPF_ARSH | BPF_K: + if (BPF_CLASS(insn->code) == BPF_ALU64) + EMIT1(add_1mod(0x48, a_reg)); + else if (is_ereg(a_reg)) + EMIT1(add_1mod(0x40, a_reg)); + + switch (BPF_OP(insn->code)) { + case BPF_LSH: b3 = 0xE0; break; + case BPF_RSH: b3 = 0xE8; break; + case BPF_ARSH: b3 = 0xF8; break; + } + EMIT3(0xC1, add_1reg(b3, a_reg), K); + break; + + case BPF_ALU | BPF_END | BPF_FROM_BE: + switch (K) { + case 16: + /* emit 'ror %ax, 8' to swap lower 2 bytes */ + EMIT1(0x66); + if (is_ereg(a_reg)) + EMIT1(0x41); + EMIT3(0xC1, add_1reg(0xC8, a_reg), 8); + break; + case 32: + /* emit 'bswap eax' to swap lower 4 bytes */ + if (is_ereg(a_reg)) + EMIT2(0x41, 0x0F); else - EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */ - break; - case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */ - seen |= SEEN_MEM; - EMIT3(0x8b, 0x45, 0xf0 - K*4); + EMIT1(0x0F); + EMIT1(add_1reg(0xC8, a_reg)); break; - case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */ - seen |= SEEN_XREG | SEEN_MEM; - EMIT3(0x8b, 0x5d, 0xf0 - K*4); - break; - case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */ - seen |= SEEN_MEM; - EMIT3(0x89, 0x45, 0xf0 - K*4); - break; - case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */ - seen |= SEEN_XREG | SEEN_MEM; - EMIT3(0x89, 0x5d, 0xf0 - K*4); - break; - case BPF_S_LD_W_LEN: /* A = skb->len; */ - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); - if (is_imm8(offsetof(struct sk_buff, len))) - /* mov off8(%rdi),%eax */ - EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len)); - else { - EMIT2(0x8b, 0x87); - EMIT(offsetof(struct sk_buff, len), 4); - } - break; - case BPF_S_LDX_W_LEN: /* X = skb->len; */ - seen |= SEEN_XREG; - if (is_imm8(offsetof(struct sk_buff, len))) - /* mov off8(%rdi),%ebx */ - EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len)); - else { - EMIT2(0x8b, 0x9f); - 
EMIT(offsetof(struct sk_buff, len), 4); - } - break; - case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); - if (is_imm8(offsetof(struct sk_buff, protocol))) { - /* movzwl off8(%rdi),%eax */ - EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol)); - } else { - EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */ - EMIT(offsetof(struct sk_buff, protocol), 4); - } - EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */ - break; - case BPF_S_ANC_IFINDEX: - if (is_imm8(offsetof(struct sk_buff, dev))) { - /* movq off8(%rdi),%rax */ - EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev)); - } else { - EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */ - EMIT(offsetof(struct sk_buff, dev), 4); - } - EMIT3(0x48, 0x85, 0xc0); /* test %rax,%rax */ - EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6)); - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); - EMIT2(0x8b, 0x80); /* mov off32(%rax),%eax */ - EMIT(offsetof(struct net_device, ifindex), 4); - break; - case BPF_S_ANC_MARK: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); - if (is_imm8(offsetof(struct sk_buff, mark))) { - /* mov off8(%rdi),%eax */ - EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark)); - } else { - EMIT2(0x8b, 0x87); - EMIT(offsetof(struct sk_buff, mark), 4); - } - break; - case BPF_S_ANC_RXHASH: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); - if (is_imm8(offsetof(struct sk_buff, hash))) { - /* mov off8(%rdi),%eax */ - EMIT3(0x8b, 0x47, offsetof(struct sk_buff, hash)); - } else { - EMIT2(0x8b, 0x87); - EMIT(offsetof(struct sk_buff, hash), 4); - } - break; - case BPF_S_ANC_QUEUE: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); - if (is_imm8(offsetof(struct sk_buff, queue_mapping))) { - /* movzwl off8(%rdi),%eax */ - EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping)); - } else { - EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */ - EMIT(offsetof(struct sk_buff, queue_mapping), 4); - } - break; - case BPF_S_ANC_CPU: -#ifdef CONFIG_SMP - EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */ - EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */ -#else - CLEAR_A(); -#endif - break; - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); - if (is_imm8(offsetof(struct sk_buff, vlan_tci))) { - /* movzwl off8(%rdi),%eax */ - EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci)); - } else { - EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */ - EMIT(offsetof(struct sk_buff, vlan_tci), 4); - } - BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); - if (filter[i].code == BPF_S_ANC_VLAN_TAG) { - EMIT3(0x80, 0xe4, 0xef); /* and $0xef,%ah */ - } else { - EMIT3(0xc1, 0xe8, 0x0c); /* shr $0xc,%eax */ - EMIT3(0x83, 0xe0, 0x01); /* and $0x1,%eax */ - } - break; - case BPF_S_ANC_PKTTYPE: - { - int off = pkt_type_offset(); - - if (off < 0) - return -EINVAL; - if (is_imm8(off)) { - /* movzbl off8(%rdi),%eax */ - EMIT4(0x0f, 0xb6, 0x47, off); - } else { - /* movbl off32(%rdi),%eax */ - EMIT3(0x0f, 0xb6, 0x87); - EMIT(off, 4); - } - EMIT3(0x83, 0xe0, PKT_TYPE_MAX); /* and $0x7,%eax */ + case 64: + /* emit 'bswap rax' to swap 8 bytes */ + EMIT3(add_1mod(0x48, a_reg), 0x0F, + add_1reg(0xC8, a_reg)); break; } - case BPF_S_LD_W_ABS: - func = CHOOSE_LOAD_FUNC(K, sk_load_word); -common_load: seen |= SEEN_DATAREF; - t_offset = func - (image + addrs[i]); - EMIT1_off32(0xbe, K); /* mov imm32,%esi */ - EMIT1_off32(0xe8, t_offset); /* call */ 
- break; - case BPF_S_LD_H_ABS: - func = CHOOSE_LOAD_FUNC(K, sk_load_half); - goto common_load; - case BPF_S_LD_B_ABS: - func = CHOOSE_LOAD_FUNC(K, sk_load_byte); - goto common_load; - case BPF_S_LDX_B_MSH: - func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh); - seen |= SEEN_DATAREF | SEEN_XREG; - t_offset = func - (image + addrs[i]); - EMIT1_off32(0xbe, K); /* mov imm32,%esi */ - EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */ - break; - case BPF_S_LD_W_IND: - func = sk_load_word; -common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; - t_offset = func - (image + addrs[i]); - if (K) { - if (is_imm8(K)) { - EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */ - } else { - EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */ - EMIT(K, 4); - } - } else { - EMIT2(0x89,0xde); /* mov %ebx,%esi */ - } - EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */ - break; - case BPF_S_LD_H_IND: - func = sk_load_half; - goto common_load_ind; - case BPF_S_LD_B_IND: - func = sk_load_byte; - goto common_load_ind; - case BPF_S_JMP_JA: - t_offset = addrs[i + K] - addrs[i]; - EMIT_JMP(t_offset); - break; - COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE); - COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB); - COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE); - COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE); - COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE); - COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB); - COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE); - COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE); - -cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; - t_offset = addrs[i + filter[i].jt] - addrs[i]; - - /* same targets, can avoid doing the test :) */ - if (filter[i].jt == filter[i].jf) { - EMIT_JMP(t_offset); - break; - } + break; + + case BPF_ALU | BPF_END | BPF_FROM_LE: + break; + + /* ST: *(u8*)(a_reg + off) = imm */ + case BPF_ST | BPF_MEM | BPF_B: + if (is_ereg(a_reg)) + EMIT2(0x41, 0xC6); + else + EMIT1(0xC6); + goto st; + case BPF_ST | BPF_MEM | BPF_H: + if (is_ereg(a_reg)) + EMIT3(0x66, 0x41, 0xC7); + else + EMIT2(0x66, 0xC7); + goto st; + case BPF_ST | BPF_MEM | BPF_W: + if (is_ereg(a_reg)) + EMIT2(0x41, 0xC7); + else + EMIT1(0xC7); + goto st; + case BPF_ST | BPF_MEM | BPF_DW: + EMIT2(add_1mod(0x48, a_reg), 0xC7); + +st: if (is_imm8(insn->off)) + EMIT2(add_1reg(0x40, a_reg), insn->off); + else + EMIT1_off32(add_1reg(0x80, a_reg), insn->off); + + EMIT(K, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); + break; + + /* STX: *(u8*)(a_reg + off) = x_reg */ + case BPF_STX | BPF_MEM | BPF_B: + /* emit 'mov byte ptr [rax + off], al' */ + if (is_ereg(a_reg) || is_ereg(x_reg) || + /* have to add extra byte for x86 SIL, DIL regs */ + x_reg == BPF_REG_1 || x_reg == BPF_REG_2) + EMIT2(add_2mod(0x40, a_reg, x_reg), 0x88); + else + EMIT1(0x88); + goto stx; + case BPF_STX | BPF_MEM | BPF_H: + if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT3(0x66, add_2mod(0x40, a_reg, x_reg), 0x89); + else + EMIT2(0x66, 0x89); + goto stx; + case BPF_STX | BPF_MEM | BPF_W: + if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT2(add_2mod(0x40, a_reg, x_reg), 0x89); + else + EMIT1(0x89); + goto stx; + case BPF_STX | BPF_MEM | BPF_DW: + EMIT2(add_2mod(0x48, a_reg, x_reg), 0x89); +stx: if (is_imm8(insn->off)) + EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off); + else + EMIT1_off32(add_2reg(0x80, a_reg, x_reg), + insn->off); + break; + + /* LDX: a_reg = *(u8*)(x_reg + off) */ + case BPF_LDX | BPF_MEM | BPF_B: + /* emit 'movzx rax, byte ptr [rax + off]' */ + EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB6); + goto ldx; + case BPF_LDX | BPF_MEM | BPF_H: + /* emit 'movzx rax, word ptr [rax 
+ off]' */ + EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB7); + goto ldx; + case BPF_LDX | BPF_MEM | BPF_W: + /* emit 'mov eax, dword ptr [rax+0x14]' */ + if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT2(add_2mod(0x40, x_reg, a_reg), 0x8B); + else + EMIT1(0x8B); + goto ldx; + case BPF_LDX | BPF_MEM | BPF_DW: + /* emit 'mov rax, qword ptr [rax+0x14]' */ + EMIT2(add_2mod(0x48, x_reg, a_reg), 0x8B); +ldx: /* if insn->off == 0 we can save one extra byte, but + * special case of x86 r13 which always needs an offset + * is not worth the hassle + */ + if (is_imm8(insn->off)) + EMIT2(add_2reg(0x40, x_reg, a_reg), insn->off); + else + EMIT1_off32(add_2reg(0x80, x_reg, a_reg), + insn->off); + break; + + /* STX XADD: lock *(u32*)(a_reg + off) += x_reg */ + case BPF_STX | BPF_XADD | BPF_W: + /* emit 'lock add dword ptr [rax + off], eax' */ + if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT3(0xF0, add_2mod(0x40, a_reg, x_reg), 0x01); + else + EMIT2(0xF0, 0x01); + goto xadd; + case BPF_STX | BPF_XADD | BPF_DW: + EMIT3(0xF0, add_2mod(0x48, a_reg, x_reg), 0x01); +xadd: if (is_imm8(insn->off)) + EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off); + else + EMIT1_off32(add_2reg(0x80, a_reg, x_reg), + insn->off); + break; + + /* call */ + case BPF_JMP | BPF_CALL: + func = (u8 *) __bpf_call_base + K; + jmp_offset = func - (image + addrs[i]); + if (ctx->seen_ld_abs) { + EMIT2(0x41, 0x52); /* push %r10 */ + EMIT2(0x41, 0x51); /* push %r9 */ + /* need to adjust jmp offset, since + * pop %r9, pop %r10 take 4 bytes after call insn + */ + jmp_offset += 4; + } + if (!K || !is_simm32(jmp_offset)) { + pr_err("unsupported bpf func %d addr %p image %p\n", + K, func, image); + return -EINVAL; + } + EMIT1_off32(0xE8, jmp_offset); + if (ctx->seen_ld_abs) { + EMIT2(0x41, 0x59); /* pop %r9 */ + EMIT2(0x41, 0x5A); /* pop %r10 */ + } + break; + + /* cond jump */ + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: + /* cmp a_reg, x_reg */ + EMIT3(add_2mod(0x48, a_reg, x_reg), 0x39, + add_2reg(0xC0, a_reg, x_reg)); + goto emit_cond_jmp; + + case BPF_JMP | BPF_JSET | BPF_X: + /* test a_reg, x_reg */ + EMIT3(add_2mod(0x48, a_reg, x_reg), 0x85, + add_2reg(0xC0, a_reg, x_reg)); + goto emit_cond_jmp; + + case BPF_JMP | BPF_JSET | BPF_K: + /* test a_reg, imm32 */ + EMIT1(add_1mod(0x48, a_reg)); + EMIT2_off32(0xF7, add_1reg(0xC0, a_reg), K); + goto emit_cond_jmp; + + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + /* cmp a_reg, imm8/32 */ + EMIT1(add_1mod(0x48, a_reg)); + + if (is_imm8(K)) + EMIT3(0x83, add_1reg(0xF8, a_reg), K); + else + EMIT2_off32(0x81, add_1reg(0xF8, a_reg), K); + +emit_cond_jmp: /* convert BPF opcode to x86 */ + switch (BPF_OP(insn->code)) { + case BPF_JEQ: + jmp_cond = X86_JE; + break; + case BPF_JSET: + case BPF_JNE: + jmp_cond = X86_JNE; + break; + case BPF_JGT: + /* GT is unsigned '>', JA in x86 */ + jmp_cond = X86_JA; + break; + case BPF_JGE: + /* GE is unsigned '>=', JAE in x86 */ + jmp_cond = X86_JAE; + break; + case BPF_JSGT: + /* signed '>', GT in x86 */ + jmp_cond = X86_JG; + break; + case BPF_JSGE: + /* signed '>=', GE in x86 */ + jmp_cond = X86_JGE; + break; + default: /* to silence gcc warning */ + return -EFAULT; + } + jmp_offset = addrs[i + insn->off] - addrs[i]; + if (is_imm8(jmp_offset)) { + 
EMIT2(jmp_cond, jmp_offset); + } else if (is_simm32(jmp_offset)) { + EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); + } else { + pr_err("cond_jmp gen bug %llx\n", jmp_offset); + return -EFAULT; + } + + break; - switch (filter[i].code) { - case BPF_S_JMP_JGT_X: - case BPF_S_JMP_JGE_X: - case BPF_S_JMP_JEQ_X: - seen |= SEEN_XREG; - EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */ - break; - case BPF_S_JMP_JSET_X: - seen |= SEEN_XREG; - EMIT2(0x85, 0xd8); /* test %ebx,%eax */ - break; - case BPF_S_JMP_JEQ_K: - if (K == 0) { - EMIT2(0x85, 0xc0); /* test %eax,%eax */ - break; - } - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGE_K: - if (K <= 127) - EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */ + case BPF_JMP | BPF_JA: + jmp_offset = addrs[i + insn->off] - addrs[i]; + if (!jmp_offset) + /* optimize out nop jumps */ + break; +emit_jmp: + if (is_imm8(jmp_offset)) { + EMIT2(0xEB, jmp_offset); + } else if (is_simm32(jmp_offset)) { + EMIT1_off32(0xE9, jmp_offset); + } else { + pr_err("jmp gen bug %llx\n", jmp_offset); + return -EFAULT; + } + break; + + case BPF_LD | BPF_IND | BPF_W: + func = sk_load_word; + goto common_load; + case BPF_LD | BPF_ABS | BPF_W: + func = CHOOSE_LOAD_FUNC(K, sk_load_word); +common_load: ctx->seen_ld_abs = true; + jmp_offset = func - (image + addrs[i]); + if (!func || !is_simm32(jmp_offset)) { + pr_err("unsupported bpf func %d addr %p image %p\n", + K, func, image); + return -EINVAL; + } + if (BPF_MODE(insn->code) == BPF_ABS) { + /* mov %esi, imm32 */ + EMIT1_off32(0xBE, K); + } else { + /* mov %rsi, x_reg */ + EMIT_mov(BPF_REG_2, x_reg); + if (K) { + if (is_imm8(K)) + /* add %esi, imm8 */ + EMIT3(0x83, 0xC6, K); else - EMIT1_off32(0x3d, K); /* cmp imm32,%eax */ - break; - case BPF_S_JMP_JSET_K: - if (K <= 0xFF) - EMIT2(0xa8, K); /* test imm8,%al */ - else if (!(K & 0xFFFF00FF)) - EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */ - else if (K <= 0xFFFF) { - EMIT2(0x66, 0xa9); /* test imm16,%ax */ - EMIT(K, 2); - } else { - EMIT1_off32(0xa9, K); /* test imm32,%eax */ - } - break; + /* add %esi, imm32 */ + EMIT2_off32(0x81, 0xC6, K); } - if (filter[i].jt != 0) { - if (filter[i].jf && f_offset) - t_offset += is_near(f_offset) ? 2 : 5; - EMIT_COND_JMP(t_op, t_offset); - if (filter[i].jf) - EMIT_JMP(f_offset); - break; - } - EMIT_COND_JMP(f_op, f_offset); - break; + } + /* skb pointer is in R6 (%rbx), it will be copied into + * %rdi if skb_copy_bits() call is necessary. + * sk_load_* helpers also use %r10 and %r9d. 
+ * See bpf_jit.S + */ + EMIT1_off32(0xE8, jmp_offset); /* call */ + break; + + case BPF_LD | BPF_IND | BPF_H: + func = sk_load_half; + goto common_load; + case BPF_LD | BPF_ABS | BPF_H: + func = CHOOSE_LOAD_FUNC(K, sk_load_half); + goto common_load; + case BPF_LD | BPF_IND | BPF_B: + func = sk_load_byte; + goto common_load; + case BPF_LD | BPF_ABS | BPF_B: + func = CHOOSE_LOAD_FUNC(K, sk_load_byte); + goto common_load; + + case BPF_JMP | BPF_EXIT: + if (i != insn_cnt - 1) { + jmp_offset = ctx->cleanup_addr - addrs[i]; + goto emit_jmp; + } + /* update cleanup_addr */ + ctx->cleanup_addr = proglen; + /* mov rbx, qword ptr [rbp-X] */ + EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize); + /* mov r13, qword ptr [rbp-X] */ + EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8); + /* mov r14, qword ptr [rbp-X] */ + EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16); + /* mov r15, qword ptr [rbp-X] */ + EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24); + + EMIT1(0xC9); /* leave */ + EMIT1(0xC3); /* ret */ + break; + default: - /* hmm, too complex filter, give up with jit compiler */ + /* By design x64 JIT should support all BPF instructions + * This error will be seen if new instruction was added + * to interpreter, but not to JIT + * or if there is junk in sk_filter + */ + pr_err("bpf_jit: unknown opcode %02x\n", insn->code); return -EINVAL; } + ilen = prog - temp; if (image) { if (unlikely(proglen + ilen > oldproglen)) { - pr_err("bpb_jit_compile fatal error\n"); + pr_err("bpf_jit_compile fatal error\n"); return -EFAULT; } memcpy(image + proglen, temp, ilen); @@ -726,21 +859,14 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; addrs[i] = proglen; prog = temp; } - /* last bpf instruction is always a RET : - * use it to give the cleanup instruction(s) addr - */ - ctx->cleanup_addr = proglen - 1; /* ret */ - if (seen_or_pass0) - ctx->cleanup_addr -= 1; /* leaveq */ - if (seen_or_pass0 & SEEN_XREG) - ctx->cleanup_addr -= 4; /* mov -8(%rbp),%rbx */ - - ctx->seen = seen; - return proglen; } void bpf_jit_compile(struct sk_filter *prog) +{ +} + +void bpf_int_jit_compile(struct sk_filter *prog) { struct bpf_binary_header *header = NULL; int proglen, oldproglen = 0; @@ -768,8 +894,6 @@ void bpf_jit_compile(struct sk_filter *prog) addrs[i] = proglen; } ctx.cleanup_addr = proglen; - ctx.seen = SEEN_XREG | SEEN_DATAREF | SEEN_MEM; - ctx.pc_ret0 = -1; for (pass = 0; pass < 10; pass++) { proglen = do_jit(prog, addrs, image, oldproglen, &ctx); diff --git a/include/linux/filter.h b/include/linux/filter.h index 4457b383961c..9d5ae0a2c954 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -207,6 +207,9 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to); void sk_filter_charge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); +u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); +void bpf_int_jit_compile(struct sk_filter *fp); + #ifdef CONFIG_BPF_JIT #include #include diff --git a/net/core/filter.c b/net/core/filter.c index c442a0d7d0f7..32c5b44c537e 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1524,6 +1524,10 @@ out_err: return ERR_PTR(err); } +void __weak bpf_int_jit_compile(struct sk_filter *prog) +{ +} + static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp, struct sock *sk) { @@ -1544,9 +1548,12 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp, /* JIT compiler couldn't process this filter, so do the * internal BPF translation for the optimized interpreter. 
*/ - if (!fp->jited) + if (!fp->jited) { fp = __sk_migrate_filter(fp, sk); + /* Probe if internal BPF can be jit-ed */ + bpf_int_jit_compile(fp); + } return fp; } -- cgit v1.2.3-59-g8ed1b From 97dc48e220282742e4c4a8fde81bdd4dbe011f1e Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Fri, 16 May 2014 23:28:54 +0200 Subject: pktgen: Use seq_puts() where seq_printf() is not needed Signed-off-by: Thomas Graf Signed-off-by: David S. Miller --- net/core/pktgen.c | 50 +++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) (limited to 'net/core') diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 0304f981f7ff..fc17a9d309ac 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -573,7 +573,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v) is_zero_ether_addr(pkt_dev->src_mac) ? pkt_dev->odev->dev_addr : pkt_dev->src_mac); - seq_printf(seq, "dst_mac: "); + seq_puts(seq, "dst_mac: "); seq_printf(seq, "%pM\n", pkt_dev->dst_mac); seq_printf(seq, @@ -588,7 +588,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v) if (pkt_dev->nr_labels) { unsigned int i; - seq_printf(seq, " mpls: "); + seq_puts(seq, " mpls: "); for (i = 0; i < pkt_dev->nr_labels; i++) seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]), i == pkt_dev->nr_labels-1 ? "\n" : ", "); @@ -613,67 +613,67 @@ static int pktgen_if_show(struct seq_file *seq, void *v) if (pkt_dev->node >= 0) seq_printf(seq, " node: %d\n", pkt_dev->node); - seq_printf(seq, " Flags: "); + seq_puts(seq, " Flags: "); if (pkt_dev->flags & F_IPV6) - seq_printf(seq, "IPV6 "); + seq_puts(seq, "IPV6 "); if (pkt_dev->flags & F_IPSRC_RND) - seq_printf(seq, "IPSRC_RND "); + seq_puts(seq, "IPSRC_RND "); if (pkt_dev->flags & F_IPDST_RND) - seq_printf(seq, "IPDST_RND "); + seq_puts(seq, "IPDST_RND "); if (pkt_dev->flags & F_TXSIZE_RND) - seq_printf(seq, "TXSIZE_RND "); + seq_puts(seq, "TXSIZE_RND "); if (pkt_dev->flags & F_UDPSRC_RND) - seq_printf(seq, "UDPSRC_RND "); + seq_puts(seq, "UDPSRC_RND "); if (pkt_dev->flags & F_UDPDST_RND) - seq_printf(seq, "UDPDST_RND "); + seq_puts(seq, "UDPDST_RND "); if (pkt_dev->flags & F_UDPCSUM) - seq_printf(seq, "UDPCSUM "); + seq_puts(seq, "UDPCSUM "); if (pkt_dev->flags & F_MPLS_RND) - seq_printf(seq, "MPLS_RND "); + seq_puts(seq, "MPLS_RND "); if (pkt_dev->flags & F_QUEUE_MAP_RND) - seq_printf(seq, "QUEUE_MAP_RND "); + seq_puts(seq, "QUEUE_MAP_RND "); if (pkt_dev->flags & F_QUEUE_MAP_CPU) - seq_printf(seq, "QUEUE_MAP_CPU "); + seq_puts(seq, "QUEUE_MAP_CPU "); if (pkt_dev->cflows) { if (pkt_dev->flags & F_FLOW_SEQ) - seq_printf(seq, "FLOW_SEQ "); /*in sequence flows*/ + seq_puts(seq, "FLOW_SEQ "); /*in sequence flows*/ else - seq_printf(seq, "FLOW_RND "); + seq_puts(seq, "FLOW_RND "); } #ifdef CONFIG_XFRM if (pkt_dev->flags & F_IPSEC_ON) { - seq_printf(seq, "IPSEC "); + seq_puts(seq, "IPSEC "); if (pkt_dev->spi) seq_printf(seq, "spi:%u", pkt_dev->spi); } #endif if (pkt_dev->flags & F_MACSRC_RND) - seq_printf(seq, "MACSRC_RND "); + seq_puts(seq, "MACSRC_RND "); if (pkt_dev->flags & F_MACDST_RND) - seq_printf(seq, "MACDST_RND "); + seq_puts(seq, "MACDST_RND "); if (pkt_dev->flags & F_VID_RND) - seq_printf(seq, "VID_RND "); + seq_puts(seq, "VID_RND "); if (pkt_dev->flags & F_SVID_RND) - seq_printf(seq, "SVID_RND "); + seq_puts(seq, "SVID_RND "); if (pkt_dev->flags & F_NODE) - seq_printf(seq, "NODE_ALLOC "); + seq_puts(seq, "NODE_ALLOC "); seq_puts(seq, "\n"); @@ -716,7 +716,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v) if (pkt_dev->result[0]) seq_printf(seq, 
"Result: %s\n", pkt_dev->result); else - seq_printf(seq, "Result: Idle\n"); + seq_puts(seq, "Result: Idle\n"); return 0; } @@ -1735,14 +1735,14 @@ static int pktgen_thread_show(struct seq_file *seq, void *v) BUG_ON(!t); - seq_printf(seq, "Running: "); + seq_puts(seq, "Running: "); if_lock(t); list_for_each_entry(pkt_dev, &t->if_list, list) if (pkt_dev->running) seq_printf(seq, "%s ", pkt_dev->odevname); - seq_printf(seq, "\nStopped: "); + seq_puts(seq, "\nStopped: "); list_for_each_entry(pkt_dev, &t->if_list, list) if (!pkt_dev->running) @@ -1751,7 +1751,7 @@ static int pktgen_thread_show(struct seq_file *seq, void *v) if (t->result[0]) seq_printf(seq, "\nResult: %s\n", t->result); else - seq_printf(seq, "\nResult: NA\n"); + seq_puts(seq, "\nResult: NA\n"); if_unlock(t); -- cgit v1.2.3-59-g8ed1b From fb95cd8d1473b1cc90eccbd6a30641f3851c8506 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 15 May 2014 00:46:45 +0100 Subject: ethtool: Return immediately on error in ethtool_copy_validate_indir() We must return -EFAULT immediately rather than continuing into the loop. Similarly, we may as well return -EINVAL directly. Signed-off-by: Ben Hutchings --- net/core/ethtool.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'net/core') diff --git a/net/core/ethtool.c b/net/core/ethtool.c index aa8978ac47d2..c834cb29f682 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -561,19 +561,17 @@ static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr, struct ethtool_rxnfc *rx_rings, u32 size) { - int ret = 0, i; + int i; if (copy_from_user(indir, useraddr, size * sizeof(indir[0]))) - ret = -EFAULT; + return -EFAULT; /* Validate ring indices */ - for (i = 0; i < size; i++) { - if (indir[i] >= rx_rings->data) { - ret = -EINVAL; - break; - } - } - return ret; + for (i = 0; i < size; i++) + if (indir[i] >= rx_rings->data) + return -EINVAL; + + return 0; } static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, -- cgit v1.2.3-59-g8ed1b From 7455fa2422898eee3464032351d20695930d9542 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 15 May 2014 01:41:23 +0100 Subject: ethtool: Name the 'no change' value for setting RSS hash key but not indir table We usually allocate special values of u32 fields starting from the top down, so also change the value to 0xffffffff. As these operations haven't been included in a stable release yet, it's not too late to change. Signed-off-by: Ben Hutchings --- include/uapi/linux/ethtool.h | 12 +++++++----- net/core/ethtool.c | 12 +++++++----- 2 files changed, 14 insertions(+), 10 deletions(-) (limited to 'net/core') diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index d47d31d6fa0e..cba18e3f8fab 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -850,7 +850,8 @@ struct ethtool_rxfh_indir { * struct ethtool_rxfh - command to get/set RX flow hash indir or/and hash key. * @cmd: Specific command number - %ETHTOOL_GRSSH or %ETHTOOL_SRSSH * @rss_context: RSS context identifier. - * @indir_size: On entry, the array size of the user buffer, which may be zero. + * @indir_size: On entry, the array size of the user buffer, which may be zero, + * or (for %ETHTOOL_SRSSH), %ETH_RXFH_INDIR_NO_CHANGE. * On return from %ETHTOOL_GRSSH, the array size of the hardware * indirection table. 
* @key_size: On entry, the array size of the user buffer in bytes, @@ -861,10 +862,10 @@ struct ethtool_rxfh_indir { * of size @indir_size followed by hash key of size @key_size. * * For %ETHTOOL_GRSSH, a @indir_size and key_size of zero means that only the - * size should be returned. For %ETHTOOL_SRSSH, a @indir_size of 0xDEADBEEF - * means that indir table setting is not requested and a @indir_size of zero - * means the indir table should be reset to default values. This last feature - * is not supported by the original implementations. + * size should be returned. For %ETHTOOL_SRSSH, an @indir_size of + * %ETH_RXFH_INDIR_NO_CHANGE means that indir table setting is not requested + * and a @indir_size of zero means the indir table should be reset to default + * values. */ struct ethtool_rxfh { __u32 cmd; @@ -874,6 +875,7 @@ struct ethtool_rxfh { __u32 rsvd[2]; __u32 rss_config[0]; }; +#define ETH_RXFH_INDIR_NO_CHANGE 0xffffffff /** * struct ethtool_rx_ntuple_flow_spec - specification for RX flow filter diff --git a/net/core/ethtool.c b/net/core/ethtool.c index c834cb29f682..7156fe5ca876 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -803,12 +803,13 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, /* If either indir or hash key is valid, proceed further. */ - if ((user_indir_size && ((user_indir_size != 0xDEADBEEF) && - user_indir_size != dev_indir_size)) || + if ((user_indir_size && + user_indir_size != ETH_RXFH_INDIR_NO_CHANGE && + user_indir_size != dev_indir_size) || (user_key_size && (user_key_size != dev_key_size))) return -EINVAL; - if (user_indir_size != 0xDEADBEEF) + if (user_indir_size != ETH_RXFH_INDIR_NO_CHANGE) indir_bytes = dev_indir_size * sizeof(indir[0]); rss_config = kzalloc(indir_bytes + user_key_size, GFP_USER); @@ -821,9 +822,10 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, goto out; /* user_indir_size == 0 means reset the indir table to default. - * user_indir_size == 0xDEADBEEF means indir setting is not requested. + * user_indir_size == ETH_RXFH_INDIR_NO_CHANGE means leave it unchanged. */ - if (user_indir_size && user_indir_size != 0xDEADBEEF) { + if (user_indir_size && + user_indir_size != ETH_RXFH_INDIR_NO_CHANGE) { indir = (u32 *)rss_config; ret = ethtool_copy_validate_indir(indir, useraddr + rss_cfg_offset, -- cgit v1.2.3-59-g8ed1b From 61d88c6811f216de4ec26aafe24e650dc1aeb00e Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 19 May 2014 01:29:42 +0100 Subject: ethtool: Disallow ETHTOOL_SRSSH with both indir table and hash key unchanged This would be a no-op, so there is no reason to request it. This also allows conversion of the current implementations of ethtool_ops::{get,set}_rxfh_indir to ethtool_ops::{get,set}_rxfh with no change other than their parameters. Signed-off-by: Ben Hutchings --- include/linux/ethtool.h | 4 ++-- net/core/ethtool.c | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'net/core') diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 886e127d51a6..de687a97c6e7 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -168,8 +168,8 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) * @set_rxfh_indir: Set the contents of the RX flow hash indirection table. * Will not be called if @get_rxfh_indir_size returns zero. * @set_rxfh: Set the contents of the RX flow hash indirection table and/or - * hash key. Either or both arguments may be %NULL if that attribute - * is not to be changed. 
+ * hash key. In case only the indirection table or hash key is to be + * changed, the other argument will be %NULL. * Will only be called if one or both of @get_rxfh_indir_size and * @get_rxfh_key_size are implemented and return non-zero. * Returns a negative error code or zero. diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 7156fe5ca876..b8857348bdf3 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -802,11 +802,14 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, return -EFAULT; /* If either indir or hash key is valid, proceed further. + * It is not valid to request that both be unchanged. */ if ((user_indir_size && user_indir_size != ETH_RXFH_INDIR_NO_CHANGE && user_indir_size != dev_indir_size) || - (user_key_size && (user_key_size != dev_key_size)) + (user_key_size && (user_key_size != dev_key_size)) || + (user_indir_size == ETH_RXFH_INDIR_NO_CHANGE && + user_key_size == 0)) return -EINVAL; if (user_indir_size != ETH_RXFH_INDIR_NO_CHANGE)
-- cgit v1.2.3-59-g8ed1b

From 5fe821a9dee241fa450703ab7015d970ee0cfb8d Mon Sep 17 00:00:00 2001
From: Alexei Starovoitov
Date: Mon, 19 May 2014 14:56:14 -0700
Subject: net: filter: cleanup invocation of internal BPF

Kernel API for classic BPF socket filters is:

sk_unattached_filter_create() - validate classic BPF, convert, JIT
SK_RUN_FILTER() - run it
sk_unattached_filter_destroy() - destroy socket filter

Cleanup internal BPF kernel API as following:

sk_filter_select_runtime() - final step of internal BPF creation.
  Try to JIT internal BPF program, if JIT is not available select interpreter
SK_RUN_FILTER() - run it
sk_filter_free() - free internal BPF program

Disallow direct calls to BPF interpreter. Execution of the BPF program
should be done with SK_RUN_FILTER() macro.

Example of internal BPF create, run, destroy:

  struct sk_filter *fp;

  fp = kzalloc(sk_filter_size(prog_len), GFP_KERNEL);
  memcpy(fp->insni, prog, prog_len * sizeof(fp->insni[0]));
  fp->len = prog_len;

  sk_filter_select_runtime(fp);
  SK_RUN_FILTER(fp, ctx);
  sk_filter_free(fp);

Sockets, seccomp, testsuite, tracing are using different ways to populate
sk_filter, so first steps of program creation are not common.

Signed-off-by: Alexei Starovoitov
Acked-by: Daniel Borkmann
Signed-off-by: David S.
Miller --- include/linux/filter.h | 6 ++---- kernel/seccomp.c | 6 ++---- lib/test_bpf.c | 4 ++-- net/core/filter.c | 44 ++++++++++++++++++++++++++++---------------- 4 files changed, 34 insertions(+), 26 deletions(-) (limited to 'net/core') diff --git a/include/linux/filter.h b/include/linux/filter.h index 9d5ae0a2c954..7977b3958e25 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -184,10 +184,8 @@ static inline unsigned int sk_filter_size(unsigned int proglen) int sk_filter(struct sock *sk, struct sk_buff *skb); -u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx, - const struct sock_filter_int *insni); -u32 sk_run_filter_int_skb(const struct sk_buff *ctx, - const struct sock_filter_int *insni); +void sk_filter_select_runtime(struct sk_filter *fp); +void sk_filter_free(struct sk_filter *fp); int sk_convert_filter(struct sock_filter *prog, int len, struct sock_filter_int *new_prog, int *new_len); diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 7e02d624cc50..1036b6f2fded 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -273,10 +273,8 @@ static long seccomp_attach_filter(struct sock_fprog *fprog) atomic_set(&filter->usage, 1); filter->prog->len = new_len; - filter->prog->bpf_func = (void *)sk_run_filter_int_seccomp; - /* JIT internal BPF into native HW instructions */ - bpf_int_jit_compile(filter->prog); + sk_filter_select_runtime(filter->prog); /* * If there is an existing filter, make it the prev and don't drop its @@ -340,7 +338,7 @@ void put_seccomp_filter(struct task_struct *tsk) while (orig && atomic_dec_and_test(&orig->usage)) { struct seccomp_filter *freeme = orig; orig = orig->prev; - bpf_jit_free(freeme->prog); + sk_filter_free(freeme->prog); kfree(freeme); } } diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 3603ebcd5d65..e160934430eb 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -1489,7 +1489,7 @@ static __init int test_bpf(void) memcpy(fp_ext->insns, tests[i].insns_int, fprog.len * 8); fp->len = fprog.len; - fp->bpf_func = sk_run_filter_int_skb; + sk_filter_select_runtime(fp); } else { err = sk_unattached_filter_create(&fp, &fprog); if (tests[i].data_type == EXPECTED_FAIL) { @@ -1516,7 +1516,7 @@ static __init int test_bpf(void) if (tests[i].data_type != SKB_INT) sk_unattached_filter_destroy(fp); else - kfree(fp); + sk_filter_free(fp); if (err) { pr_cont("FAIL %d\n", err); diff --git a/net/core/filter.c b/net/core/filter.c index 32c5b44c537e..7067cb240d3e 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -153,7 +153,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) * keep, 0 for none. @ctx is the data we are operating on, @insn is the * array of filter instructions. */ -unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn) +static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn) { u64 stack[MAX_BPF_STACK / sizeof(u64)]; u64 regs[MAX_BPF_REG], tmp; @@ -571,15 +571,6 @@ load_byte: return 0; } -u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx, - const struct sock_filter_int *insni) - __attribute__ ((alias ("__sk_run_filter"))); - -u32 sk_run_filter_int_skb(const struct sk_buff *ctx, - const struct sock_filter_int *insni) - __attribute__ ((alias ("__sk_run_filter"))); -EXPORT_SYMBOL_GPL(sk_run_filter_int_skb); - /* Helper to find the offset of pkt_type in sk_buff structure. We want * to make sure its still a 3bit field starting at a byte boundary; * taken from arch/x86/net/bpf_jit_comp.c. 
@@ -1397,7 +1388,7 @@ static void sk_filter_release_rcu(struct rcu_head *rcu) struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); sk_release_orig_filter(fp); - bpf_jit_free(fp); + sk_filter_free(fp); } /** @@ -1497,7 +1488,6 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp, goto out_err_free; } - fp->bpf_func = sk_run_filter_int_skb; fp->len = new_len; /* 2nd pass: remap sock_filter insns into sock_filter_int insns. */ @@ -1510,6 +1500,8 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp, */ goto out_err_free; + sk_filter_select_runtime(fp); + kfree(old_prog); return fp; @@ -1528,6 +1520,29 @@ void __weak bpf_int_jit_compile(struct sk_filter *prog) { } +/** + * sk_filter_select_runtime - select execution runtime for BPF program + * @fp: sk_filter populated with internal BPF program + * + * try to JIT internal BPF program, if JIT is not available select interpreter + * BPF program will be executed via SK_RUN_FILTER() macro + */ +void sk_filter_select_runtime(struct sk_filter *fp) +{ + fp->bpf_func = (void *) __sk_run_filter; + + /* Probe if internal BPF can be JITed */ + bpf_int_jit_compile(fp); +} +EXPORT_SYMBOL_GPL(sk_filter_select_runtime); + +/* free internal BPF program */ +void sk_filter_free(struct sk_filter *fp) +{ + bpf_jit_free(fp); +} +EXPORT_SYMBOL_GPL(sk_filter_free); + static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp, struct sock *sk) { @@ -1548,12 +1563,9 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp, /* JIT compiler couldn't process this filter, so do the * internal BPF translation for the optimized interpreter. */ - if (!fp->jited) { + if (!fp->jited) fp = __sk_migrate_filter(fp, sk); - /* Probe if internal BPF can be jit-ed */ - bpf_int_jit_compile(fp); - } return fp; } -- cgit v1.2.3-59-g8ed1b From e876f208af18b074f800656e4d1b99da75b2135f Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Mon, 19 May 2014 13:59:52 -0300 Subject: net: Add a software TSO helper API Although the implementation probably needs a lot of work, this initial API allows to implement software TSO in mvneta and mv643xx_eth drivers in a not so intrusive way. Signed-off-by: Ezequiel Garcia Signed-off-by: David S. 
Miller --- include/net/tso.h | 20 ++++++++++++++++ net/core/Makefile | 2 +- net/core/tso.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 93 insertions(+), 1 deletion(-) create mode 100644 include/net/tso.h create mode 100644 net/core/tso.c (limited to 'net/core') diff --git a/include/net/tso.h b/include/net/tso.h new file mode 100644 index 000000000000..47e5444f7d15 --- /dev/null +++ b/include/net/tso.h @@ -0,0 +1,20 @@ +#ifndef _TSO_H +#define _TSO_H + +#include + +struct tso_t { + int next_frag_idx; + void *data; + size_t size; + u16 ip_id; + u32 tcp_seq; +}; + +int tso_count_descs(struct sk_buff *skb); +void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, + int size, bool is_last); +void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size); +void tso_start(struct sk_buff *skb, struct tso_t *tso); + +#endif /* _TSO_H */ diff --git a/net/core/Makefile b/net/core/Makefile index 826b925aa453..71093d94ad2b 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \ neighbour.o rtnetlink.o utils.o link_watch.o filter.o \ - sock_diag.o dev_ioctl.o + sock_diag.o dev_ioctl.o tso.o obj-$(CONFIG_XFRM) += flow.o obj-y += net-sysfs.o diff --git a/net/core/tso.c b/net/core/tso.c new file mode 100644 index 000000000000..097821dd3a8c --- /dev/null +++ b/net/core/tso.c @@ -0,0 +1,72 @@ +#include +#include + +/* Calculate expected number of TX descriptors */ +int tso_count_descs(struct sk_buff *skb) +{ + /* The Marvell Way */ + return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags; +} + +void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, + int size, bool is_last) +{ + struct iphdr *iph; + struct tcphdr *tcph; + int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int mac_hdr_len = skb_network_offset(skb); + + memcpy(hdr, skb->data, hdr_len); + iph = (struct iphdr *)(hdr + mac_hdr_len); + iph->id = htons(tso->ip_id); + iph->tot_len = htons(size + hdr_len - mac_hdr_len); + tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb)); + tcph->seq = htonl(tso->tcp_seq); + tso->ip_id++; + + if (!is_last) { + /* Clear all special flags for not last packet */ + tcph->psh = 0; + tcph->fin = 0; + tcph->rst = 0; + } +} + +void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size) +{ + tso->tcp_seq += size; + tso->size -= size; + tso->data += size; + + if ((tso->size == 0) && + (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; + + /* Move to next segment */ + tso->size = frag->size; + tso->data = page_address(frag->page.p) + frag->page_offset; + tso->next_frag_idx++; + } +} + +void tso_start(struct sk_buff *skb, struct tso_t *tso) +{ + int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + + tso->ip_id = ntohs(ip_hdr(skb)->id); + tso->tcp_seq = ntohl(tcp_hdr(skb)->seq); + tso->next_frag_idx = 0; + + /* Build first data */ + tso->size = skb_headlen(skb) - hdr_len; + tso->data = skb->data + hdr_len; + if ((tso->size == 0) && + (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; + + /* Move to next segment */ + tso->size = frag->size; + tso->data = page_address(frag->page.p) + frag->page_offset; + tso->next_frag_idx++; + } +} -- cgit v1.2.3-59-g8ed1b From ed616689a3d95eb6c9bdbb1ef74b0f50cbdf276a Mon Sep 17 00:00:00 2001 From: Sucheta Chakraborty Date: Thu, 22 May 
2014 09:59:05 -0400
Subject: net-next:v4: Add support to configure SR-IOV VF minimum and maximum Tx rate through ip tool.

o min_tx_rate puts lower limit on the VF bandwidth. VF is guaranteed
  to have a bandwidth of at least this value.
  max_tx_rate puts cap on the VF bandwidth. VF can have a bandwidth
  of up to this value.

o A new handler set_vf_rate for attr IFLA_VF_RATE has been introduced
  which takes 4 arguments: netdev, VF number, min_tx_rate, max_tx_rate

o ndo_set_vf_rate replaces ndo_set_vf_tx_rate handler.

o Drivers that currently implement ndo_set_vf_tx_rate should now call
  ndo_set_vf_rate instead and reject attempt to set a minimum bandwidth
  greater than 0 for IFLA_VF_TX_RATE when IFLA_VF_RATE is not yet
  implemented by driver.

o If user enters only one of either min_tx_rate or max_tx_rate, then,
  userland should read back the other value from driver and set both
  for IFLA_VF_RATE. Drivers that have not yet implemented IFLA_VF_RATE
  should always return min_tx_rate as 0 when read from ip tool.

o If both IFLA_VF_TX_RATE and IFLA_VF_RATE options are specified, then
  IFLA_VF_RATE should override.

o Idea is to have consistent display of rate values to user.

o Usage example: -

  ./ip link set p4p1 vf 0 rate 900

  ./ip link show p4p1
  32: p4p1: mtu 1500 qdisc noop state DOWN mode DEFAULT qlen 1000
    link/ether 00:0e:1e:08:b0:f0 brd ff:ff:ff:ff:ff:ff
    vf 0 MAC 3e:a0:ca:bd:ae:5a, tx rate 900 (Mbps), max_tx_rate 900Mbps
    vf 1 MAC f6:c6:7c:3f:3d:6c
    vf 2 MAC 56:32:43:98:d7:71
    vf 3 MAC d6:be:c3:b5:85:ff
    vf 4 MAC ee:a9:9a:1e:19:14
    vf 5 MAC 4a:d0:4c:07:52:18
    vf 6 MAC 3a:76:44:93:62:f9
    vf 7 MAC 82:e9:e7:e3:15:1a

  ./ip link set p4p1 vf 0 max_tx_rate 300 min_tx_rate 200

  ./ip link show p4p1
  32: p4p1: mtu 1500 qdisc noop state DOWN mode DEFAULT qlen 1000
    link/ether 00:0e:1e:08:b0:f0 brd ff:ff:ff:ff:ff:ff
    vf 0 MAC 3e:a0:ca:bd:ae:5a, tx rate 300 (Mbps), max_tx_rate 300Mbps, min_tx_rate 200Mbps
    vf 1 MAC f6:c6:7c:3f:3d:6c
    vf 2 MAC 56:32:43:98:d7:71
    vf 3 MAC d6:be:c3:b5:85:ff
    vf 4 MAC ee:a9:9a:1e:19:14
    vf 5 MAC 4a:d0:4c:07:52:18
    vf 6 MAC 3a:76:44:93:62:f9
    vf 7 MAC 82:e9:e7:e3:15:1a

  ./ip link set p4p1 vf 0 max_tx_rate 600 rate 300

  ./ip link show p4p1
  32: p4p1: mtu 1500 qdisc noop state DOWN mode DEFAULT qlen 1000
    link/ether 00:0e:1e:08:b0:f brd ff:ff:ff:ff:ff:ff
    vf 0 MAC 3e:a0:ca:bd:ae:5, tx rate 600 (Mbps), max_tx_rate 600Mbps, min_tx_rate 200Mbps
    vf 1 MAC f6:c6:7c:3f:3d:6c
    vf 2 MAC 56:32:43:98:d7:71
    vf 3 MAC d6:be:c3:b5:85:ff
    vf 4 MAC ee:a9:9a:1e:19:14
    vf 5 MAC 4a:d0:4c:07:52:18
    vf 6 MAC 3a:76:44:93:62:f9
    vf 7 MAC 82:e9:e7:e3:15:1a

Signed-off-by: Sucheta Chakraborty
Signed-off-by: David S.
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 3 +- drivers/net/ethernet/emulex/benet/be_main.c | 21 +++++---- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 26 +++++++---- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 3 +- drivers/net/ethernet/intel/igb/igb_main.c | 20 ++++++--- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 13 ++++-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 3 +- drivers/net/ethernet/mellanox/mlx4/cmd.c | 11 ++--- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 1 + drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 2 +- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 2 +- .../ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 1 + .../net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 52 +++++++++++++++------- drivers/net/ethernet/sfc/siena_sriov.c | 3 +- include/linux/if_link.h | 3 +- include/linux/netdevice.h | 8 ++-- include/uapi/linux/if_link.h | 9 +++- net/core/rtnetlink.c | 38 +++++++++++++--- 20 files changed, 156 insertions(+), 67 deletions(-) (limited to 'net/core') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 81cc2d9831c2..8d0479d5be8e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -2576,7 +2576,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, ivi->vf = vfidx; ivi->qos = 0; - ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */ + ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */ + ivi->min_tx_rate = 0; ivi->spoofchk = 1; /*always enabled */ if (vf->state == VF_ENABLED) { /* mac and vlan are in vlan_mac objects */ diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index dcc5e5c69743..4693d004a223 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1302,7 +1302,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf, return -EINVAL; vi->vf = vf; - vi->tx_rate = vf_cfg->tx_rate; + vi->max_tx_rate = vf_cfg->tx_rate; + vi->min_tx_rate = 0; vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK; vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT; memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); @@ -1342,7 +1343,8 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) return status; } -static int be_set_vf_tx_rate(struct net_device *netdev, int vf, int rate) +static int be_set_vf_tx_rate(struct net_device *netdev, int vf, + int min_tx_rate, int max_tx_rate) { struct be_adapter *adapter = netdev_priv(netdev); int status = 0; @@ -1353,18 +1355,21 @@ static int be_set_vf_tx_rate(struct net_device *netdev, int vf, int rate) if (vf >= adapter->num_vfs) return -EINVAL; - if (rate < 100 || rate > 10000) { + if (min_tx_rate) + return -EINVAL; + + if (max_tx_rate < 100 || max_tx_rate > 10000) { dev_err(&adapter->pdev->dev, - "tx rate must be between 100 and 10000 Mbps\n"); + "max tx rate must be between 100 and 10000 Mbps\n"); return -EINVAL; } - status = be_cmd_config_qos(adapter, rate / 10, vf + 1); + status = be_cmd_config_qos(adapter, max_tx_rate / 10, vf + 1); if (status) dev_err(&adapter->pdev->dev, - "tx rate %d on VF %d failed\n", rate, vf); + "max tx rate %d on VF %d failed\n", max_tx_rate, vf); else - adapter->vf_cfg[vf].tx_rate = rate; + adapter->vf_cfg[vf].tx_rate = max_tx_rate; return status; } static int 
be_set_vf_link_state(struct net_device *netdev, int vf, @@ -4257,7 +4262,7 @@ static const struct net_device_ops be_netdev_ops = { .ndo_vlan_rx_kill_vid = be_vlan_rem_vid, .ndo_set_vf_mac = be_set_vf_mac, .ndo_set_vf_vlan = be_set_vf_vlan, - .ndo_set_vf_tx_rate = be_set_vf_tx_rate, + .ndo_set_vf_rate = be_set_vf_tx_rate, .ndo_get_vf_config = be_get_vf_config, .ndo_set_vf_link_state = be_set_vf_link_state, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e0e5c6a867b1..96f7fabd8758 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6724,7 +6724,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_set_features = i40e_set_features, .ndo_set_vf_mac = i40e_ndo_set_vf_mac, .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, - .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw, + .ndo_set_vf_rate = i40e_ndo_set_vf_bw, .ndo_get_vf_config = i40e_ndo_get_vf_config, .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, #ifdef CONFIG_I40E_VXLAN diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 4d219566a04d..8564b0939dc4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2205,7 +2205,8 @@ error_pvid: * * configure vf tx rate **/ -int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate) +int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, + int max_tx_rate) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; @@ -2221,6 +2222,12 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate) goto error; } + if (min_tx_rate) { + dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n", + min_tx_rate, vf_id); + return -EINVAL; + } + vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_index]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { @@ -2243,23 +2250,23 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate) break; } - if (tx_rate > speed) { - dev_err(&pf->pdev->dev, "Invalid tx rate %d specified for vf %d.", - tx_rate, vf->vf_id); + if (max_tx_rate > speed) { + dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.", + max_tx_rate, vf->vf_id); ret = -EINVAL; goto error; } /* Tx rate credits are in values of 50Mbps, 0 is disabled*/ - ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, tx_rate / 50, 0, - NULL); + ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, max_tx_rate / 50, + 0, NULL); if (ret) { - dev_err(&pf->pdev->dev, "Unable to set tx rate, error code %d.\n", + dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n", ret); ret = -EIO; goto error; } - vf->tx_rate = tx_rate; + vf->tx_rate = max_tx_rate; error: return ret; } @@ -2301,7 +2308,8 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN); - ivi->tx_rate = vf->tx_rate; + ivi->max_tx_rate = vf->tx_rate; + ivi->min_tx_rate = 0; ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >> I40E_VLAN_PRIORITY_SHIFT; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index ba3d1f8414be..5a559be4ba2c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ 
b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -116,7 +116,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf); int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos); -int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate); +int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, + int max_tx_rate); int i40e_ndo_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index bfcda8a455f4..1075b3f8c415 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -169,7 +169,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter); static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); -static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); +static int igb_ndo_set_vf_bw(struct net_device *, int, int, int); static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, @@ -2084,7 +2084,7 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, .ndo_set_vf_mac = igb_ndo_set_vf_mac, .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, - .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw, + .ndo_set_vf_rate = igb_ndo_set_vf_bw, .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, .ndo_get_vf_config = igb_ndo_get_vf_config, #ifdef CONFIG_NET_POLL_CONTROLLER @@ -7879,7 +7879,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter) } } -static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) +static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, + int min_tx_rate, int max_tx_rate) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -7888,15 +7889,19 @@ static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) if (hw->mac.type != e1000_82576) return -EOPNOTSUPP; + if (min_tx_rate) + return -EINVAL; + actual_link_speed = igb_link_mbps(adapter->link_speed); if ((vf >= adapter->vfs_allocated_count) || (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || - (tx_rate < 0) || (tx_rate > actual_link_speed)) + (max_tx_rate < 0) || + (max_tx_rate > actual_link_speed)) return -EINVAL; adapter->vf_rate_link_speed = actual_link_speed; - adapter->vf_data[vf].tx_rate = (u16)tx_rate; - igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); + adapter->vf_data[vf].tx_rate = (u16)max_tx_rate; + igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed); return 0; } @@ -7936,7 +7941,8 @@ static int igb_ndo_get_vf_config(struct net_device *netdev, return -EINVAL; ivi->vf = vf; memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); - ivi->tx_rate = adapter->vf_data[vf].tx_rate; + ivi->max_tx_rate = adapter->vf_data[vf].tx_rate; + ivi->min_tx_rate = 0; ivi->vlan = adapter->vf_data[vf].pf_vlan; ivi->qos = adapter->vf_data[vf].pf_qos; ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 8089ea9f2fba..a5332389620a 100644 --- 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7926,7 +7926,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_do_ioctl = ixgbe_ioctl, .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, - .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, + .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index a01417c06620..3248e208c9dc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1222,7 +1222,8 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) } } -int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, + int max_tx_rate) { struct ixgbe_adapter *adapter = netdev_priv(netdev); int link_speed; @@ -1240,13 +1241,16 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) if (link_speed != 10000) return -EINVAL; + if (min_tx_rate) + return -EINVAL; + /* rate limit cannot be less than 10Mbs or greater than link speed */ - if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed))) + if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed))) return -EINVAL; /* store values */ adapter->vf_rate_link_speed = link_speed; - adapter->vfinfo[vf].tx_rate = tx_rate; + adapter->vfinfo[vf].tx_rate = max_tx_rate; /* update hardware configuration */ ixgbe_set_vf_rate_limit(adapter, vf); @@ -1288,7 +1292,8 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev, return -EINVAL; ivi->vf = vf; memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); - ivi->tx_rate = adapter->vfinfo[vf].tx_rate; + ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate; + ivi->min_tx_rate = 0; ivi->vlan = adapter->vfinfo[vf].pf_vlan; ivi->qos = adapter->vfinfo[vf].pf_qos; ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index cea640147604..32c26d586c01 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -44,7 +44,8 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, u8 qos); -int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, + int max_tx_rate); int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 59c7fd406805..ca8e7cb5a8e4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2486,11 +2486,12 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff); ivf->mac[5] = ((s_info->mac) & 0xff); - ivf->vlan = s_info->default_vlan; - ivf->qos = s_info->default_qos; - ivf->tx_rate = s_info->tx_rate; - ivf->spoofchk = s_info->spoofchk; - ivf->linkstate = 
s_info->link_state; + ivf->vlan = s_info->default_vlan; + ivf->qos = s_info->default_qos; + ivf->max_tx_rate = s_info->tx_rate; + ivf->min_tx_rate = 0; + ivf->spoofchk = s_info->spoofchk; + ivf->linkstate = s_info->link_state; return 0; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 6e7527e2b595..41abe6070466 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1319,6 +1319,7 @@ struct qlcnic_eswitch { #define QL_STATUS_INVALID_PARAM -1 #define MAX_BW 100 /* % of link speed */ +#define MIN_BW 1 /* % of link speed */ #define MAX_VLAN_ID 4095 #define MIN_VLAN_ID 2 #define DEFAULT_MAC_LEARN 1 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index f0a285359e66..f06ba90b4282 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -525,7 +525,7 @@ static const struct net_device_ops qlcnic_netdev_ops = { #endif #ifdef CONFIG_QLCNIC_SRIOV .ndo_set_vf_mac = qlcnic_sriov_set_vf_mac, - .ndo_set_vf_tx_rate = qlcnic_sriov_set_vf_tx_rate, + .ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate, .ndo_get_vf_config = qlcnic_sriov_get_vf_config, .ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan, .ndo_set_vf_spoofchk = qlcnic_sriov_set_vf_spoofchk, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h index 335b50f7bd3e..4677b2edccca 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h @@ -233,7 +233,7 @@ bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *, void qlcnic_sriov_pf_reset(struct qlcnic_adapter *); int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *); int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *); -int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int); +int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int, int); int qlcnic_sriov_get_vf_config(struct net_device *, int , struct ifla_vf_info *); int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 498fa6350c8d..2bdd9deffb38 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -201,6 +201,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) sriov->vf_info[i].vp = vp; vp->vlan_mode = QLC_GUEST_VLAN_MODE; vp->max_tx_bw = MAX_BW; + vp->min_tx_bw = MIN_BW; vp->spoofchk = false; random_ether_addr(vp->mac); dev_info(&adapter->pdev->dev, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 6d2f72f114f2..a29538b86edf 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -1848,7 +1848,8 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) return 0; } -int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate) +int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, + int min_tx_rate, int max_tx_rate) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_sriov *sriov = adapter->ahw->sriov; @@ -1863,35 +1864,52 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate) if (vf >= 
sriov->num_vfs) return -EINVAL; - if (tx_rate >= 10000 || tx_rate < 100) { + vf_info = &sriov->vf_info[vf]; + vp = vf_info->vp; + vpid = vp->handle; + + if (!min_tx_rate) + min_tx_rate = QLC_VF_MIN_TX_RATE; + + if (max_tx_rate && + (max_tx_rate >= 10000 || max_tx_rate < min_tx_rate)) { netdev_err(netdev, - "Invalid Tx rate, allowed range is [%d - %d]", - QLC_VF_MIN_TX_RATE, QLC_VF_MAX_TX_RATE); + "Invalid max Tx rate, allowed range is [%d - %d]", + min_tx_rate, QLC_VF_MAX_TX_RATE); return -EINVAL; } - if (tx_rate == 0) - tx_rate = 10000; + if (!max_tx_rate) + max_tx_rate = 10000; - vf_info = &sriov->vf_info[vf]; - vp = vf_info->vp; - vpid = vp->handle; + if (min_tx_rate && + (min_tx_rate > max_tx_rate || min_tx_rate < QLC_VF_MIN_TX_RATE)) { + netdev_err(netdev, + "Invalid min Tx rate, allowed range is [%d - %d]", + QLC_VF_MIN_TX_RATE, max_tx_rate); + return -EINVAL; + } if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) { if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid)) return -EIO; - nic_info.max_tx_bw = tx_rate / 100; + nic_info.max_tx_bw = max_tx_rate / 100; + nic_info.min_tx_bw = min_tx_rate / 100; nic_info.bit_offsets = BIT_0; if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid)) return -EIO; } - vp->max_tx_bw = tx_rate / 100; + vp->max_tx_bw = max_tx_rate / 100; + netdev_info(netdev, + "Setting Max Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n", + max_tx_rate, vp->max_tx_bw, vf); + vp->min_tx_bw = min_tx_rate / 100; netdev_info(netdev, - "Setting Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n", - tx_rate, vp->max_tx_bw, vf); + "Setting Min Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n", + min_tx_rate, vp->min_tx_bw, vf); return 0; } @@ -1990,9 +2008,13 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev, ivi->qos = vp->qos; ivi->spoofchk = vp->spoofchk; if (vp->max_tx_bw == MAX_BW) - ivi->tx_rate = 0; + ivi->max_tx_rate = 0; + else + ivi->max_tx_rate = vp->max_tx_bw * 100; + if (vp->min_tx_bw == MIN_BW) + ivi->min_tx_rate = 0; else - ivi->tx_rate = vp->max_tx_bw * 100; + ivi->min_tx_rate = vp->min_tx_bw * 100; ivi->vf = vf; return 0; diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index 9a9205e77896..43d2e64546ed 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -1633,7 +1633,8 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, ivi->vf = vf_i; ether_addr_copy(ivi->mac, vf->addr.mac_addr); - ivi->tx_rate = 0; + ivi->max_tx_rate = 0; + ivi->min_tx_rate = 0; tci = ntohs(vf->addr.tci); ivi->vlan = tci & VLAN_VID_MASK; ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7; diff --git a/include/linux/if_link.h b/include/linux/if_link.h index a86784dec3d3..119130e9298b 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -10,8 +10,9 @@ struct ifla_vf_info { __u8 mac[32]; __u32 vlan; __u32 qos; - __u32 tx_rate; __u32 spoofchk; __u32 linkstate; + __u32 min_tx_rate; + __u32 max_tx_rate; }; #endif /* _LINUX_IF_LINK_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f4ad247fd324..9ec3a945caf2 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -850,7 +850,8 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * SR-IOV management functions. 
* int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos); - * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate); + * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, + * int max_tx_rate); * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_config)(struct net_device *dev, * int vf, struct ifla_vf_info *ivf); @@ -1044,8 +1045,9 @@ struct net_device_ops { int queue, u8 *mac); int (*ndo_set_vf_vlan)(struct net_device *dev, int queue, u16 vlan, u8 qos); - int (*ndo_set_vf_tx_rate)(struct net_device *dev, - int vf, int rate); + int (*ndo_set_vf_rate)(struct net_device *dev, + int vf, int min_tx_rate, + int max_tx_rate); int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); int (*ndo_get_vf_config)(struct net_device *dev, diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 9a7f7ace6649..622e7910b8cc 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -399,9 +399,10 @@ enum { IFLA_VF_UNSPEC, IFLA_VF_MAC, /* Hardware queue specific attributes */ IFLA_VF_VLAN, - IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */ + IFLA_VF_TX_RATE, /* Max TX Bandwidth Allocation */ IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */ IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */ + IFLA_VF_RATE, /* Min and Max TX Bandwidth Allocation */ __IFLA_VF_MAX, }; @@ -423,6 +424,12 @@ struct ifla_vf_tx_rate { __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */ }; +struct ifla_vf_rate { + __u32 vf; + __u32 min_tx_rate; /* Min Bandwidth in Mbps */ + __u32 max_tx_rate; /* Max Bandwidth in Mbps */ +}; + struct ifla_vf_spoofchk { __u32 vf; __u32 setting; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 9837bebf93ce..d6417464dc66 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -767,8 +767,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev, size += num_vfs * (nla_total_size(sizeof(struct ifla_vf_mac)) + nla_total_size(sizeof(struct ifla_vf_vlan)) + - nla_total_size(sizeof(struct ifla_vf_tx_rate)) + - nla_total_size(sizeof(struct ifla_vf_spoofchk))); + nla_total_size(sizeof(struct ifla_vf_spoofchk)) + + nla_total_size(sizeof(struct ifla_vf_rate))); return size; } else return 0; @@ -1034,6 +1034,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, struct ifla_vf_info ivi; struct ifla_vf_mac vf_mac; struct ifla_vf_vlan vf_vlan; + struct ifla_vf_rate vf_rate; struct ifla_vf_tx_rate vf_tx_rate; struct ifla_vf_spoofchk vf_spoofchk; struct ifla_vf_link_state vf_linkstate; @@ -1054,6 +1055,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, break; vf_mac.vf = vf_vlan.vf = + vf_rate.vf = vf_tx_rate.vf = vf_spoofchk.vf = vf_linkstate.vf = ivi.vf; @@ -1061,7 +1063,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); vf_vlan.vlan = ivi.vlan; vf_vlan.qos = ivi.qos; - vf_tx_rate.rate = ivi.tx_rate; + vf_tx_rate.rate = ivi.max_tx_rate; + vf_rate.min_tx_rate = ivi.min_tx_rate; + vf_rate.max_tx_rate = ivi.max_tx_rate; vf_spoofchk.setting = ivi.spoofchk; vf_linkstate.link_state = ivi.linkstate; vf = nla_nest_start(skb, IFLA_VF_INFO); @@ -1071,6 +1075,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, } if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) || nla_put(skb, IFLA_VF_VLAN, 
sizeof(vf_vlan), &vf_vlan) || + nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate), + &vf_rate) || nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), &vf_tx_rate) || nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk), @@ -1177,6 +1183,8 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { .len = sizeof(struct ifla_vf_tx_rate) }, [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY, .len = sizeof(struct ifla_vf_spoofchk) }, + [IFLA_VF_RATE] = { .type = NLA_BINARY, + .len = sizeof(struct ifla_vf_rate) }, }; static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { @@ -1336,11 +1344,29 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr) } case IFLA_VF_TX_RATE: { struct ifla_vf_tx_rate *ivt; + struct ifla_vf_info ivf; ivt = nla_data(vf); err = -EOPNOTSUPP; - if (ops->ndo_set_vf_tx_rate) - err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, - ivt->rate); + if (ops->ndo_get_vf_config) + err = ops->ndo_get_vf_config(dev, ivt->vf, + &ivf); + if (err) + break; + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_rate) + err = ops->ndo_set_vf_rate(dev, ivt->vf, + ivf.min_tx_rate, + ivt->rate); + break; + } + case IFLA_VF_RATE: { + struct ifla_vf_rate *ivt; + ivt = nla_data(vf); + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_rate) + err = ops->ndo_set_vf_rate(dev, ivt->vf, + ivt->min_tx_rate, + ivt->max_tx_rate); break; } case IFLA_VF_SPOOFCHK: { -- cgit v1.2.3-59-g8ed1b From 28448b80456feafe07e2d05b6363b00f61f6171e Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Fri, 23 May 2014 08:47:19 -0700 Subject: net: Split sk_no_check into sk_no_check_{rx,tx} Define separate fields in the sock structure for configuring disabling checksums in both TX and RX-- sk_no_check_tx and sk_no_check_rx. The SO_NO_CHECK socket option only affects sk_no_check_tx. Also, removed UDP_CSUM_* defines since they are no longer necessary. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- drivers/scsi/iscsi_tcp.c | 2 +- include/net/sock.h | 6 ++++-- include/net/udp.h | 9 --------- net/appletalk/ddp.c | 2 +- net/core/sock.c | 4 ++-- net/decnet/af_decnet.c | 2 +- net/ipv4/udp.c | 2 +- net/ipv6/udp.c | 6 +++--- net/ipx/af_ipx.c | 2 +- net/ipx/ipx_route.c | 3 ++- net/l2tp/l2tp_core.c | 4 ++-- net/l2tp/l2tp_netlink.c | 3 +-- net/sctp/socket.c | 3 ++- 13 files changed, 21 insertions(+), 27 deletions(-) (limited to 'net/core') diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 11854845393b..a669f2d11c31 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -244,7 +244,7 @@ iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn) sk->sk_data_ready = tcp_sw_conn->old_data_ready; sk->sk_state_change = tcp_sw_conn->old_state_change; sk->sk_write_space = tcp_sw_conn->old_write_space; - sk->sk_no_check = 0; + sk->sk_no_check_tx = 0; write_unlock_bh(&sk->sk_callback_lock); } diff --git a/include/net/sock.h b/include/net/sock.h index 21569cf456ed..07b7fcd60d80 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -243,7 +243,8 @@ struct cg_proto; * @sk_sndbuf: size of send buffer in bytes * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings - * @sk_no_check: %SO_NO_CHECK setting, whether or not checkup packets + * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets + * @sk_no_check_rx: allow zero checksum in RX packets * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK) * @sk_gso_type: GSO type (e.g. 
%SKB_GSO_TCPV4) @@ -371,7 +372,8 @@ struct sock { struct sk_buff_head sk_write_queue; kmemcheck_bitfield_begin(flags); unsigned int sk_shutdown : 2, - sk_no_check : 2, + sk_no_check_tx : 1, + sk_no_check_rx : 1, sk_userlocks : 4, sk_protocol : 8, sk_type : 16; diff --git a/include/net/udp.h b/include/net/udp.h index a24f0f3e107f..5eb86874bcd6 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -95,15 +95,6 @@ static inline struct udp_hslot *udp_hashslot2(struct udp_table *table, return &table->hash2[hash & table->mask]; } -/* Note: this must match 'valbool' in sock_setsockopt */ -#define UDP_CSUM_NOXMIT 1 - -/* Used by SunRPC/xprt layer. */ -#define UDP_CSUM_NORCV 2 - -/* Default, as per the RFC, is to always do csums. */ -#define UDP_CSUM_DEFAULT 0 - extern struct proto udp_prot; extern atomic_long_t udp_memory_allocated; diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 786ee2f83d5f..01a1082e02b3 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1669,7 +1669,7 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr goto out; } - if (sk->sk_no_check == 1) + if (sk->sk_no_check_tx) ddp->deh_sum = 0; else ddp->deh_sum = atalk_checksum(skb, len + sizeof(*ddp)); diff --git a/net/core/sock.c b/net/core/sock.c index 664ee4295b6f..026e01f70274 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -784,7 +784,7 @@ set_rcvbuf: break; case SO_NO_CHECK: - sk->sk_no_check = valbool; + sk->sk_no_check_tx = valbool; break; case SO_PRIORITY: @@ -1064,7 +1064,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_NO_CHECK: - v.val = sk->sk_no_check; + v.val = sk->sk_no_check_tx; break; case SO_PRIORITY: diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 4c04848953bd..ae011b46c071 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -481,7 +481,7 @@ static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gf sk->sk_backlog_rcv = dn_nsp_backlog_rcv; sk->sk_destruct = dn_destruct; - sk->sk_no_check = 1; + sk->sk_no_check_tx = 1; sk->sk_family = PF_DECnet; sk->sk_protocol = 0; sk->sk_allocation = gfp; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 590532a7bd2d..12c6175b29cd 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -785,7 +785,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4) if (is_udplite) /* UDP-Lite */ csum = udplite_csum(skb); - else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */ + else if (sk->sk_no_check_tx) { /* UDP csum disabled */ skb->ip_summed = CHECKSUM_NONE; goto send; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index c7ed47bde54b..b8db453133aa 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -797,7 +797,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, /* If zero checksum and sk_no_check is not on for * the socket then skip it. 
*/ - if (uh->check || sk->sk_no_check) + if (uh->check || sk->sk_no_check_rx) stack[count++] = sk; sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr, @@ -887,7 +887,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (sk != NULL) { int ret; - if (!uh->check && !sk->sk_no_check) { + if (!uh->check && !sk->sk_no_check_rx) { sock_put(sk); udp6_csum_zero_error(skb); goto csum_error; @@ -1037,7 +1037,7 @@ static int udp_v6_push_pending_frames(struct sock *sk) if (is_udplite) csum = udplite_csum_outgoing(sk, skb); - else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */ + else if (sk->sk_no_check_tx) { /* UDP csum disabled */ skb->ip_summed = CHECKSUM_NONE; goto send; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 41e4e93cb3aa..91729b807c7d 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -1353,7 +1353,7 @@ static int ipx_create(struct net *net, struct socket *sock, int protocol, sk_refcnt_debug_inc(sk); sock_init_data(sock, sk); - sk->sk_no_check = 1; /* Checksum off by default */ + sk->sk_no_check_tx = 1; /* Checksum off by default */ sock->ops = &ipx_dgram_ops; rc = 0; out: diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c index c1f03185c5e1..67e7ad3d46b1 100644 --- a/net/ipx/ipx_route.c +++ b/net/ipx/ipx_route.c @@ -236,7 +236,8 @@ int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, } /* Apply checksum. Not allowed on 802.3 links. */ - if (sk->sk_no_check || intrfc->if_dlink_type == htons(IPX_FRAME_8023)) + if (sk->sk_no_check_tx || + intrfc->if_dlink_type == htons(IPX_FRAME_8023)) ipx->ipx_checksum = htons(0xFFFF); else ipx->ipx_checksum = ipx_cksum(ipx, len + sizeof(struct ipxhdr)); diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index ed0716a075ba..a1186105f537 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1188,7 +1188,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len l2tp_xmit_ipv6_csum(sk, skb, udp_len); else #endif - if (sk->sk_no_check == UDP_CSUM_NOXMIT) + if (sk->sk_no_check_tx) skb->ip_summed = CHECKSUM_NONE; else if ((skb_dst(skb) && skb_dst(skb)->dev) && (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) { @@ -1463,7 +1463,7 @@ static int l2tp_tunnel_sock_create(struct net *net, } if (!cfg->use_udp_checksums) - sock->sk->sk_no_check = UDP_CSUM_NOXMIT; + sock->sk->sk_no_check_tx = 1; break; diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index bd7387adea9e..f3d331bdd706 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -297,8 +297,7 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla case L2TP_ENCAPTYPE_UDP: if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) || nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) || - nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, - (sk->sk_no_check != UDP_CSUM_NOXMIT))) + nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, !sk->sk_no_check_tx)) goto nla_put_failure; /* NOBREAK */ case L2TP_ENCAPTYPE_IP: diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 2af76eaba8f7..429899689408 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -6946,7 +6946,8 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, newsk->sk_type = sk->sk_type; newsk->sk_bound_dev_if = sk->sk_bound_dev_if; newsk->sk_flags = sk->sk_flags; - newsk->sk_no_check = sk->sk_no_check; + newsk->sk_no_check_tx = sk->sk_no_check_tx; + newsk->sk_no_check_rx = sk->sk_no_check_rx; 
newsk->sk_reuse = sk->sk_reuse; newsk->sk_shutdown = sk->sk_shutdown; -- cgit v1.2.3-59-g8ed1b From 8556ce79d5986a87fee4c29300b4efee07c0f15e Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 23 May 2014 18:43:57 +0200 Subject: net: filter: remove DL macro Lets get rid of this macro. After commit 5bcfedf06f7f ("net: filter: simplify label names from jump-table"), labels have become more readable due to omission of BPF_ prefix but at the same time more generic, so that things like `git grep -n` would not find them. As a middle path, lets get rid of the DL macro as it's not strictly needed and would otherwise just hide the full name. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- include/linux/filter.h | 3 - net/core/filter.c | 193 +++++++++++++++++++++++++------------------------ 2 files changed, 99 insertions(+), 97 deletions(-) (limited to 'net/core') diff --git a/include/linux/filter.h b/include/linux/filter.h index 7977b3958e25..2b0056afd1f7 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -37,9 +37,6 @@ #define BPF_CALL 0x80 /* function call */ #define BPF_EXIT 0x90 /* function return */ -/* Placeholder/dummy for 0 */ -#define BPF_0 0 - /* Register numbers */ enum { BPF_REG_0 = 0, diff --git a/net/core/filter.c b/net/core/filter.c index 7067cb240d3e..b3b0889fe089 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -160,95 +160,100 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *ins static const void *jumptable[256] = { [0 ... 255] = &&default_label, /* Now overwrite non-defaults ... */ -#define DL(A, B, C) [BPF_##A|BPF_##B|BPF_##C] = &&A##_##B##_##C - DL(ALU, ADD, X), - DL(ALU, ADD, K), - DL(ALU, SUB, X), - DL(ALU, SUB, K), - DL(ALU, AND, X), - DL(ALU, AND, K), - DL(ALU, OR, X), - DL(ALU, OR, K), - DL(ALU, LSH, X), - DL(ALU, LSH, K), - DL(ALU, RSH, X), - DL(ALU, RSH, K), - DL(ALU, XOR, X), - DL(ALU, XOR, K), - DL(ALU, MUL, X), - DL(ALU, MUL, K), - DL(ALU, MOV, X), - DL(ALU, MOV, K), - DL(ALU, DIV, X), - DL(ALU, DIV, K), - DL(ALU, MOD, X), - DL(ALU, MOD, K), - DL(ALU, NEG, 0), - DL(ALU, END, TO_BE), - DL(ALU, END, TO_LE), - DL(ALU64, ADD, X), - DL(ALU64, ADD, K), - DL(ALU64, SUB, X), - DL(ALU64, SUB, K), - DL(ALU64, AND, X), - DL(ALU64, AND, K), - DL(ALU64, OR, X), - DL(ALU64, OR, K), - DL(ALU64, LSH, X), - DL(ALU64, LSH, K), - DL(ALU64, RSH, X), - DL(ALU64, RSH, K), - DL(ALU64, XOR, X), - DL(ALU64, XOR, K), - DL(ALU64, MUL, X), - DL(ALU64, MUL, K), - DL(ALU64, MOV, X), - DL(ALU64, MOV, K), - DL(ALU64, ARSH, X), - DL(ALU64, ARSH, K), - DL(ALU64, DIV, X), - DL(ALU64, DIV, K), - DL(ALU64, MOD, X), - DL(ALU64, MOD, K), - DL(ALU64, NEG, 0), - DL(JMP, CALL, 0), - DL(JMP, JA, 0), - DL(JMP, JEQ, X), - DL(JMP, JEQ, K), - DL(JMP, JNE, X), - DL(JMP, JNE, K), - DL(JMP, JGT, X), - DL(JMP, JGT, K), - DL(JMP, JGE, X), - DL(JMP, JGE, K), - DL(JMP, JSGT, X), - DL(JMP, JSGT, K), - DL(JMP, JSGE, X), - DL(JMP, JSGE, K), - DL(JMP, JSET, X), - DL(JMP, JSET, K), - DL(JMP, EXIT, 0), - DL(STX, MEM, B), - DL(STX, MEM, H), - DL(STX, MEM, W), - DL(STX, MEM, DW), - DL(STX, XADD, W), - DL(STX, XADD, DW), - DL(ST, MEM, B), - DL(ST, MEM, H), - DL(ST, MEM, W), - DL(ST, MEM, DW), - DL(LDX, MEM, B), - DL(LDX, MEM, H), - DL(LDX, MEM, W), - DL(LDX, MEM, DW), - DL(LD, ABS, W), - DL(LD, ABS, H), - DL(LD, ABS, B), - DL(LD, IND, W), - DL(LD, IND, H), - DL(LD, IND, B), -#undef DL + /* 32 bit ALU operations */ + [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X, + [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K, + [BPF_ALU | 
BPF_SUB | BPF_X] = &&ALU_SUB_X, + [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K, + [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X, + [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K, + [BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X, + [BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K, + [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X, + [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K, + [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X, + [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K, + [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X, + [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K, + [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X, + [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K, + [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X, + [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K, + [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X, + [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K, + [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X, + [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K, + [BPF_ALU | BPF_NEG] = &&ALU_NEG, + [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE, + [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE, + /* 64 bit ALU operations */ + [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X, + [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K, + [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X, + [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K, + [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X, + [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K, + [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X, + [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K, + [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X, + [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K, + [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X, + [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K, + [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X, + [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K, + [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X, + [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K, + [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X, + [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K, + [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X, + [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K, + [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X, + [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K, + [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X, + [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K, + [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG, + /* Call instruction */ + [BPF_JMP | BPF_CALL] = &&JMP_CALL, + /* Jumps */ + [BPF_JMP | BPF_JA] = &&JMP_JA, + [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X, + [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K, + [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X, + [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K, + [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X, + [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K, + [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X, + [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K, + [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X, + [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K, + [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X, + [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K, + [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X, + [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K, + /* Program return */ + [BPF_JMP | BPF_EXIT] = &&JMP_EXIT, + /* Store instructions */ + [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B, + [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H, + [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W, + [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW, + [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W, + [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW, + [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B, + [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H, + [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W, + [BPF_ST | BPF_MEM | BPF_DW] = 
&&ST_MEM_DW, + /* Load instructions */ + [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B, + [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H, + [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W, + [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW, + [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W, + [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H, + [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B, + [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W, + [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H, + [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B, }; void *ptr; int off; @@ -290,10 +295,10 @@ select_insn: ALU(XOR, ^) ALU(MUL, *) #undef ALU - ALU_NEG_0: + ALU_NEG: A = (u32) -A; CONT; - ALU64_NEG_0: + ALU64_NEG: A = -A; CONT; ALU_MOV_X: @@ -382,7 +387,7 @@ select_insn: CONT; /* CALL */ - JMP_CALL_0: + JMP_CALL: /* Function call scratches BPF_R1-BPF_R5 registers, * preserves BPF_R6-BPF_R9, and stores return value * into BPF_R0. @@ -392,7 +397,7 @@ select_insn: CONT; /* JMP */ - JMP_JA_0: + JMP_JA: insn += insn->off; CONT; JMP_JEQ_X: @@ -479,7 +484,7 @@ select_insn: CONT_JMP; } CONT; - JMP_EXIT_0: + JMP_EXIT: return BPF_R0; /* STX and ST and LDX*/ -- cgit v1.2.3-59-g8ed1b From b1fcd35cf53553a0a3ef949b05106d921446abc3 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 23 May 2014 18:43:58 +0200 Subject: net: filter: let unattached filters use sock_fprog_kern The sk_unattached_filter_create() API is used by BPF filters that are not directly attached or related to sockets, and are used in team, ptp, xt_bpf, cls_bpf, etc. As such all users do their own internal managment of obtaining filter blocks and thus already have them in kernel memory and set up before calling into sk_unattached_filter_create(). As a result, due to __user annotation in sock_fprog, sparse triggers false positives (incorrect type in assignment [different address space]) when filters are set up before passing them to sk_unattached_filter_create(). Therefore, let sk_unattached_filter_create() API use sock_fprog_kern to overcome this issue. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- drivers/isdn/i4l/isdn_ppp.c | 4 ++-- drivers/net/ppp/ppp_generic.c | 4 ++-- drivers/net/team/team_mode_loadbalance.c | 10 +++++----- include/linux/filter.h | 2 +- lib/test_bpf.c | 2 +- net/core/filter.c | 2 +- net/core/ptp_classifier.c | 2 +- net/netfilter/xt_bpf.c | 5 +++-- net/sched/cls_bpf.c | 4 ++-- 9 files changed, 18 insertions(+), 17 deletions(-) (limited to 'net/core') diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index a5da511e3c9a..61ac63237446 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -634,7 +634,7 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg) #ifdef CONFIG_IPPP_FILTER case PPPIOCSPASS: { - struct sock_fprog fprog; + struct sock_fprog_kern fprog; struct sock_filter *code; int err, len = get_filter(argp, &code); @@ -653,7 +653,7 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg) } case PPPIOCSACTIVE: { - struct sock_fprog fprog; + struct sock_fprog_kern fprog; struct sock_filter *code; int err, len = get_filter(argp, &code); diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index e3923ebb693f..91d6c1272fcf 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -757,7 +757,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) err = get_filter(argp, &code); if (err >= 0) { - struct sock_fprog fprog = { + struct sock_fprog_kern fprog = { .len = err, .filter = code, }; @@ -778,7 +778,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) err = get_filter(argp, &code); if (err >= 0) { - struct sock_fprog fprog = { + struct sock_fprog_kern fprog = { .len = err, .filter = code, }; diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c index dbde3412ee5e..0a6ee07bf0af 100644 --- a/drivers/net/team/team_mode_loadbalance.c +++ b/drivers/net/team/team_mode_loadbalance.c @@ -49,7 +49,7 @@ struct lb_port_mapping { struct lb_priv_ex { struct team *team; struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE]; - struct sock_fprog *orig_fprog; + struct sock_fprog_kern *orig_fprog; struct { unsigned int refresh_interval; /* in tenths of second */ struct delayed_work refresh_dw; @@ -241,10 +241,10 @@ static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx) return 0; } -static int __fprog_create(struct sock_fprog **pfprog, u32 data_len, +static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len, const void *data) { - struct sock_fprog *fprog; + struct sock_fprog_kern *fprog; struct sock_filter *filter = (struct sock_filter *) data; if (data_len % sizeof(struct sock_filter)) @@ -262,7 +262,7 @@ static int __fprog_create(struct sock_fprog **pfprog, u32 data_len, return 0; } -static void __fprog_destroy(struct sock_fprog *fprog) +static void __fprog_destroy(struct sock_fprog_kern *fprog) { kfree(fprog->filter); kfree(fprog); @@ -273,7 +273,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx) struct lb_priv *lb_priv = get_lb_priv(team); struct sk_filter *fp = NULL; struct sk_filter *orig_fp; - struct sock_fprog *fprog = NULL; + struct sock_fprog_kern *fprog = NULL; int err; if (ctx->data.bin_val.len) { diff --git a/include/linux/filter.h b/include/linux/filter.h index 2b0056afd1f7..625f4de9bdf2 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -188,7 +188,7 @@ int sk_convert_filter(struct sock_filter *prog, int len, struct 
sock_filter_int *new_prog, int *new_len); int sk_unattached_filter_create(struct sk_filter **pfp, - struct sock_fprog *fprog); + struct sock_fprog_kern *fprog); void sk_unattached_filter_destroy(struct sk_filter *fp); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 3d80adbdb559..e03991ea8cc2 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -1472,7 +1472,7 @@ static int run_one(struct sk_filter *fp, struct bpf_test *t) static __init int test_bpf(void) { struct sk_filter *fp, *fp_ext = NULL; - struct sock_fprog fprog; + struct sock_fprog_kern fprog; int err, i, err_cnt = 0; for (i = 0; i < ARRAY_SIZE(tests); i++) { diff --git a/net/core/filter.c b/net/core/filter.c index b3b0889fe089..2c2d35d9d101 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1585,7 +1585,7 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp, * a negative errno code is returned. On success the return is zero. */ int sk_unattached_filter_create(struct sk_filter **pfp, - struct sock_fprog *fprog) + struct sock_fprog_kern *fprog) { unsigned int fsize = sk_filter_proglen(fprog); struct sk_filter *fp; diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c index 37d86157b76e..d3027a73fd4b 100644 --- a/net/core/ptp_classifier.c +++ b/net/core/ptp_classifier.c @@ -133,7 +133,7 @@ void __init ptp_classifier_init(void) { 0x16, 0, 0, 0x00000000 }, { 0x06, 0, 0, 0x00000000 }, }; - struct sock_fprog ptp_prog = { + struct sock_fprog_kern ptp_prog = { .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter, }; diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c index 12d4da8e6c77..bbffdbdaf603 100644 --- a/net/netfilter/xt_bpf.c +++ b/net/netfilter/xt_bpf.c @@ -23,10 +23,11 @@ MODULE_ALIAS("ip6t_bpf"); static int bpf_mt_check(const struct xt_mtchk_param *par) { struct xt_bpf_info *info = par->matchinfo; - struct sock_fprog program; + struct sock_fprog_kern program; program.len = info->bpf_program_num_elem; - program.filter = (struct sock_filter __user *) info->bpf_program; + program.filter = info->bpf_program; + if (sk_unattached_filter_create(&info->filter, &program)) { pr_info("bpf: check failed: parse error\n"); return -EINVAL; diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 16186965af97..13f64df2c710 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -160,7 +160,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, { struct sock_filter *bpf_ops, *bpf_old; struct tcf_exts exts; - struct sock_fprog tmp; + struct sock_fprog_kern tmp; struct sk_filter *fp, *fp_old; u16 bpf_size, bpf_len; u32 classid; @@ -191,7 +191,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size); tmp.len = bpf_len; - tmp.filter = (struct sock_filter __user *) bpf_ops; + tmp.filter = bpf_ops; ret = sk_unattached_filter_create(&fp, &tmp); if (ret) -- cgit v1.2.3-59-g8ed1b From 484611e53047b55163822bfe60424a55a31775b3 Mon Sep 17 00:00:00 2001 From: Sachin Kamat Date: Mon, 26 May 2014 14:52:46 +0530 Subject: net: tso: Export symbols for modular build Export the symbols to fix the below errors when built as modules: ERROR: "tso_build_data" [drivers/net/ethernet/marvell/mvneta.ko] undefined! ERROR: "tso_build_hdr" [drivers/net/ethernet/marvell/mvneta.ko] undefined! ERROR: "tso_start" [drivers/net/ethernet/marvell/mvneta.ko] undefined! ERROR: "tso_count_descs" [drivers/net/ethernet/marvell/mvneta.ko] undefined! 
ERROR: "tso_build_data" [drivers/net/ethernet/marvell/mv643xx_eth.ko] undefined! ERROR: "tso_build_hdr" [drivers/net/ethernet/marvell/mv643xx_eth.ko] undefined! ERROR: "tso_start" [drivers/net/ethernet/marvell/mv643xx_eth.ko] undefined! ERROR: "tso_count_descs" [drivers/net/ethernet/marvell/mv643xx_eth.ko] undefined! Signed-off-by: Sachin Kamat Acked-by: Ezequiel Garcia Signed-off-by: David S. Miller --- net/core/tso.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'net/core') diff --git a/net/core/tso.c b/net/core/tso.c index 097821dd3a8c..8c3203c585b0 100644 --- a/net/core/tso.c +++ b/net/core/tso.c @@ -1,3 +1,4 @@ +#include #include #include @@ -7,6 +8,7 @@ int tso_count_descs(struct sk_buff *skb) /* The Marvell Way */ return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags; } +EXPORT_SYMBOL(tso_count_descs); void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, int size, bool is_last) @@ -31,6 +33,7 @@ void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, tcph->rst = 0; } } +EXPORT_SYMBOL(tso_build_hdr); void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size) { @@ -48,6 +51,7 @@ void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size) tso->next_frag_idx++; } } +EXPORT_SYMBOL(tso_build_data); void tso_start(struct sk_buff *skb, struct tso_t *tso) { @@ -70,3 +74,4 @@ void tso_start(struct sk_buff *skb, struct tso_t *tso) tso->next_frag_idx++; } } +EXPORT_SYMBOL(tso_start); -- cgit v1.2.3-59-g8ed1b From 3480593131e0b781287dae0139bf7ccee7cba7ff Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 29 May 2014 10:22:50 +0200 Subject: net: filter: get rid of BPF_S_* enum This patch finally allows us to get rid of the BPF_S_* enum. Currently, the code performs unnecessary encode and decode workarounds in seccomp and filter migration itself when a filter is being attached in order to overcome BPF_S_* encoding which is not used anymore by the new interpreter resp. JIT compilers. Keeping it around would mean that also in future we would need to extend and maintain this enum and related encoders/decoders. We can get rid of all that and save us these operations during filter attaching. Naturally, also JIT compilers need to be updated by this. Before JIT conversion is being done, each compiler checks if A is being loaded at startup to obtain information if it needs to emit instructions to clear A first. Since BPF extensions are a subset of BPF_LD | BPF_{W,H,B} | BPF_ABS variants, case statements for extensions can be removed at that point. To ease and minimalize code changes in the classic JITs, we have introduced bpf_anc_helper(). Tested with test_bpf on x86_64 (JIT, int), s390x (JIT, int), arm (JIT, int), i368 (int), ppc64 (JIT, int); for sparc we unfortunately didn't have access, but changes are analogous to the rest. Joint work with Alexei Starovoitov. Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Cc: Benjamin Herrenschmidt Cc: Martin Schwidefsky Cc: Mircea Gherzan Cc: Kees Cook Acked-by: Chema Gonzalez Signed-off-by: David S. 
Miller --- arch/arm/net/bpf_jit_32.c | 139 ++++++++-------- arch/powerpc/net/bpf_jit_64.S | 2 +- arch/powerpc/net/bpf_jit_comp.c | 157 +++++++++--------- arch/s390/net/bpf_jit_comp.c | 163 +++++++++---------- arch/sparc/net/bpf_jit_comp.c | 154 +++++++++--------- include/linux/filter.h | 108 +++++-------- kernel/seccomp.c | 83 +++++----- net/core/filter.c | 341 +++++++++++++++------------------------- 8 files changed, 498 insertions(+), 649 deletions(-) (limited to 'net/core') diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 6f879c319a9d..fb5503ce016f 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -136,7 +136,7 @@ static u16 saved_regs(struct jit_ctx *ctx) u16 ret = 0; if ((ctx->skf->len > 1) || - (ctx->skf->insns[0].code == BPF_S_RET_A)) + (ctx->skf->insns[0].code == (BPF_RET | BPF_A))) ret |= 1 << r_A; #ifdef CONFIG_FRAME_POINTER @@ -164,18 +164,10 @@ static inline int mem_words_used(struct jit_ctx *ctx) static inline bool is_load_to_a(u16 inst) { switch (inst) { - case BPF_S_LD_W_LEN: - case BPF_S_LD_W_ABS: - case BPF_S_LD_H_ABS: - case BPF_S_LD_B_ABS: - case BPF_S_ANC_CPU: - case BPF_S_ANC_IFINDEX: - case BPF_S_ANC_MARK: - case BPF_S_ANC_PROTOCOL: - case BPF_S_ANC_RXHASH: - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: - case BPF_S_ANC_QUEUE: + case BPF_LD | BPF_W | BPF_LEN: + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: return true; default: return false; @@ -215,7 +207,7 @@ static void build_prologue(struct jit_ctx *ctx) emit(ARM_MOV_I(r_X, 0), ctx); /* do not leak kernel data to userspace */ - if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst))) + if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst))) emit(ARM_MOV_I(r_A, 0), ctx); /* stack space for the BPF_MEM words */ @@ -480,36 +472,39 @@ static int build_body(struct jit_ctx *ctx) u32 k; for (i = 0; i < prog->len; i++) { + u16 code; + inst = &(prog->insns[i]); /* K as an immediate value operand */ k = inst->k; + code = bpf_anc_helper(inst); /* compute offsets only in the fake pass */ if (ctx->target == NULL) ctx->offsets[i] = ctx->idx * 4; - switch (inst->code) { - case BPF_S_LD_IMM: + switch (code) { + case BPF_LD | BPF_IMM: emit_mov_i(r_A, k, ctx); break; - case BPF_S_LD_W_LEN: + case BPF_LD | BPF_W | BPF_LEN: ctx->seen |= SEEN_SKB; BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); emit(ARM_LDR_I(r_A, r_skb, offsetof(struct sk_buff, len)), ctx); break; - case BPF_S_LD_MEM: + case BPF_LD | BPF_MEM: /* A = scratch[k] */ ctx->seen |= SEEN_MEM_WORD(k); emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); break; - case BPF_S_LD_W_ABS: + case BPF_LD | BPF_W | BPF_ABS: load_order = 2; goto load; - case BPF_S_LD_H_ABS: + case BPF_LD | BPF_H | BPF_ABS: load_order = 1; goto load; - case BPF_S_LD_B_ABS: + case BPF_LD | BPF_B | BPF_ABS: load_order = 0; load: /* the interpreter will deal with the negative K */ @@ -552,31 +547,31 @@ load_common: emit_err_ret(ARM_COND_NE, ctx); emit(ARM_MOV_R(r_A, ARM_R0), ctx); break; - case BPF_S_LD_W_IND: + case BPF_LD | BPF_W | BPF_IND: load_order = 2; goto load_ind; - case BPF_S_LD_H_IND: + case BPF_LD | BPF_H | BPF_IND: load_order = 1; goto load_ind; - case BPF_S_LD_B_IND: + case BPF_LD | BPF_B | BPF_IND: load_order = 0; load_ind: OP_IMM3(ARM_ADD, r_off, r_X, k, ctx); goto load_common; - case BPF_S_LDX_IMM: + case BPF_LDX | BPF_IMM: ctx->seen |= SEEN_X; emit_mov_i(r_X, k, ctx); break; - case BPF_S_LDX_W_LEN: + case BPF_LDX | BPF_W | BPF_LEN: ctx->seen |= SEEN_X 
| SEEN_SKB; emit(ARM_LDR_I(r_X, r_skb, offsetof(struct sk_buff, len)), ctx); break; - case BPF_S_LDX_MEM: + case BPF_LDX | BPF_MEM: ctx->seen |= SEEN_X | SEEN_MEM_WORD(k); emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); break; - case BPF_S_LDX_B_MSH: + case BPF_LDX | BPF_B | BPF_MSH: /* x = ((*(frame + k)) & 0xf) << 2; */ ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; /* the interpreter should deal with the negative K */ @@ -606,113 +601,113 @@ load_ind: emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx); emit(ARM_LSL_I(r_X, r_X, 2), ctx); break; - case BPF_S_ST: + case BPF_ST: ctx->seen |= SEEN_MEM_WORD(k); emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); break; - case BPF_S_STX: + case BPF_STX: update_on_xread(ctx); ctx->seen |= SEEN_MEM_WORD(k); emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); break; - case BPF_S_ALU_ADD_K: + case BPF_ALU | BPF_ADD | BPF_K: /* A += K */ OP_IMM3(ARM_ADD, r_A, r_A, k, ctx); break; - case BPF_S_ALU_ADD_X: + case BPF_ALU | BPF_ADD | BPF_X: update_on_xread(ctx); emit(ARM_ADD_R(r_A, r_A, r_X), ctx); break; - case BPF_S_ALU_SUB_K: + case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */ OP_IMM3(ARM_SUB, r_A, r_A, k, ctx); break; - case BPF_S_ALU_SUB_X: + case BPF_ALU | BPF_SUB | BPF_X: update_on_xread(ctx); emit(ARM_SUB_R(r_A, r_A, r_X), ctx); break; - case BPF_S_ALU_MUL_K: + case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */ emit_mov_i(r_scratch, k, ctx); emit(ARM_MUL(r_A, r_A, r_scratch), ctx); break; - case BPF_S_ALU_MUL_X: + case BPF_ALU | BPF_MUL | BPF_X: update_on_xread(ctx); emit(ARM_MUL(r_A, r_A, r_X), ctx); break; - case BPF_S_ALU_DIV_K: + case BPF_ALU | BPF_DIV | BPF_K: if (k == 1) break; emit_mov_i(r_scratch, k, ctx); emit_udiv(r_A, r_A, r_scratch, ctx); break; - case BPF_S_ALU_DIV_X: + case BPF_ALU | BPF_DIV | BPF_X: update_on_xread(ctx); emit(ARM_CMP_I(r_X, 0), ctx); emit_err_ret(ARM_COND_EQ, ctx); emit_udiv(r_A, r_A, r_X, ctx); break; - case BPF_S_ALU_OR_K: + case BPF_ALU | BPF_OR | BPF_K: /* A |= K */ OP_IMM3(ARM_ORR, r_A, r_A, k, ctx); break; - case BPF_S_ALU_OR_X: + case BPF_ALU | BPF_OR | BPF_X: update_on_xread(ctx); emit(ARM_ORR_R(r_A, r_A, r_X), ctx); break; - case BPF_S_ALU_XOR_K: + case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K; */ OP_IMM3(ARM_EOR, r_A, r_A, k, ctx); break; - case BPF_S_ANC_ALU_XOR_X: - case BPF_S_ALU_XOR_X: + case BPF_ANC | SKF_AD_ALU_XOR_X: + case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */ update_on_xread(ctx); emit(ARM_EOR_R(r_A, r_A, r_X), ctx); break; - case BPF_S_ALU_AND_K: + case BPF_ALU | BPF_AND | BPF_K: /* A &= K */ OP_IMM3(ARM_AND, r_A, r_A, k, ctx); break; - case BPF_S_ALU_AND_X: + case BPF_ALU | BPF_AND | BPF_X: update_on_xread(ctx); emit(ARM_AND_R(r_A, r_A, r_X), ctx); break; - case BPF_S_ALU_LSH_K: + case BPF_ALU | BPF_LSH | BPF_K: if (unlikely(k > 31)) return -1; emit(ARM_LSL_I(r_A, r_A, k), ctx); break; - case BPF_S_ALU_LSH_X: + case BPF_ALU | BPF_LSH | BPF_X: update_on_xread(ctx); emit(ARM_LSL_R(r_A, r_A, r_X), ctx); break; - case BPF_S_ALU_RSH_K: + case BPF_ALU | BPF_RSH | BPF_K: if (unlikely(k > 31)) return -1; emit(ARM_LSR_I(r_A, r_A, k), ctx); break; - case BPF_S_ALU_RSH_X: + case BPF_ALU | BPF_RSH | BPF_X: update_on_xread(ctx); emit(ARM_LSR_R(r_A, r_A, r_X), ctx); break; - case BPF_S_ALU_NEG: + case BPF_ALU | BPF_NEG: /* A = -A */ emit(ARM_RSB_I(r_A, r_A, 0), ctx); break; - case BPF_S_JMP_JA: + case BPF_JMP | BPF_JA: /* pc += K */ emit(ARM_B(b_imm(i + k + 1, ctx)), ctx); break; - case BPF_S_JMP_JEQ_K: + case BPF_JMP | BPF_JEQ | BPF_K: /* pc += (A == K) ? 
pc->jt : pc->jf */ condt = ARM_COND_EQ; goto cmp_imm; - case BPF_S_JMP_JGT_K: + case BPF_JMP | BPF_JGT | BPF_K: /* pc += (A > K) ? pc->jt : pc->jf */ condt = ARM_COND_HI; goto cmp_imm; - case BPF_S_JMP_JGE_K: + case BPF_JMP | BPF_JGE | BPF_K: /* pc += (A >= K) ? pc->jt : pc->jf */ condt = ARM_COND_HS; cmp_imm: @@ -731,22 +726,22 @@ cond_jump: _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1, ctx)), ctx); break; - case BPF_S_JMP_JEQ_X: + case BPF_JMP | BPF_JEQ | BPF_X: /* pc += (A == X) ? pc->jt : pc->jf */ condt = ARM_COND_EQ; goto cmp_x; - case BPF_S_JMP_JGT_X: + case BPF_JMP | BPF_JGT | BPF_X: /* pc += (A > X) ? pc->jt : pc->jf */ condt = ARM_COND_HI; goto cmp_x; - case BPF_S_JMP_JGE_X: + case BPF_JMP | BPF_JGE | BPF_X: /* pc += (A >= X) ? pc->jt : pc->jf */ condt = ARM_COND_CS; cmp_x: update_on_xread(ctx); emit(ARM_CMP_R(r_A, r_X), ctx); goto cond_jump; - case BPF_S_JMP_JSET_K: + case BPF_JMP | BPF_JSET | BPF_K: /* pc += (A & K) ? pc->jt : pc->jf */ condt = ARM_COND_NE; /* not set iff all zeroes iff Z==1 iff EQ */ @@ -759,16 +754,16 @@ cmp_x: emit(ARM_TST_I(r_A, imm12), ctx); } goto cond_jump; - case BPF_S_JMP_JSET_X: + case BPF_JMP | BPF_JSET | BPF_X: /* pc += (A & X) ? pc->jt : pc->jf */ update_on_xread(ctx); condt = ARM_COND_NE; emit(ARM_TST_R(r_A, r_X), ctx); goto cond_jump; - case BPF_S_RET_A: + case BPF_RET | BPF_A: emit(ARM_MOV_R(ARM_R0, r_A), ctx); goto b_epilogue; - case BPF_S_RET_K: + case BPF_RET | BPF_K: if ((k == 0) && (ctx->ret0_fp_idx < 0)) ctx->ret0_fp_idx = i; emit_mov_i(ARM_R0, k, ctx); @@ -776,17 +771,17 @@ b_epilogue: if (i != ctx->skf->len - 1) emit(ARM_B(b_imm(prog->len, ctx)), ctx); break; - case BPF_S_MISC_TAX: + case BPF_MISC | BPF_TAX: /* X = A */ ctx->seen |= SEEN_X; emit(ARM_MOV_R(r_X, r_A), ctx); break; - case BPF_S_MISC_TXA: + case BPF_MISC | BPF_TXA: /* A = X */ update_on_xread(ctx); emit(ARM_MOV_R(r_A, r_X), ctx); break; - case BPF_S_ANC_PROTOCOL: + case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol) */ ctx->seen |= SEEN_SKB; BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, @@ -795,7 +790,7 @@ b_epilogue: emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx); emit_swap16(r_A, r_scratch, ctx); break; - case BPF_S_ANC_CPU: + case BPF_ANC | SKF_AD_CPU: /* r_scratch = current_thread_info() */ OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx); /* A = current_thread_info()->cpu */ @@ -803,7 +798,7 @@ b_epilogue: off = offsetof(struct thread_info, cpu); emit(ARM_LDR_I(r_A, r_scratch, off), ctx); break; - case BPF_S_ANC_IFINDEX: + case BPF_ANC | SKF_AD_IFINDEX: /* A = skb->dev->ifindex */ ctx->seen |= SEEN_SKB; off = offsetof(struct sk_buff, dev); @@ -817,30 +812,30 @@ b_epilogue: off = offsetof(struct net_device, ifindex); emit(ARM_LDR_I(r_A, r_scratch, off), ctx); break; - case BPF_S_ANC_MARK: + case BPF_ANC | SKF_AD_MARK: ctx->seen |= SEEN_SKB; BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); off = offsetof(struct sk_buff, mark); emit(ARM_LDR_I(r_A, r_skb, off), ctx); break; - case BPF_S_ANC_RXHASH: + case BPF_ANC | SKF_AD_RXHASH: ctx->seen |= SEEN_SKB; BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); off = offsetof(struct sk_buff, hash); emit(ARM_LDR_I(r_A, r_skb, off), ctx); break; - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: + case BPF_ANC | SKF_AD_VLAN_TAG: + case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: ctx->seen |= SEEN_SKB; BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); off = offsetof(struct sk_buff, vlan_tci); emit(ARM_LDRH_I(r_A, r_skb, off), ctx); - if (inst->code == BPF_S_ANC_VLAN_TAG) + if (code == (BPF_ANC | 
SKF_AD_VLAN_TAG)) OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx); else OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx); break; - case BPF_S_ANC_QUEUE: + case BPF_ANC | SKF_AD_QUEUE: ctx->seen |= SEEN_SKB; BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S index e76eba74d9da..8f87d9217122 100644 --- a/arch/powerpc/net/bpf_jit_64.S +++ b/arch/powerpc/net/bpf_jit_64.S @@ -78,7 +78,7 @@ sk_load_byte_positive_offset: blr /* - * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf) + * BPF_LDX | BPF_B | BPF_MSH: ldxb 4*([offset]&0xf) * r_addr is the offset value */ .globl sk_load_byte_msh diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 808ce1cae21a..6dcdadefd8d0 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -79,19 +79,11 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image, } switch (filter[0].code) { - case BPF_S_RET_K: - case BPF_S_LD_W_LEN: - case BPF_S_ANC_PROTOCOL: - case BPF_S_ANC_IFINDEX: - case BPF_S_ANC_MARK: - case BPF_S_ANC_RXHASH: - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: - case BPF_S_ANC_CPU: - case BPF_S_ANC_QUEUE: - case BPF_S_LD_W_ABS: - case BPF_S_LD_H_ABS: - case BPF_S_LD_B_ABS: + case BPF_RET | BPF_K: + case BPF_LD | BPF_W | BPF_LEN: + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: /* first instruction sets A register (or is RET 'constant') */ break; default: @@ -144,6 +136,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, for (i = 0; i < flen; i++) { unsigned int K = filter[i].k; + u16 code = bpf_anc_helper(&filter[i]); /* * addrs[] maps a BPF bytecode address into a real offset from @@ -151,35 +144,35 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, */ addrs[i] = ctx->idx * 4; - switch (filter[i].code) { + switch (code) { /*** ALU ops ***/ - case BPF_S_ALU_ADD_X: /* A += X; */ + case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */ ctx->seen |= SEEN_XREG; PPC_ADD(r_A, r_A, r_X); break; - case BPF_S_ALU_ADD_K: /* A += K; */ + case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */ if (!K) break; PPC_ADDI(r_A, r_A, IMM_L(K)); if (K >= 32768) PPC_ADDIS(r_A, r_A, IMM_HA(K)); break; - case BPF_S_ALU_SUB_X: /* A -= X; */ + case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */ ctx->seen |= SEEN_XREG; PPC_SUB(r_A, r_A, r_X); break; - case BPF_S_ALU_SUB_K: /* A -= K */ + case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */ if (!K) break; PPC_ADDI(r_A, r_A, IMM_L(-K)); if (K >= 32768) PPC_ADDIS(r_A, r_A, IMM_HA(-K)); break; - case BPF_S_ALU_MUL_X: /* A *= X; */ + case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */ ctx->seen |= SEEN_XREG; PPC_MUL(r_A, r_A, r_X); break; - case BPF_S_ALU_MUL_K: /* A *= K */ + case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */ if (K < 32768) PPC_MULI(r_A, r_A, K); else { @@ -187,7 +180,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, PPC_MUL(r_A, r_A, r_scratch1); } break; - case BPF_S_ALU_MOD_X: /* A %= X; */ + case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */ ctx->seen |= SEEN_XREG; PPC_CMPWI(r_X, 0); if (ctx->pc_ret0 != -1) { @@ -201,13 +194,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, PPC_MUL(r_scratch1, r_X, r_scratch1); PPC_SUB(r_A, r_A, r_scratch1); break; - case BPF_S_ALU_MOD_K: /* A %= K; */ + case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */ PPC_LI32(r_scratch2, K); PPC_DIVWU(r_scratch1, r_A, r_scratch2); PPC_MUL(r_scratch1, r_scratch2, r_scratch1); PPC_SUB(r_A, r_A, r_scratch1); 
break; - case BPF_S_ALU_DIV_X: /* A /= X; */ + case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */ ctx->seen |= SEEN_XREG; PPC_CMPWI(r_X, 0); if (ctx->pc_ret0 != -1) { @@ -223,17 +216,17 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, } PPC_DIVWU(r_A, r_A, r_X); break; - case BPF_S_ALU_DIV_K: /* A /= K */ + case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */ if (K == 1) break; PPC_LI32(r_scratch1, K); PPC_DIVWU(r_A, r_A, r_scratch1); break; - case BPF_S_ALU_AND_X: + case BPF_ALU | BPF_AND | BPF_X: ctx->seen |= SEEN_XREG; PPC_AND(r_A, r_A, r_X); break; - case BPF_S_ALU_AND_K: + case BPF_ALU | BPF_AND | BPF_K: if (!IMM_H(K)) PPC_ANDI(r_A, r_A, K); else { @@ -241,51 +234,51 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, PPC_AND(r_A, r_A, r_scratch1); } break; - case BPF_S_ALU_OR_X: + case BPF_ALU | BPF_OR | BPF_X: ctx->seen |= SEEN_XREG; PPC_OR(r_A, r_A, r_X); break; - case BPF_S_ALU_OR_K: + case BPF_ALU | BPF_OR | BPF_K: if (IMM_L(K)) PPC_ORI(r_A, r_A, IMM_L(K)); if (K >= 65536) PPC_ORIS(r_A, r_A, IMM_H(K)); break; - case BPF_S_ANC_ALU_XOR_X: - case BPF_S_ALU_XOR_X: /* A ^= X */ + case BPF_ANC | SKF_AD_ALU_XOR_X: + case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */ ctx->seen |= SEEN_XREG; PPC_XOR(r_A, r_A, r_X); break; - case BPF_S_ALU_XOR_K: /* A ^= K */ + case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */ if (IMM_L(K)) PPC_XORI(r_A, r_A, IMM_L(K)); if (K >= 65536) PPC_XORIS(r_A, r_A, IMM_H(K)); break; - case BPF_S_ALU_LSH_X: /* A <<= X; */ + case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */ ctx->seen |= SEEN_XREG; PPC_SLW(r_A, r_A, r_X); break; - case BPF_S_ALU_LSH_K: + case BPF_ALU | BPF_LSH | BPF_K: if (K == 0) break; else PPC_SLWI(r_A, r_A, K); break; - case BPF_S_ALU_RSH_X: /* A >>= X; */ + case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */ ctx->seen |= SEEN_XREG; PPC_SRW(r_A, r_A, r_X); break; - case BPF_S_ALU_RSH_K: /* A >>= K; */ + case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */ if (K == 0) break; else PPC_SRWI(r_A, r_A, K); break; - case BPF_S_ALU_NEG: + case BPF_ALU | BPF_NEG: PPC_NEG(r_A, r_A); break; - case BPF_S_RET_K: + case BPF_RET | BPF_K: PPC_LI32(r_ret, K); if (!K) { if (ctx->pc_ret0 == -1) @@ -312,7 +305,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, PPC_BLR(); } break; - case BPF_S_RET_A: + case BPF_RET | BPF_A: PPC_MR(r_ret, r_A); if (i != flen - 1) { if (ctx->seen) @@ -321,53 +314,53 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, PPC_BLR(); } break; - case BPF_S_MISC_TAX: /* X = A */ + case BPF_MISC | BPF_TAX: /* X = A */ PPC_MR(r_X, r_A); break; - case BPF_S_MISC_TXA: /* A = X */ + case BPF_MISC | BPF_TXA: /* A = X */ ctx->seen |= SEEN_XREG; PPC_MR(r_A, r_X); break; /*** Constant loads/M[] access ***/ - case BPF_S_LD_IMM: /* A = K */ + case BPF_LD | BPF_IMM: /* A = K */ PPC_LI32(r_A, K); break; - case BPF_S_LDX_IMM: /* X = K */ + case BPF_LDX | BPF_IMM: /* X = K */ PPC_LI32(r_X, K); break; - case BPF_S_LD_MEM: /* A = mem[K] */ + case BPF_LD | BPF_MEM: /* A = mem[K] */ PPC_MR(r_A, r_M + (K & 0xf)); ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); break; - case BPF_S_LDX_MEM: /* X = mem[K] */ + case BPF_LDX | BPF_MEM: /* X = mem[K] */ PPC_MR(r_X, r_M + (K & 0xf)); ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); break; - case BPF_S_ST: /* mem[K] = A */ + case BPF_ST: /* mem[K] = A */ PPC_MR(r_M + (K & 0xf), r_A); ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); break; - case BPF_S_STX: /* mem[K] = X */ + case BPF_STX: /* mem[K] = X */ PPC_MR(r_M + (K & 0xf), r_X); ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf)); break; - case BPF_S_LD_W_LEN: /* A 
= skb->len; */ + case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len)); break; - case BPF_S_LDX_W_LEN: /* X = skb->len; */ + case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */ PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len)); break; /*** Ancillary info loads ***/ - case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ + case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff, protocol)); break; - case BPF_S_ANC_IFINDEX: + case BPF_ANC | SKF_AD_IFINDEX: PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, dev)); PPC_CMPDI(r_scratch1, 0); @@ -384,33 +377,33 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, PPC_LWZ_OFFS(r_A, r_scratch1, offsetof(struct net_device, ifindex)); break; - case BPF_S_ANC_MARK: + case BPF_ANC | SKF_AD_MARK: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, mark)); break; - case BPF_S_ANC_RXHASH: + case BPF_ANC | SKF_AD_RXHASH: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, hash)); break; - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: + case BPF_ANC | SKF_AD_VLAN_TAG: + case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, vlan_tci)); - if (filter[i].code == BPF_S_ANC_VLAN_TAG) + if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) PPC_ANDI(r_A, r_A, VLAN_VID_MASK); else PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT); break; - case BPF_S_ANC_QUEUE: + case BPF_ANC | SKF_AD_QUEUE: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, queue_mapping)); break; - case BPF_S_ANC_CPU: + case BPF_ANC | SKF_AD_CPU: #ifdef CONFIG_SMP /* * PACA ptr is r13: @@ -426,13 +419,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, break; /*** Absolute loads from packet header/data ***/ - case BPF_S_LD_W_ABS: + case BPF_LD | BPF_W | BPF_ABS: func = CHOOSE_LOAD_FUNC(K, sk_load_word); goto common_load; - case BPF_S_LD_H_ABS: + case BPF_LD | BPF_H | BPF_ABS: func = CHOOSE_LOAD_FUNC(K, sk_load_half); goto common_load; - case BPF_S_LD_B_ABS: + case BPF_LD | BPF_B | BPF_ABS: func = CHOOSE_LOAD_FUNC(K, sk_load_byte); common_load: /* Load from [K]. 
*/ @@ -449,13 +442,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, break; /*** Indirect loads from packet header/data ***/ - case BPF_S_LD_W_IND: + case BPF_LD | BPF_W | BPF_IND: func = sk_load_word; goto common_load_ind; - case BPF_S_LD_H_IND: + case BPF_LD | BPF_H | BPF_IND: func = sk_load_half; goto common_load_ind; - case BPF_S_LD_B_IND: + case BPF_LD | BPF_B | BPF_IND: func = sk_load_byte; common_load_ind: /* @@ -473,31 +466,31 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, PPC_BCC(COND_LT, exit_addr); break; - case BPF_S_LDX_B_MSH: + case BPF_LDX | BPF_B | BPF_MSH: func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh); goto common_load; break; /*** Jump and branches ***/ - case BPF_S_JMP_JA: + case BPF_JMP | BPF_JA: if (K != 0) PPC_JMP(addrs[i + 1 + K]); break; - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGT_X: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGT | BPF_X: true_cond = COND_GT; goto cond_branch; - case BPF_S_JMP_JGE_K: - case BPF_S_JMP_JGE_X: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JGE | BPF_X: true_cond = COND_GE; goto cond_branch; - case BPF_S_JMP_JEQ_K: - case BPF_S_JMP_JEQ_X: + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JEQ | BPF_X: true_cond = COND_EQ; goto cond_branch; - case BPF_S_JMP_JSET_K: - case BPF_S_JMP_JSET_X: + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP | BPF_JSET | BPF_X: true_cond = COND_NE; /* Fall through */ cond_branch: @@ -508,20 +501,20 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, break; } - switch (filter[i].code) { - case BPF_S_JMP_JGT_X: - case BPF_S_JMP_JGE_X: - case BPF_S_JMP_JEQ_X: + switch (code) { + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JEQ | BPF_X: ctx->seen |= SEEN_XREG; PPC_CMPLW(r_A, r_X); break; - case BPF_S_JMP_JSET_X: + case BPF_JMP | BPF_JSET | BPF_X: ctx->seen |= SEEN_XREG; PPC_AND_DOT(r_scratch1, r_A, r_X); break; - case BPF_S_JMP_JEQ_K: - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGE_K: + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: if (K < 32768) PPC_CMPLWI(r_A, K); else { @@ -529,7 +522,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, PPC_CMPLW(r_A, r_scratch1); } break; - case BPF_S_JMP_JSET_K: + case BPF_JMP | BPF_JSET | BPF_K: if (K < 32768) /* PPC_ANDI is /only/ dot-form */ PPC_ANDI(r_scratch1, r_A, K); diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index e9f8fa9337fe..a2cbd875543a 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -269,27 +269,17 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter) EMIT4(0xa7c80000); /* Clear A if the first register does not set it. 
*/ switch (filter[0].code) { - case BPF_S_LD_W_ABS: - case BPF_S_LD_H_ABS: - case BPF_S_LD_B_ABS: - case BPF_S_LD_W_LEN: - case BPF_S_LD_W_IND: - case BPF_S_LD_H_IND: - case BPF_S_LD_B_IND: - case BPF_S_LD_IMM: - case BPF_S_LD_MEM: - case BPF_S_MISC_TXA: - case BPF_S_ANC_PROTOCOL: - case BPF_S_ANC_PKTTYPE: - case BPF_S_ANC_IFINDEX: - case BPF_S_ANC_MARK: - case BPF_S_ANC_QUEUE: - case BPF_S_ANC_HATYPE: - case BPF_S_ANC_RXHASH: - case BPF_S_ANC_CPU: - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: - case BPF_S_RET_K: + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: + case BPF_LD | BPF_W | BPF_LEN: + case BPF_LD | BPF_W | BPF_IND: + case BPF_LD | BPF_H | BPF_IND: + case BPF_LD | BPF_B | BPF_IND: + case BPF_LD | BPF_IMM: + case BPF_LD | BPF_MEM: + case BPF_MISC | BPF_TXA: + case BPF_RET | BPF_K: /* first instruction sets A register */ break; default: /* A = 0 */ @@ -304,15 +294,18 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, unsigned int K; int offset; unsigned int mask; + u16 code; K = filter->k; - switch (filter->code) { - case BPF_S_ALU_ADD_X: /* A += X */ + code = bpf_anc_helper(filter); + + switch (code) { + case BPF_ALU | BPF_ADD | BPF_X: /* A += X */ jit->seen |= SEEN_XREG; /* ar %r5,%r12 */ EMIT2(0x1a5c); break; - case BPF_S_ALU_ADD_K: /* A += K */ + case BPF_ALU | BPF_ADD | BPF_K: /* A += K */ if (!K) break; if (K <= 16383) @@ -325,12 +318,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, /* a %r5,(%r13) */ EMIT4_DISP(0x5a50d000, EMIT_CONST(K)); break; - case BPF_S_ALU_SUB_X: /* A -= X */ + case BPF_ALU | BPF_SUB | BPF_X: /* A -= X */ jit->seen |= SEEN_XREG; /* sr %r5,%r12 */ EMIT2(0x1b5c); break; - case BPF_S_ALU_SUB_K: /* A -= K */ + case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */ if (!K) break; if (K <= 16384) @@ -343,12 +336,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, /* s %r5,(%r13) */ EMIT4_DISP(0x5b50d000, EMIT_CONST(K)); break; - case BPF_S_ALU_MUL_X: /* A *= X */ + case BPF_ALU | BPF_MUL | BPF_X: /* A *= X */ jit->seen |= SEEN_XREG; /* msr %r5,%r12 */ EMIT4(0xb252005c); break; - case BPF_S_ALU_MUL_K: /* A *= K */ + case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */ if (K <= 16383) /* mhi %r5,K */ EMIT4_IMM(0xa75c0000, K); @@ -359,7 +352,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, /* ms %r5,(%r13) */ EMIT4_DISP(0x7150d000, EMIT_CONST(K)); break; - case BPF_S_ALU_DIV_X: /* A /= X */ + case BPF_ALU | BPF_DIV | BPF_X: /* A /= X */ jit->seen |= SEEN_XREG | SEEN_RET0; /* ltr %r12,%r12 */ EMIT2(0x12cc); @@ -370,7 +363,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, /* dlr %r4,%r12 */ EMIT4(0xb997004c); break; - case BPF_S_ALU_DIV_K: /* A /= K */ + case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */ if (K == 1) break; /* lhi %r4,0 */ @@ -378,7 +371,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, /* dl %r4,(%r13) */ EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K)); break; - case BPF_S_ALU_MOD_X: /* A %= X */ + case BPF_ALU | BPF_MOD | BPF_X: /* A %= X */ jit->seen |= SEEN_XREG | SEEN_RET0; /* ltr %r12,%r12 */ EMIT2(0x12cc); @@ -391,7 +384,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, /* lr %r5,%r4 */ EMIT2(0x1854); break; - case BPF_S_ALU_MOD_K: /* A %= K */ + case BPF_ALU | BPF_MOD | BPF_K: /* A %= K */ if (K == 1) { /* lhi %r5,0 */ EMIT4(0xa7580000); @@ -404,12 +397,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, 
struct sock_filter *filter, /* lr %r5,%r4 */ EMIT2(0x1854); break; - case BPF_S_ALU_AND_X: /* A &= X */ + case BPF_ALU | BPF_AND | BPF_X: /* A &= X */ jit->seen |= SEEN_XREG; /* nr %r5,%r12 */ EMIT2(0x145c); break; - case BPF_S_ALU_AND_K: /* A &= K */ + case BPF_ALU | BPF_AND | BPF_K: /* A &= K */ if (test_facility(21)) /* nilf %r5, */ EMIT6_IMM(0xc05b0000, K); @@ -417,12 +410,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, /* n %r5,(%r13) */ EMIT4_DISP(0x5450d000, EMIT_CONST(K)); break; - case BPF_S_ALU_OR_X: /* A |= X */ + case BPF_ALU | BPF_OR | BPF_X: /* A |= X */ jit->seen |= SEEN_XREG; /* or %r5,%r12 */ EMIT2(0x165c); break; - case BPF_S_ALU_OR_K: /* A |= K */ + case BPF_ALU | BPF_OR | BPF_K: /* A |= K */ if (test_facility(21)) /* oilf %r5, */ EMIT6_IMM(0xc05d0000, K); @@ -430,55 +423,55 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, /* o %r5,(%r13) */ EMIT4_DISP(0x5650d000, EMIT_CONST(K)); break; - case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */ - case BPF_S_ALU_XOR_X: + case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */ + case BPF_ALU | BPF_XOR | BPF_X: jit->seen |= SEEN_XREG; /* xr %r5,%r12 */ EMIT2(0x175c); break; - case BPF_S_ALU_XOR_K: /* A ^= K */ + case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */ if (!K) break; /* x %r5,(%r13) */ EMIT4_DISP(0x5750d000, EMIT_CONST(K)); break; - case BPF_S_ALU_LSH_X: /* A <<= X; */ + case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */ jit->seen |= SEEN_XREG; /* sll %r5,0(%r12) */ EMIT4(0x8950c000); break; - case BPF_S_ALU_LSH_K: /* A <<= K */ + case BPF_ALU | BPF_LSH | BPF_K: /* A <<= K */ if (K == 0) break; /* sll %r5,K */ EMIT4_DISP(0x89500000, K); break; - case BPF_S_ALU_RSH_X: /* A >>= X; */ + case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */ jit->seen |= SEEN_XREG; /* srl %r5,0(%r12) */ EMIT4(0x8850c000); break; - case BPF_S_ALU_RSH_K: /* A >>= K; */ + case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */ if (K == 0) break; /* srl %r5,K */ EMIT4_DISP(0x88500000, K); break; - case BPF_S_ALU_NEG: /* A = -A */ + case BPF_ALU | BPF_NEG: /* A = -A */ /* lnr %r5,%r5 */ EMIT2(0x1155); break; - case BPF_S_JMP_JA: /* ip += K */ + case BPF_JMP | BPF_JA: /* ip += K */ offset = addrs[i + K] + jit->start - jit->prg; EMIT4_PCREL(0xa7f40000, offset); break; - case BPF_S_JMP_JGT_K: /* ip += (A > K) ? jt : jf */ + case BPF_JMP | BPF_JGT | BPF_K: /* ip += (A > K) ? jt : jf */ mask = 0x200000; /* jh */ goto kbranch; - case BPF_S_JMP_JGE_K: /* ip += (A >= K) ? jt : jf */ + case BPF_JMP | BPF_JGE | BPF_K: /* ip += (A >= K) ? jt : jf */ mask = 0xa00000; /* jhe */ goto kbranch; - case BPF_S_JMP_JEQ_K: /* ip += (A == K) ? jt : jf */ + case BPF_JMP | BPF_JEQ | BPF_K: /* ip += (A == K) ? jt : jf */ mask = 0x800000; /* je */ kbranch: /* Emit compare if the branch targets are different */ if (filter->jt != filter->jf) { @@ -511,7 +504,7 @@ branch: if (filter->jt == filter->jf) { EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset); } break; - case BPF_S_JMP_JSET_K: /* ip += (A & K) ? jt : jf */ + case BPF_JMP | BPF_JSET | BPF_K: /* ip += (A & K) ? jt : jf */ mask = 0x700000; /* jnz */ /* Emit test if the branch targets are different */ if (filter->jt != filter->jf) { @@ -525,13 +518,13 @@ branch: if (filter->jt == filter->jf) { EMIT4_IMM(0xa7510000, K); } goto branch; - case BPF_S_JMP_JGT_X: /* ip += (A > X) ? jt : jf */ + case BPF_JMP | BPF_JGT | BPF_X: /* ip += (A > X) ? jt : jf */ mask = 0x200000; /* jh */ goto xbranch; - case BPF_S_JMP_JGE_X: /* ip += (A >= X) ? jt : jf */ + case BPF_JMP | BPF_JGE | BPF_X: /* ip += (A >= X) ? 
jt : jf */ mask = 0xa00000; /* jhe */ goto xbranch; - case BPF_S_JMP_JEQ_X: /* ip += (A == X) ? jt : jf */ + case BPF_JMP | BPF_JEQ | BPF_X: /* ip += (A == X) ? jt : jf */ mask = 0x800000; /* je */ xbranch: /* Emit compare if the branch targets are different */ if (filter->jt != filter->jf) { @@ -540,7 +533,7 @@ xbranch: /* Emit compare if the branch targets are different */ EMIT2(0x195c); } goto branch; - case BPF_S_JMP_JSET_X: /* ip += (A & X) ? jt : jf */ + case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */ mask = 0x700000; /* jnz */ /* Emit test if the branch targets are different */ if (filter->jt != filter->jf) { @@ -551,15 +544,15 @@ xbranch: /* Emit compare if the branch targets are different */ EMIT2(0x144c); } goto branch; - case BPF_S_LD_W_ABS: /* A = *(u32 *) (skb->data+K) */ + case BPF_LD | BPF_W | BPF_ABS: /* A = *(u32 *) (skb->data+K) */ jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD; offset = jit->off_load_word; goto load_abs; - case BPF_S_LD_H_ABS: /* A = *(u16 *) (skb->data+K) */ + case BPF_LD | BPF_H | BPF_ABS: /* A = *(u16 *) (skb->data+K) */ jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF; offset = jit->off_load_half; goto load_abs; - case BPF_S_LD_B_ABS: /* A = *(u8 *) (skb->data+K) */ + case BPF_LD | BPF_B | BPF_ABS: /* A = *(u8 *) (skb->data+K) */ jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE; offset = jit->off_load_byte; load_abs: if ((int) K < 0) @@ -573,19 +566,19 @@ call_fn: /* lg %r1,(%r13) */ /* jnz */ EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg)); break; - case BPF_S_LD_W_IND: /* A = *(u32 *) (skb->data+K+X) */ + case BPF_LD | BPF_W | BPF_IND: /* A = *(u32 *) (skb->data+K+X) */ jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD; offset = jit->off_load_iword; goto call_fn; - case BPF_S_LD_H_IND: /* A = *(u16 *) (skb->data+K+X) */ + case BPF_LD | BPF_H | BPF_IND: /* A = *(u16 *) (skb->data+K+X) */ jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF; offset = jit->off_load_ihalf; goto call_fn; - case BPF_S_LD_B_IND: /* A = *(u8 *) (skb->data+K+X) */ + case BPF_LD | BPF_B | BPF_IND: /* A = *(u8 *) (skb->data+K+X) */ jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE; offset = jit->off_load_ibyte; goto call_fn; - case BPF_S_LDX_B_MSH: + case BPF_LDX | BPF_B | BPF_MSH: /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */ jit->seen |= SEEN_RET0; if ((int) K < 0) { @@ -596,17 +589,17 @@ call_fn: /* lg %r1,(%r13) */ jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH; offset = jit->off_load_bmsh; goto call_fn; - case BPF_S_LD_W_LEN: /* A = skb->len; */ + case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); /* l %r5,(%r2) */ EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len)); break; - case BPF_S_LDX_W_LEN: /* X = skb->len; */ + case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */ jit->seen |= SEEN_XREG; /* l %r12,(%r2) */ EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len)); break; - case BPF_S_LD_IMM: /* A = K */ + case BPF_LD | BPF_IMM: /* A = K */ if (K <= 16383) /* lhi %r5,K */ EMIT4_IMM(0xa7580000, K); @@ -617,7 +610,7 @@ call_fn: /* lg %r1,(%r13) */ /* l %r5,(%r13) */ EMIT4_DISP(0x5850d000, EMIT_CONST(K)); break; - case BPF_S_LDX_IMM: /* X = K */ + case BPF_LDX | BPF_IMM: /* X = K */ jit->seen |= SEEN_XREG; if (K <= 16383) /* lhi %r12, */ @@ -629,29 +622,29 @@ call_fn: /* lg %r1,(%r13) */ /* l %r12,(%r13) */ EMIT4_DISP(0x58c0d000, EMIT_CONST(K)); break; - case BPF_S_LD_MEM: /* A = mem[K] */ + case BPF_LD | BPF_MEM: /* A = mem[K] */ jit->seen |= SEEN_MEM; /* l %r5,(%r15) */ 
EMIT4_DISP(0x5850f000, (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4); break; - case BPF_S_LDX_MEM: /* X = mem[K] */ + case BPF_LDX | BPF_MEM: /* X = mem[K] */ jit->seen |= SEEN_XREG | SEEN_MEM; /* l %r12,(%r15) */ EMIT4_DISP(0x58c0f000, (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4); break; - case BPF_S_MISC_TAX: /* X = A */ + case BPF_MISC | BPF_TAX: /* X = A */ jit->seen |= SEEN_XREG; /* lr %r12,%r5 */ EMIT2(0x18c5); break; - case BPF_S_MISC_TXA: /* A = X */ + case BPF_MISC | BPF_TXA: /* A = X */ jit->seen |= SEEN_XREG; /* lr %r5,%r12 */ EMIT2(0x185c); break; - case BPF_S_RET_K: + case BPF_RET | BPF_K: if (K == 0) { jit->seen |= SEEN_RET0; if (last) @@ -671,33 +664,33 @@ call_fn: /* lg %r1,(%r13) */ EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg); } break; - case BPF_S_RET_A: + case BPF_RET | BPF_A: /* llgfr %r2,%r5 */ EMIT4(0xb9160025); /* j */ EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg); break; - case BPF_S_ST: /* mem[K] = A */ + case BPF_ST: /* mem[K] = A */ jit->seen |= SEEN_MEM; /* st %r5,(%r15) */ EMIT4_DISP(0x5050f000, (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4); break; - case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */ + case BPF_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */ jit->seen |= SEEN_XREG | SEEN_MEM; /* st %r12,(%r15) */ EMIT4_DISP(0x50c0f000, (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4); break; - case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ + case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); /* lhi %r5,0 */ EMIT4(0xa7580000); /* icm %r5,3,(%r2) */ EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol)); break; - case BPF_S_ANC_IFINDEX: /* if (!skb->dev) return 0; - * A = skb->dev->ifindex */ + case BPF_ANC | SKF_AD_IFINDEX: /* if (!skb->dev) return 0; + * A = skb->dev->ifindex */ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); jit->seen |= SEEN_RET0; /* lg %r1,(%r2) */ @@ -709,20 +702,20 @@ call_fn: /* lg %r1,(%r13) */ /* l %r5,(%r1) */ EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex)); break; - case BPF_S_ANC_MARK: /* A = skb->mark */ + case BPF_ANC | SKF_AD_MARK: /* A = skb->mark */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); /* l %r5,(%r2) */ EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark)); break; - case BPF_S_ANC_QUEUE: /* A = skb->queue_mapping */ + case BPF_ANC | SKF_AD_QUEUE: /* A = skb->queue_mapping */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); /* lhi %r5,0 */ EMIT4(0xa7580000); /* icm %r5,3,(%r2) */ EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping)); break; - case BPF_S_ANC_HATYPE: /* if (!skb->dev) return 0; - * A = skb->dev->type */ + case BPF_ANC | SKF_AD_HATYPE: /* if (!skb->dev) return 0; + * A = skb->dev->type */ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); jit->seen |= SEEN_RET0; /* lg %r1,(%r2) */ @@ -736,20 +729,20 @@ call_fn: /* lg %r1,(%r13) */ /* icm %r5,3,(%r1) */ EMIT4_DISP(0xbf531000, offsetof(struct net_device, type)); break; - case BPF_S_ANC_RXHASH: /* A = skb->hash */ + case BPF_ANC | SKF_AD_RXHASH: /* A = skb->hash */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); /* l %r5,(%r2) */ EMIT4_DISP(0x58502000, offsetof(struct sk_buff, hash)); break; - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: + case BPF_ANC | SKF_AD_VLAN_TAG: + case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); /* lhi %r5,0 */ EMIT4(0xa7580000); /* icm %r5,3,(%r2) */ 
EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci)); - if (filter->code == BPF_S_ANC_VLAN_TAG) { + if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) { /* nill %r5,0xefff */ EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT); } else { @@ -759,7 +752,7 @@ call_fn: /* lg %r1,(%r13) */ EMIT4_DISP(0x88500000, 12); } break; - case BPF_S_ANC_PKTTYPE: + case BPF_ANC | SKF_AD_PKTTYPE: if (pkt_type_offset < 0) goto out; /* lhi %r5,0 */ @@ -769,7 +762,7 @@ call_fn: /* lg %r1,(%r13) */ /* srl %r5,5 */ EMIT4_DISP(0x88500000, 5); break; - case BPF_S_ANC_CPU: /* A = smp_processor_id() */ + case BPF_ANC | SKF_AD_CPU: /* A = smp_processor_id() */ #ifdef CONFIG_SMP /* l %r5, */ EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr)); diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index a82c6b2a9780..c88cf147deed 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp.c @@ -415,20 +415,11 @@ void bpf_jit_compile(struct sk_filter *fp) emit_reg_move(O7, r_saved_O7); switch (filter[0].code) { - case BPF_S_RET_K: - case BPF_S_LD_W_LEN: - case BPF_S_ANC_PROTOCOL: - case BPF_S_ANC_PKTTYPE: - case BPF_S_ANC_IFINDEX: - case BPF_S_ANC_MARK: - case BPF_S_ANC_RXHASH: - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: - case BPF_S_ANC_CPU: - case BPF_S_ANC_QUEUE: - case BPF_S_LD_W_ABS: - case BPF_S_LD_H_ABS: - case BPF_S_LD_B_ABS: + case BPF_RET | BPF_K: + case BPF_LD | BPF_W | BPF_LEN: + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: /* The first instruction sets the A register (or is * a "RET 'constant'") */ @@ -445,59 +436,60 @@ void bpf_jit_compile(struct sk_filter *fp) unsigned int t_offset; unsigned int f_offset; u32 t_op, f_op; + u16 code = bpf_anc_helper(&filter[i]); int ilen; - switch (filter[i].code) { - case BPF_S_ALU_ADD_X: /* A += X; */ + switch (code) { + case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */ emit_alu_X(ADD); break; - case BPF_S_ALU_ADD_K: /* A += K; */ + case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */ emit_alu_K(ADD, K); break; - case BPF_S_ALU_SUB_X: /* A -= X; */ + case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */ emit_alu_X(SUB); break; - case BPF_S_ALU_SUB_K: /* A -= K */ + case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */ emit_alu_K(SUB, K); break; - case BPF_S_ALU_AND_X: /* A &= X */ + case BPF_ALU | BPF_AND | BPF_X: /* A &= X */ emit_alu_X(AND); break; - case BPF_S_ALU_AND_K: /* A &= K */ + case BPF_ALU | BPF_AND | BPF_K: /* A &= K */ emit_alu_K(AND, K); break; - case BPF_S_ALU_OR_X: /* A |= X */ + case BPF_ALU | BPF_OR | BPF_X: /* A |= X */ emit_alu_X(OR); break; - case BPF_S_ALU_OR_K: /* A |= K */ + case BPF_ALU | BPF_OR | BPF_K: /* A |= K */ emit_alu_K(OR, K); break; - case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */ - case BPF_S_ALU_XOR_X: + case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */ + case BPF_ALU | BPF_XOR | BPF_X: emit_alu_X(XOR); break; - case BPF_S_ALU_XOR_K: /* A ^= K */ + case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */ emit_alu_K(XOR, K); break; - case BPF_S_ALU_LSH_X: /* A <<= X */ + case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X */ emit_alu_X(SLL); break; - case BPF_S_ALU_LSH_K: /* A <<= K */ + case BPF_ALU | BPF_LSH | BPF_K: /* A <<= K */ emit_alu_K(SLL, K); break; - case BPF_S_ALU_RSH_X: /* A >>= X */ + case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X */ emit_alu_X(SRL); break; - case BPF_S_ALU_RSH_K: /* A >>= K */ + case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K */ emit_alu_K(SRL, K); break; - case BPF_S_ALU_MUL_X: /* A *= X; */ + case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */ emit_alu_X(MUL); break; - case 
BPF_S_ALU_MUL_K: /* A *= K */ + case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */ emit_alu_K(MUL, K); break; - case BPF_S_ALU_DIV_K: /* A /= K with K != 0*/ + case BPF_ALU | BPF_DIV | BPF_K: /* A /= K with K != 0*/ if (K == 1) break; emit_write_y(G0); @@ -512,7 +504,7 @@ void bpf_jit_compile(struct sk_filter *fp) #endif emit_alu_K(DIV, K); break; - case BPF_S_ALU_DIV_X: /* A /= X; */ + case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */ emit_cmpi(r_X, 0); if (pc_ret0 > 0) { t_offset = addrs[pc_ret0 - 1]; @@ -544,10 +536,10 @@ void bpf_jit_compile(struct sk_filter *fp) #endif emit_alu_X(DIV); break; - case BPF_S_ALU_NEG: + case BPF_ALU | BPF_NEG: emit_neg(); break; - case BPF_S_RET_K: + case BPF_RET | BPF_K: if (!K) { if (pc_ret0 == -1) pc_ret0 = i; @@ -556,7 +548,7 @@ void bpf_jit_compile(struct sk_filter *fp) emit_loadimm(K, r_A); } /* Fallthrough */ - case BPF_S_RET_A: + case BPF_RET | BPF_A: if (seen_or_pass0) { if (i != flen - 1) { emit_jump(cleanup_addr); @@ -573,18 +565,18 @@ void bpf_jit_compile(struct sk_filter *fp) emit_jmpl(r_saved_O7, 8, G0); emit_reg_move(r_A, O0); /* delay slot */ break; - case BPF_S_MISC_TAX: + case BPF_MISC | BPF_TAX: seen |= SEEN_XREG; emit_reg_move(r_A, r_X); break; - case BPF_S_MISC_TXA: + case BPF_MISC | BPF_TXA: seen |= SEEN_XREG; emit_reg_move(r_X, r_A); break; - case BPF_S_ANC_CPU: + case BPF_ANC | SKF_AD_CPU: emit_load_cpu(r_A); break; - case BPF_S_ANC_PROTOCOL: + case BPF_ANC | SKF_AD_PROTOCOL: emit_skb_load16(protocol, r_A); break; #if 0 @@ -592,38 +584,38 @@ void bpf_jit_compile(struct sk_filter *fp) * a bit field even though we very much * know what we are doing here. */ - case BPF_S_ANC_PKTTYPE: + case BPF_ANC | SKF_AD_PKTTYPE: __emit_skb_load8(pkt_type, r_A); emit_alu_K(SRL, 5); break; #endif - case BPF_S_ANC_IFINDEX: + case BPF_ANC | SKF_AD_IFINDEX: emit_skb_loadptr(dev, r_A); emit_cmpi(r_A, 0); emit_branch(BNE_PTR, cleanup_addr + 4); emit_nop(); emit_load32(r_A, struct net_device, ifindex, r_A); break; - case BPF_S_ANC_MARK: + case BPF_ANC | SKF_AD_MARK: emit_skb_load32(mark, r_A); break; - case BPF_S_ANC_QUEUE: + case BPF_ANC | SKF_AD_QUEUE: emit_skb_load16(queue_mapping, r_A); break; - case BPF_S_ANC_HATYPE: + case BPF_ANC | SKF_AD_HATYPE: emit_skb_loadptr(dev, r_A); emit_cmpi(r_A, 0); emit_branch(BNE_PTR, cleanup_addr + 4); emit_nop(); emit_load16(r_A, struct net_device, type, r_A); break; - case BPF_S_ANC_RXHASH: + case BPF_ANC | SKF_AD_RXHASH: emit_skb_load32(hash, r_A); break; - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: + case BPF_ANC | SKF_AD_VLAN_TAG: + case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: emit_skb_load16(vlan_tci, r_A); - if (filter[i].code == BPF_S_ANC_VLAN_TAG) { + if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) { emit_andi(r_A, VLAN_VID_MASK, r_A); } else { emit_loadimm(VLAN_TAG_PRESENT, r_TMP); @@ -631,44 +623,44 @@ void bpf_jit_compile(struct sk_filter *fp) } break; - case BPF_S_LD_IMM: + case BPF_LD | BPF_IMM: emit_loadimm(K, r_A); break; - case BPF_S_LDX_IMM: + case BPF_LDX | BPF_IMM: emit_loadimm(K, r_X); break; - case BPF_S_LD_MEM: + case BPF_LD | BPF_MEM: emit_ldmem(K * 4, r_A); break; - case BPF_S_LDX_MEM: + case BPF_LDX | BPF_MEM: emit_ldmem(K * 4, r_X); break; - case BPF_S_ST: + case BPF_ST: emit_stmem(K * 4, r_A); break; - case BPF_S_STX: + case BPF_STX: emit_stmem(K * 4, r_X); break; #define CHOOSE_LOAD_FUNC(K, func) \ ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? 
func##_negative_offset : func) : func##_positive_offset) - case BPF_S_LD_W_ABS: + case BPF_LD | BPF_W | BPF_ABS: func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word); common_load: seen |= SEEN_DATAREF; emit_loadimm(K, r_OFF); emit_call(func); break; - case BPF_S_LD_H_ABS: + case BPF_LD | BPF_H | BPF_ABS: func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half); goto common_load; - case BPF_S_LD_B_ABS: + case BPF_LD | BPF_B | BPF_ABS: func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte); goto common_load; - case BPF_S_LDX_B_MSH: + case BPF_LDX | BPF_B | BPF_MSH: func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh); goto common_load; - case BPF_S_LD_W_IND: + case BPF_LD | BPF_W | BPF_IND: func = bpf_jit_load_word; common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; if (K) { @@ -683,13 +675,13 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; } emit_call(func); break; - case BPF_S_LD_H_IND: + case BPF_LD | BPF_H | BPF_IND: func = bpf_jit_load_half; goto common_load_ind; - case BPF_S_LD_B_IND: + case BPF_LD | BPF_B | BPF_IND: func = bpf_jit_load_byte; goto common_load_ind; - case BPF_S_JMP_JA: + case BPF_JMP | BPF_JA: emit_jump(addrs[i + K]); emit_nop(); break; @@ -700,14 +692,14 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; f_op = FOP; \ goto cond_branch - COND_SEL(BPF_S_JMP_JGT_K, BGU, BLEU); - COND_SEL(BPF_S_JMP_JGE_K, BGEU, BLU); - COND_SEL(BPF_S_JMP_JEQ_K, BE, BNE); - COND_SEL(BPF_S_JMP_JSET_K, BNE, BE); - COND_SEL(BPF_S_JMP_JGT_X, BGU, BLEU); - COND_SEL(BPF_S_JMP_JGE_X, BGEU, BLU); - COND_SEL(BPF_S_JMP_JEQ_X, BE, BNE); - COND_SEL(BPF_S_JMP_JSET_X, BNE, BE); + COND_SEL(BPF_JMP | BPF_JGT | BPF_K, BGU, BLEU); + COND_SEL(BPF_JMP | BPF_JGE | BPF_K, BGEU, BLU); + COND_SEL(BPF_JMP | BPF_JEQ | BPF_K, BE, BNE); + COND_SEL(BPF_JMP | BPF_JSET | BPF_K, BNE, BE); + COND_SEL(BPF_JMP | BPF_JGT | BPF_X, BGU, BLEU); + COND_SEL(BPF_JMP | BPF_JGE | BPF_X, BGEU, BLU); + COND_SEL(BPF_JMP | BPF_JEQ | BPF_X, BE, BNE); + COND_SEL(BPF_JMP | BPF_JSET | BPF_X, BNE, BE); cond_branch: f_offset = addrs[i + filter[i].jf]; t_offset = addrs[i + filter[i].jt]; @@ -719,20 +711,20 @@ cond_branch: f_offset = addrs[i + filter[i].jf]; break; } - switch (filter[i].code) { - case BPF_S_JMP_JGT_X: - case BPF_S_JMP_JGE_X: - case BPF_S_JMP_JEQ_X: + switch (code) { + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JEQ | BPF_X: seen |= SEEN_XREG; emit_cmp(r_A, r_X); break; - case BPF_S_JMP_JSET_X: + case BPF_JMP | BPF_JSET | BPF_X: seen |= SEEN_XREG; emit_btst(r_A, r_X); break; - case BPF_S_JMP_JEQ_K: - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGE_K: + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: if (is_simm13(K)) { emit_cmpi(r_A, K); } else { @@ -740,7 +732,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf]; emit_cmp(r_A, r_TMP); } break; - case BPF_S_JMP_JSET_K: + case BPF_JMP | BPF_JSET | BPF_K: if (is_simm13(K)) { emit_btsti(r_A, K); } else { diff --git a/include/linux/filter.h b/include/linux/filter.h index 625f4de9bdf2..49ef7a298c92 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -197,7 +197,6 @@ int sk_detach_filter(struct sock *sk); int sk_chk_filter(struct sock_filter *filter, unsigned int flen); int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned int len); -void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to); void sk_filter_charge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); @@ -205,6 +204,41 @@ void 
sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); void bpf_int_jit_compile(struct sk_filter *fp); +#define BPF_ANC BIT(15) + +static inline u16 bpf_anc_helper(const struct sock_filter *ftest) +{ + BUG_ON(ftest->code & BPF_ANC); + + switch (ftest->code) { + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: +#define BPF_ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \ + return BPF_ANC | SKF_AD_##CODE + switch (ftest->k) { + BPF_ANCILLARY(PROTOCOL); + BPF_ANCILLARY(PKTTYPE); + BPF_ANCILLARY(IFINDEX); + BPF_ANCILLARY(NLATTR); + BPF_ANCILLARY(NLATTR_NEST); + BPF_ANCILLARY(MARK); + BPF_ANCILLARY(QUEUE); + BPF_ANCILLARY(HATYPE); + BPF_ANCILLARY(RXHASH); + BPF_ANCILLARY(CPU); + BPF_ANCILLARY(ALU_XOR_X); + BPF_ANCILLARY(VLAN_TAG); + BPF_ANCILLARY(VLAN_TAG_PRESENT); + BPF_ANCILLARY(PAY_OFFSET); + BPF_ANCILLARY(RANDOM); + } + /* Fallthrough. */ + default: + return ftest->code; + } +} + #ifdef CONFIG_BPF_JIT #include #include @@ -224,86 +258,20 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, } #else #include + static inline void bpf_jit_compile(struct sk_filter *fp) { } + static inline void bpf_jit_free(struct sk_filter *fp) { kfree(fp); } -#endif +#endif /* CONFIG_BPF_JIT */ static inline int bpf_tell_extensions(void) { return SKF_AD_MAX; } -enum { - BPF_S_RET_K = 1, - BPF_S_RET_A, - BPF_S_ALU_ADD_K, - BPF_S_ALU_ADD_X, - BPF_S_ALU_SUB_K, - BPF_S_ALU_SUB_X, - BPF_S_ALU_MUL_K, - BPF_S_ALU_MUL_X, - BPF_S_ALU_DIV_X, - BPF_S_ALU_MOD_K, - BPF_S_ALU_MOD_X, - BPF_S_ALU_AND_K, - BPF_S_ALU_AND_X, - BPF_S_ALU_OR_K, - BPF_S_ALU_OR_X, - BPF_S_ALU_XOR_K, - BPF_S_ALU_XOR_X, - BPF_S_ALU_LSH_K, - BPF_S_ALU_LSH_X, - BPF_S_ALU_RSH_K, - BPF_S_ALU_RSH_X, - BPF_S_ALU_NEG, - BPF_S_LD_W_ABS, - BPF_S_LD_H_ABS, - BPF_S_LD_B_ABS, - BPF_S_LD_W_LEN, - BPF_S_LD_W_IND, - BPF_S_LD_H_IND, - BPF_S_LD_B_IND, - BPF_S_LD_IMM, - BPF_S_LDX_W_LEN, - BPF_S_LDX_B_MSH, - BPF_S_LDX_IMM, - BPF_S_MISC_TAX, - BPF_S_MISC_TXA, - BPF_S_ALU_DIV_K, - BPF_S_LD_MEM, - BPF_S_LDX_MEM, - BPF_S_ST, - BPF_S_STX, - BPF_S_JMP_JA, - BPF_S_JMP_JEQ_K, - BPF_S_JMP_JEQ_X, - BPF_S_JMP_JGE_K, - BPF_S_JMP_JGE_X, - BPF_S_JMP_JGT_K, - BPF_S_JMP_JGT_X, - BPF_S_JMP_JSET_K, - BPF_S_JMP_JSET_X, - /* Ancillary data */ - BPF_S_ANC_PROTOCOL, - BPF_S_ANC_PKTTYPE, - BPF_S_ANC_IFINDEX, - BPF_S_ANC_NLATTR, - BPF_S_ANC_NLATTR_NEST, - BPF_S_ANC_MARK, - BPF_S_ANC_QUEUE, - BPF_S_ANC_HATYPE, - BPF_S_ANC_RXHASH, - BPF_S_ANC_CPU, - BPF_S_ANC_ALU_XOR_X, - BPF_S_ANC_VLAN_TAG, - BPF_S_ANC_VLAN_TAG_PRESENT, - BPF_S_ANC_PAY_OFFSET, - BPF_S_ANC_RANDOM, -}; - #endif /* __LINUX_FILTER_H__ */ diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 1036b6f2fded..44e69483b383 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -103,60 +103,59 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen) u32 k = ftest->k; switch (code) { - case BPF_S_LD_W_ABS: + case BPF_LD | BPF_W | BPF_ABS: ftest->code = BPF_LDX | BPF_W | BPF_ABS; /* 32-bit aligned and not out of bounds. */ if (k >= sizeof(struct seccomp_data) || k & 3) return -EINVAL; continue; - case BPF_S_LD_W_LEN: + case BPF_LD | BPF_W | BPF_LEN: ftest->code = BPF_LD | BPF_IMM; ftest->k = sizeof(struct seccomp_data); continue; - case BPF_S_LDX_W_LEN: + case BPF_LDX | BPF_W | BPF_LEN: ftest->code = BPF_LDX | BPF_IMM; ftest->k = sizeof(struct seccomp_data); continue; /* Explicitly include allowed calls. 
*/ - case BPF_S_RET_K: - case BPF_S_RET_A: - case BPF_S_ALU_ADD_K: - case BPF_S_ALU_ADD_X: - case BPF_S_ALU_SUB_K: - case BPF_S_ALU_SUB_X: - case BPF_S_ALU_MUL_K: - case BPF_S_ALU_MUL_X: - case BPF_S_ALU_DIV_X: - case BPF_S_ALU_AND_K: - case BPF_S_ALU_AND_X: - case BPF_S_ALU_OR_K: - case BPF_S_ALU_OR_X: - case BPF_S_ALU_XOR_K: - case BPF_S_ALU_XOR_X: - case BPF_S_ALU_LSH_K: - case BPF_S_ALU_LSH_X: - case BPF_S_ALU_RSH_K: - case BPF_S_ALU_RSH_X: - case BPF_S_ALU_NEG: - case BPF_S_LD_IMM: - case BPF_S_LDX_IMM: - case BPF_S_MISC_TAX: - case BPF_S_MISC_TXA: - case BPF_S_ALU_DIV_K: - case BPF_S_LD_MEM: - case BPF_S_LDX_MEM: - case BPF_S_ST: - case BPF_S_STX: - case BPF_S_JMP_JA: - case BPF_S_JMP_JEQ_K: - case BPF_S_JMP_JEQ_X: - case BPF_S_JMP_JGE_K: - case BPF_S_JMP_JGE_X: - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGT_X: - case BPF_S_JMP_JSET_K: - case BPF_S_JMP_JSET_X: - sk_decode_filter(ftest, ftest); + case BPF_RET | BPF_K: + case BPF_RET | BPF_A: + case BPF_ALU | BPF_ADD | BPF_K: + case BPF_ALU | BPF_ADD | BPF_X: + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU | BPF_MUL | BPF_K: + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU | BPF_DIV | BPF_X: + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU | BPF_XOR | BPF_X: + case BPF_ALU | BPF_LSH | BPF_K: + case BPF_ALU | BPF_LSH | BPF_X: + case BPF_ALU | BPF_RSH | BPF_K: + case BPF_ALU | BPF_RSH | BPF_X: + case BPF_ALU | BPF_NEG: + case BPF_LD | BPF_IMM: + case BPF_LDX | BPF_IMM: + case BPF_MISC | BPF_TAX: + case BPF_MISC | BPF_TXA: + case BPF_LD | BPF_MEM: + case BPF_LDX | BPF_MEM: + case BPF_ST: + case BPF_STX: + case BPF_JMP | BPF_JA: + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP | BPF_JSET | BPF_X: continue; default: return -EINVAL; diff --git a/net/core/filter.c b/net/core/filter.c index 2c2d35d9d101..328aaf6ff4d1 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -536,11 +536,13 @@ load_word: * Output: * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness */ + ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp); if (likely(ptr != NULL)) { BPF_R0 = get_unaligned_be32(ptr); CONT; } + return 0; LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */ off = K; @@ -550,6 +552,7 @@ load_half: BPF_R0 = get_unaligned_be16(ptr); CONT; } + return 0; LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */ off = K; @@ -559,6 +562,7 @@ load_byte: BPF_R0 = *(u8 *)ptr; CONT; } + return 0; LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */ off = K + X; @@ -1136,44 +1140,46 @@ err: */ static int check_load_and_stores(struct sock_filter *filter, int flen) { - u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */ + u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */ int pc, ret = 0; BUILD_BUG_ON(BPF_MEMWORDS > 16); + masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL); if (!masks) return -ENOMEM; + memset(masks, 0xff, flen * sizeof(*masks)); for (pc = 0; pc < flen; pc++) { memvalid &= masks[pc]; switch (filter[pc].code) { - case BPF_S_ST: - case BPF_S_STX: + case BPF_ST: + case BPF_STX: memvalid |= (1 << filter[pc].k); break; - case BPF_S_LD_MEM: - case BPF_S_LDX_MEM: + case BPF_LD | BPF_MEM: + case BPF_LDX | BPF_MEM: if (!(memvalid & 
(1 << filter[pc].k))) { ret = -EINVAL; goto error; } break; - case BPF_S_JMP_JA: - /* a jump must set masks on target */ + case BPF_JMP | BPF_JA: + /* A jump must set masks on target */ masks[pc + 1 + filter[pc].k] &= memvalid; memvalid = ~0; break; - case BPF_S_JMP_JEQ_K: - case BPF_S_JMP_JEQ_X: - case BPF_S_JMP_JGE_K: - case BPF_S_JMP_JGE_X: - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGT_X: - case BPF_S_JMP_JSET_X: - case BPF_S_JMP_JSET_K: - /* a jump must set masks on targets */ + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP | BPF_JSET | BPF_X: + /* A jump must set masks on targets */ masks[pc + 1 + filter[pc].jt] &= memvalid; masks[pc + 1 + filter[pc].jf] &= memvalid; memvalid = ~0; @@ -1185,6 +1191,72 @@ error: return ret; } +static bool chk_code_allowed(u16 code_to_probe) +{ + static const bool codes[] = { + /* 32 bit ALU operations */ + [BPF_ALU | BPF_ADD | BPF_K] = true, + [BPF_ALU | BPF_ADD | BPF_X] = true, + [BPF_ALU | BPF_SUB | BPF_K] = true, + [BPF_ALU | BPF_SUB | BPF_X] = true, + [BPF_ALU | BPF_MUL | BPF_K] = true, + [BPF_ALU | BPF_MUL | BPF_X] = true, + [BPF_ALU | BPF_DIV | BPF_K] = true, + [BPF_ALU | BPF_DIV | BPF_X] = true, + [BPF_ALU | BPF_MOD | BPF_K] = true, + [BPF_ALU | BPF_MOD | BPF_X] = true, + [BPF_ALU | BPF_AND | BPF_K] = true, + [BPF_ALU | BPF_AND | BPF_X] = true, + [BPF_ALU | BPF_OR | BPF_K] = true, + [BPF_ALU | BPF_OR | BPF_X] = true, + [BPF_ALU | BPF_XOR | BPF_K] = true, + [BPF_ALU | BPF_XOR | BPF_X] = true, + [BPF_ALU | BPF_LSH | BPF_K] = true, + [BPF_ALU | BPF_LSH | BPF_X] = true, + [BPF_ALU | BPF_RSH | BPF_K] = true, + [BPF_ALU | BPF_RSH | BPF_X] = true, + [BPF_ALU | BPF_NEG] = true, + /* Load instructions */ + [BPF_LD | BPF_W | BPF_ABS] = true, + [BPF_LD | BPF_H | BPF_ABS] = true, + [BPF_LD | BPF_B | BPF_ABS] = true, + [BPF_LD | BPF_W | BPF_LEN] = true, + [BPF_LD | BPF_W | BPF_IND] = true, + [BPF_LD | BPF_H | BPF_IND] = true, + [BPF_LD | BPF_B | BPF_IND] = true, + [BPF_LD | BPF_IMM] = true, + [BPF_LD | BPF_MEM] = true, + [BPF_LDX | BPF_W | BPF_LEN] = true, + [BPF_LDX | BPF_B | BPF_MSH] = true, + [BPF_LDX | BPF_IMM] = true, + [BPF_LDX | BPF_MEM] = true, + /* Store instructions */ + [BPF_ST] = true, + [BPF_STX] = true, + /* Misc instructions */ + [BPF_MISC | BPF_TAX] = true, + [BPF_MISC | BPF_TXA] = true, + /* Return instructions */ + [BPF_RET | BPF_K] = true, + [BPF_RET | BPF_A] = true, + /* Jump instructions */ + [BPF_JMP | BPF_JA] = true, + [BPF_JMP | BPF_JEQ | BPF_K] = true, + [BPF_JMP | BPF_JEQ | BPF_X] = true, + [BPF_JMP | BPF_JGE | BPF_K] = true, + [BPF_JMP | BPF_JGE | BPF_X] = true, + [BPF_JMP | BPF_JGT | BPF_K] = true, + [BPF_JMP | BPF_JGT | BPF_X] = true, + [BPF_JMP | BPF_JSET | BPF_K] = true, + [BPF_JMP | BPF_JSET | BPF_X] = true, + }; + + if (code_to_probe >= ARRAY_SIZE(codes)) + return false; + + return codes[code_to_probe]; +} + /** * sk_chk_filter - verify socket filter code * @filter: filter to verify @@ -1201,154 +1273,76 @@ error: */ int sk_chk_filter(struct sock_filter *filter, unsigned int flen) { - /* - * Valid instructions are initialized to non-0. - * Invalid instructions are initialized to 0. 
- */ - static const u8 codes[] = { - [BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K, - [BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X, - [BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K, - [BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X, - [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K, - [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X, - [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X, - [BPF_ALU|BPF_MOD|BPF_K] = BPF_S_ALU_MOD_K, - [BPF_ALU|BPF_MOD|BPF_X] = BPF_S_ALU_MOD_X, - [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K, - [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X, - [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K, - [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X, - [BPF_ALU|BPF_XOR|BPF_K] = BPF_S_ALU_XOR_K, - [BPF_ALU|BPF_XOR|BPF_X] = BPF_S_ALU_XOR_X, - [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K, - [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X, - [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K, - [BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X, - [BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG, - [BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS, - [BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS, - [BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS, - [BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN, - [BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND, - [BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND, - [BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND, - [BPF_LD|BPF_IMM] = BPF_S_LD_IMM, - [BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN, - [BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH, - [BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM, - [BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX, - [BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA, - [BPF_RET|BPF_K] = BPF_S_RET_K, - [BPF_RET|BPF_A] = BPF_S_RET_A, - [BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K, - [BPF_LD|BPF_MEM] = BPF_S_LD_MEM, - [BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM, - [BPF_ST] = BPF_S_ST, - [BPF_STX] = BPF_S_STX, - [BPF_JMP|BPF_JA] = BPF_S_JMP_JA, - [BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K, - [BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X, - [BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K, - [BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X, - [BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K, - [BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X, - [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K, - [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X, - }; - int pc; bool anc_found; + int pc; if (flen == 0 || flen > BPF_MAXINSNS) return -EINVAL; - /* check the filter code now */ + /* Check the filter code now */ for (pc = 0; pc < flen; pc++) { struct sock_filter *ftest = &filter[pc]; - u16 code = ftest->code; - if (code >= ARRAY_SIZE(codes)) - return -EINVAL; - code = codes[code]; - if (!code) + /* May we actually operate on this code? */ + if (!chk_code_allowed(ftest->code)) return -EINVAL; + /* Some instructions need special checks */ - switch (code) { - case BPF_S_ALU_DIV_K: - case BPF_S_ALU_MOD_K: - /* check for division by zero */ + switch (ftest->code) { + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU | BPF_MOD | BPF_K: + /* Check for division by zero */ if (ftest->k == 0) return -EINVAL; break; - case BPF_S_LD_MEM: - case BPF_S_LDX_MEM: - case BPF_S_ST: - case BPF_S_STX: - /* check for invalid memory addresses */ + case BPF_LD | BPF_MEM: + case BPF_LDX | BPF_MEM: + case BPF_ST: + case BPF_STX: + /* Check for invalid memory addresses */ if (ftest->k >= BPF_MEMWORDS) return -EINVAL; break; - case BPF_S_JMP_JA: - /* - * Note, the large ftest->k might cause loops. + case BPF_JMP | BPF_JA: + /* Note, the large ftest->k might cause loops. * Compare this with conditional jumps below, * where offsets are limited. 
--ANK (981016) */ - if (ftest->k >= (unsigned int)(flen-pc-1)) + if (ftest->k >= (unsigned int)(flen - pc - 1)) return -EINVAL; break; - case BPF_S_JMP_JEQ_K: - case BPF_S_JMP_JEQ_X: - case BPF_S_JMP_JGE_K: - case BPF_S_JMP_JGE_X: - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGT_X: - case BPF_S_JMP_JSET_X: - case BPF_S_JMP_JSET_K: - /* for conditionals both must be safe */ + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP | BPF_JSET | BPF_X: + /* Both conditionals must be safe */ if (pc + ftest->jt + 1 >= flen || pc + ftest->jf + 1 >= flen) return -EINVAL; break; - case BPF_S_LD_W_ABS: - case BPF_S_LD_H_ABS: - case BPF_S_LD_B_ABS: + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: anc_found = false; -#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \ - code = BPF_S_ANC_##CODE; \ - anc_found = true; \ - break - switch (ftest->k) { - ANCILLARY(PROTOCOL); - ANCILLARY(PKTTYPE); - ANCILLARY(IFINDEX); - ANCILLARY(NLATTR); - ANCILLARY(NLATTR_NEST); - ANCILLARY(MARK); - ANCILLARY(QUEUE); - ANCILLARY(HATYPE); - ANCILLARY(RXHASH); - ANCILLARY(CPU); - ANCILLARY(ALU_XOR_X); - ANCILLARY(VLAN_TAG); - ANCILLARY(VLAN_TAG_PRESENT); - ANCILLARY(PAY_OFFSET); - ANCILLARY(RANDOM); - } - - /* ancillary operation unknown or unsupported */ + if (bpf_anc_helper(ftest) & BPF_ANC) + anc_found = true; + /* Ancillary operation unknown or unsupported */ if (anc_found == false && ftest->k >= SKF_AD_OFF) return -EINVAL; } - ftest->code = code; } - /* last instruction must be a RET code */ + /* Last instruction must be a RET code */ switch (filter[flen - 1].code) { - case BPF_S_RET_K: - case BPF_S_RET_A: + case BPF_RET | BPF_K: + case BPF_RET | BPF_A: return check_load_and_stores(filter, flen); } + return -EINVAL; } EXPORT_SYMBOL(sk_chk_filter); @@ -1448,7 +1442,7 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp, { struct sock_filter *old_prog; struct sk_filter *old_fp; - int i, err, new_len, old_len = fp->len; + int err, new_len, old_len = fp->len; /* We are free to overwrite insns et al right here as it * won't be used at this point in time anymore internally @@ -1458,13 +1452,6 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp, BUILD_BUG_ON(sizeof(struct sock_filter) != sizeof(struct sock_filter_int)); - /* For now, we need to unfiddle BPF_S_* identifiers in place. - * This can sooner or later on be subject to removal, e.g. when - * JITs have been converted. - */ - for (i = 0; i < fp->len; i++) - sk_decode_filter(&fp->insns[i], &fp->insns[i]); - /* Conversion cannot happen on overlapping memory areas, * so we need to keep the user BPF around until the 2nd * pass. At this time, the user BPF is stored in fp->insns. 
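
As a rough illustration (not part of the patch, and not applyable as a hunk): once the BPF_S_* enum is gone, every consumer keys directly off the raw classic opcodes, and ancillary loads are told apart by the BPF_ANC bit that bpf_anc_helper() sets. A minimal sketch of the dispatch pattern the JITs above now share; the emit comments are placeholders for the arch-specific code, and "filter"/"i" are the per-instruction loop variables used in the JIT bodies:

	u16 code = bpf_anc_helper(&filter[i]);	/* raw opcode, or BPF_ANC | SKF_AD_* */

	switch (code) {
	case BPF_ALU | BPF_ADD | BPF_X:		/* A += X */
		/* emit an arch-specific register add */
		break;
	case BPF_LD | BPF_W | BPF_ABS:		/* A = *(u32 *) (skb->data + K) */
		/* emit an arch-specific packet load */
		break;
	case BPF_ANC | SKF_AD_PROTOCOL:		/* A = ntohs(skb->protocol) */
		/* emit an arch-specific halfword load plus byte swap */
		break;
	default:
		/* unknown or unsupported opcode */
		break;
	}
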
@@ -1706,84 +1693,6 @@ int sk_detach_filter(struct sock *sk) } EXPORT_SYMBOL_GPL(sk_detach_filter); -void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to) -{ - static const u16 decodes[] = { - [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K, - [BPF_S_ALU_ADD_X] = BPF_ALU|BPF_ADD|BPF_X, - [BPF_S_ALU_SUB_K] = BPF_ALU|BPF_SUB|BPF_K, - [BPF_S_ALU_SUB_X] = BPF_ALU|BPF_SUB|BPF_X, - [BPF_S_ALU_MUL_K] = BPF_ALU|BPF_MUL|BPF_K, - [BPF_S_ALU_MUL_X] = BPF_ALU|BPF_MUL|BPF_X, - [BPF_S_ALU_DIV_X] = BPF_ALU|BPF_DIV|BPF_X, - [BPF_S_ALU_MOD_K] = BPF_ALU|BPF_MOD|BPF_K, - [BPF_S_ALU_MOD_X] = BPF_ALU|BPF_MOD|BPF_X, - [BPF_S_ALU_AND_K] = BPF_ALU|BPF_AND|BPF_K, - [BPF_S_ALU_AND_X] = BPF_ALU|BPF_AND|BPF_X, - [BPF_S_ALU_OR_K] = BPF_ALU|BPF_OR|BPF_K, - [BPF_S_ALU_OR_X] = BPF_ALU|BPF_OR|BPF_X, - [BPF_S_ALU_XOR_K] = BPF_ALU|BPF_XOR|BPF_K, - [BPF_S_ALU_XOR_X] = BPF_ALU|BPF_XOR|BPF_X, - [BPF_S_ALU_LSH_K] = BPF_ALU|BPF_LSH|BPF_K, - [BPF_S_ALU_LSH_X] = BPF_ALU|BPF_LSH|BPF_X, - [BPF_S_ALU_RSH_K] = BPF_ALU|BPF_RSH|BPF_K, - [BPF_S_ALU_RSH_X] = BPF_ALU|BPF_RSH|BPF_X, - [BPF_S_ALU_NEG] = BPF_ALU|BPF_NEG, - [BPF_S_LD_W_ABS] = BPF_LD|BPF_W|BPF_ABS, - [BPF_S_LD_H_ABS] = BPF_LD|BPF_H|BPF_ABS, - [BPF_S_LD_B_ABS] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_PROTOCOL] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_PKTTYPE] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_IFINDEX] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_NLATTR] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_MARK] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_QUEUE] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_HATYPE] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_RANDOM] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN, - [BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND, - [BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND, - [BPF_S_LD_B_IND] = BPF_LD|BPF_B|BPF_IND, - [BPF_S_LD_IMM] = BPF_LD|BPF_IMM, - [BPF_S_LDX_W_LEN] = BPF_LDX|BPF_W|BPF_LEN, - [BPF_S_LDX_B_MSH] = BPF_LDX|BPF_B|BPF_MSH, - [BPF_S_LDX_IMM] = BPF_LDX|BPF_IMM, - [BPF_S_MISC_TAX] = BPF_MISC|BPF_TAX, - [BPF_S_MISC_TXA] = BPF_MISC|BPF_TXA, - [BPF_S_RET_K] = BPF_RET|BPF_K, - [BPF_S_RET_A] = BPF_RET|BPF_A, - [BPF_S_ALU_DIV_K] = BPF_ALU|BPF_DIV|BPF_K, - [BPF_S_LD_MEM] = BPF_LD|BPF_MEM, - [BPF_S_LDX_MEM] = BPF_LDX|BPF_MEM, - [BPF_S_ST] = BPF_ST, - [BPF_S_STX] = BPF_STX, - [BPF_S_JMP_JA] = BPF_JMP|BPF_JA, - [BPF_S_JMP_JEQ_K] = BPF_JMP|BPF_JEQ|BPF_K, - [BPF_S_JMP_JEQ_X] = BPF_JMP|BPF_JEQ|BPF_X, - [BPF_S_JMP_JGE_K] = BPF_JMP|BPF_JGE|BPF_K, - [BPF_S_JMP_JGE_X] = BPF_JMP|BPF_JGE|BPF_X, - [BPF_S_JMP_JGT_K] = BPF_JMP|BPF_JGT|BPF_K, - [BPF_S_JMP_JGT_X] = BPF_JMP|BPF_JGT|BPF_X, - [BPF_S_JMP_JSET_K] = BPF_JMP|BPF_JSET|BPF_K, - [BPF_S_JMP_JSET_X] = BPF_JMP|BPF_JSET|BPF_X, - }; - u16 code; - - code = filt->code; - - to->code = decodes[code]; - to->jt = filt->jt; - to->jf = filt->jf; - to->k = filt->k; -} - int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len) { -- cgit v1.2.3-59-g8ed1b From f8f6d679aaa78b989d9aee8d2935066fbdca2a30 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 29 May 2014 10:22:51 +0200 Subject: net: filter: improve filter block macros Commit 9739eef13c92 ("net: filter: make BPF conversion more readable") started to introduce helper macros similar to BPF_STMT()/BPF_JUMP() macros from classic 
BPF. However, quite some statements in the filter conversion functions remained in the old style which gives a mixture of block macros and non block macros in the code. This patch makes the block macros itself more readable by using explicit member initialization, and converts the remaining ones where possible to remain in a more consistent state. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- include/linux/filter.h | 255 +++++++++++++++++++++++++++++++++++++++---------- net/core/filter.c | 196 ++++++++++++++----------------------- 2 files changed, 277 insertions(+), 174 deletions(-) (limited to 'net/core') diff --git a/include/linux/filter.h b/include/linux/filter.h index 49ef7a298c92..f0c2ad43b4af 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -76,56 +76,211 @@ enum { /* BPF program can access up to 512 bytes of stack space. */ #define MAX_BPF_STACK 512 -/* bpf_add|sub|...: a += x, bpf_mov: a = x */ -#define BPF_ALU64_REG(op, a, x) \ - ((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_X, a, x, 0, 0}) -#define BPF_ALU32_REG(op, a, x) \ - ((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_X, a, x, 0, 0}) - -/* bpf_add|sub|...: a += imm, bpf_mov: a = imm */ -#define BPF_ALU64_IMM(op, a, imm) \ - ((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_K, a, 0, 0, imm}) -#define BPF_ALU32_IMM(op, a, imm) \ - ((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_K, a, 0, 0, imm}) - -/* R0 = *(uint *) (skb->data + off) */ -#define BPF_LD_ABS(size, off) \ - ((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_ABS, 0, 0, 0, off}) - -/* R0 = *(uint *) (skb->data + x + off) */ -#define BPF_LD_IND(size, x, off) \ - ((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_IND, 0, x, 0, off}) - -/* a = *(uint *) (x + off) */ -#define BPF_LDX_MEM(sz, a, x, off) \ - ((struct sock_filter_int) {BPF_LDX|BPF_SIZE(sz)|BPF_MEM, a, x, off, 0}) - -/* if (a 'op' x) goto pc+off */ -#define BPF_JMP_REG(op, a, x, off) \ - ((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_X, a, x, off, 0}) - -/* if (a 'op' imm) goto pc+off */ -#define BPF_JMP_IMM(op, a, imm, off) \ - ((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_K, a, 0, off, imm}) - -#define BPF_EXIT_INSN() \ - ((struct sock_filter_int) {BPF_JMP|BPF_EXIT, 0, 0, 0, 0}) - -static inline int size_to_bpf(int size) -{ - switch (size) { - case 1: - return BPF_B; - case 2: - return BPF_H; - case 4: - return BPF_W; - case 8: - return BPF_DW; - default: - return -EINVAL; - } -} +/* Helper macros for filter block array initializers. 
*/ + +/* ALU ops on registers, bpf_add|sub|...: A += X */ + +#define BPF_ALU64_REG(OP, A, X) \ + ((struct sock_filter_int) { \ + .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \ + .a_reg = A, \ + .x_reg = X, \ + .off = 0, \ + .imm = 0 }) + +#define BPF_ALU32_REG(OP, A, X) \ + ((struct sock_filter_int) { \ + .code = BPF_ALU | BPF_OP(OP) | BPF_X, \ + .a_reg = A, \ + .x_reg = X, \ + .off = 0, \ + .imm = 0 }) + +/* ALU ops on immediates, bpf_add|sub|...: A += IMM */ + +#define BPF_ALU64_IMM(OP, A, IMM) \ + ((struct sock_filter_int) { \ + .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \ + .a_reg = A, \ + .x_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +#define BPF_ALU32_IMM(OP, A, IMM) \ + ((struct sock_filter_int) { \ + .code = BPF_ALU | BPF_OP(OP) | BPF_K, \ + .a_reg = A, \ + .x_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +/* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */ + +#define BPF_ENDIAN(TYPE, A, LEN) \ + ((struct sock_filter_int) { \ + .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \ + .a_reg = A, \ + .x_reg = 0, \ + .off = 0, \ + .imm = LEN }) + +/* Short form of mov, A = X */ + +#define BPF_MOV64_REG(A, X) \ + ((struct sock_filter_int) { \ + .code = BPF_ALU64 | BPF_MOV | BPF_X, \ + .a_reg = A, \ + .x_reg = X, \ + .off = 0, \ + .imm = 0 }) + +#define BPF_MOV32_REG(A, X) \ + ((struct sock_filter_int) { \ + .code = BPF_ALU | BPF_MOV | BPF_X, \ + .a_reg = A, \ + .x_reg = X, \ + .off = 0, \ + .imm = 0 }) + +/* Short form of mov, A = IMM */ + +#define BPF_MOV64_IMM(A, IMM) \ + ((struct sock_filter_int) { \ + .code = BPF_ALU64 | BPF_MOV | BPF_K, \ + .a_reg = A, \ + .x_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +#define BPF_MOV32_IMM(A, IMM) \ + ((struct sock_filter_int) { \ + .code = BPF_ALU | BPF_MOV | BPF_K, \ + .a_reg = A, \ + .x_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +/* Short form of mov based on type, BPF_X: A = X, BPF_K: A = IMM */ + +#define BPF_MOV64_RAW(TYPE, A, X, IMM) \ + ((struct sock_filter_int) { \ + .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \ + .a_reg = A, \ + .x_reg = X, \ + .off = 0, \ + .imm = IMM }) + +#define BPF_MOV32_RAW(TYPE, A, X, IMM) \ + ((struct sock_filter_int) { \ + .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \ + .a_reg = A, \ + .x_reg = X, \ + .off = 0, \ + .imm = IMM }) + +/* Direct packet access, R0 = *(uint *) (skb->data + OFF) */ + +#define BPF_LD_ABS(SIZE, OFF) \ + ((struct sock_filter_int) { \ + .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \ + .a_reg = 0, \ + .x_reg = 0, \ + .off = 0, \ + .imm = OFF }) + +/* Indirect packet access, R0 = *(uint *) (skb->data + X + OFF) */ + +#define BPF_LD_IND(SIZE, X, OFF) \ + ((struct sock_filter_int) { \ + .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \ + .a_reg = 0, \ + .x_reg = X, \ + .off = 0, \ + .imm = OFF }) + +/* Memory store, A = *(uint *) (X + OFF), and vice versa */ + +#define BPF_LDX_MEM(SIZE, A, X, OFF) \ + ((struct sock_filter_int) { \ + .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \ + .a_reg = A, \ + .x_reg = X, \ + .off = OFF, \ + .imm = 0 }) + +#define BPF_STX_MEM(SIZE, A, X, OFF) \ + ((struct sock_filter_int) { \ + .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \ + .a_reg = A, \ + .x_reg = X, \ + .off = OFF, \ + .imm = 0 }) + +/* Conditional jumps against registers, if (A 'op' X) goto pc + OFF */ + +#define BPF_JMP_REG(OP, A, X, OFF) \ + ((struct sock_filter_int) { \ + .code = BPF_JMP | BPF_OP(OP) | BPF_X, \ + .a_reg = A, \ + .x_reg = X, \ + .off = OFF, \ + .imm = 0 }) + +/* Conditional jumps against immediates, if (A 'op' IMM) goto pc + OFF */ + +#define BPF_JMP_IMM(OP, A, IMM, OFF) \ + ((struct sock_filter_int) { 
\ + .code = BPF_JMP | BPF_OP(OP) | BPF_K, \ + .a_reg = A, \ + .x_reg = 0, \ + .off = OFF, \ + .imm = IMM }) + +/* Function call */ + +#define BPF_EMIT_CALL(FUNC) \ + ((struct sock_filter_int) { \ + .code = BPF_JMP | BPF_CALL, \ + .a_reg = 0, \ + .x_reg = 0, \ + .off = 0, \ + .imm = ((FUNC) - __bpf_call_base) }) + +/* Raw code statement block */ + +#define BPF_RAW_INSN(CODE, A, X, OFF, IMM) \ + ((struct sock_filter_int) { \ + .code = CODE, \ + .a_reg = A, \ + .x_reg = X, \ + .off = OFF, \ + .imm = IMM }) + +/* Program exit */ + +#define BPF_EXIT_INSN() \ + ((struct sock_filter_int) { \ + .code = BPF_JMP | BPF_EXIT, \ + .a_reg = 0, \ + .x_reg = 0, \ + .off = 0, \ + .imm = 0 }) + +#define bytes_to_bpf_size(bytes) \ +({ \ + int bpf_size = -EINVAL; \ + \ + if (bytes == sizeof(u8)) \ + bpf_size = BPF_B; \ + else if (bytes == sizeof(u16)) \ + bpf_size = BPF_H; \ + else if (bytes == sizeof(u32)) \ + bpf_size = BPF_W; \ + else if (bytes == sizeof(u64)) \ + bpf_size = BPF_DW; \ + \ + bpf_size; \ +}) /* Macro to invoke filter function. */ #define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) diff --git a/net/core/filter.c b/net/core/filter.c index 328aaf6ff4d1..842f8393121d 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -672,14 +672,10 @@ static bool convert_bpf_extensions(struct sock_filter *fp, BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); /* A = *(u16 *) (ctx + offsetof(protocol)) */ - *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, - offsetof(struct sk_buff, protocol)); - insn++; - + *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, + offsetof(struct sk_buff, protocol)); /* A = ntohs(A) [emitting a nop or swap16] */ - insn->code = BPF_ALU | BPF_END | BPF_FROM_BE; - insn->a_reg = BPF_REG_A; - insn->imm = 16; + *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16); break; case SKF_AD_OFF + SKF_AD_PKTTYPE: @@ -688,37 +684,27 @@ static bool convert_bpf_extensions(struct sock_filter *fp, if (insn->off < 0) return false; insn++; - *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX); break; case SKF_AD_OFF + SKF_AD_IFINDEX: case SKF_AD_OFF + SKF_AD_HATYPE: - *insn = BPF_LDX_MEM(size_to_bpf(FIELD_SIZEOF(struct sk_buff, dev)), - BPF_REG_TMP, BPF_REG_CTX, - offsetof(struct sk_buff, dev)); - insn++; - - /* if (tmp != 0) goto pc+1 */ - *insn = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1); - insn++; - - *insn = BPF_EXIT_INSN(); - insn++; - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); - - insn->a_reg = BPF_REG_A; - insn->x_reg = BPF_REG_TMP; - - if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) { - insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->off = offsetof(struct net_device, ifindex); - } else { - insn->code = BPF_LDX | BPF_MEM | BPF_H; - insn->off = offsetof(struct net_device, type); - } + BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0); + + *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)), + BPF_REG_TMP, BPF_REG_CTX, + offsetof(struct sk_buff, dev)); + /* if (tmp != 0) goto pc + 1 */ + *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1); + *insn++ = BPF_EXIT_INSN(); + if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) + *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP, + offsetof(struct net_device, ifindex)); + else + *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP, + offsetof(struct net_device, type)); break; case SKF_AD_OFF + SKF_AD_MARK: @@ -745,22 +731,17 @@ static bool convert_bpf_extensions(struct sock_filter *fp, case SKF_AD_OFF + SKF_AD_VLAN_TAG: 
case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); - - /* A = *(u16 *) (ctx + offsetof(vlan_tci)) */ - *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, - offsetof(struct sk_buff, vlan_tci)); - insn++; - BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); + /* A = *(u16 *) (ctx + offsetof(vlan_tci)) */ + *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, + offsetof(struct sk_buff, vlan_tci)); if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) { *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, ~VLAN_TAG_PRESENT); } else { /* A >>= 12 */ - *insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12); - insn++; - + *insn++ = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12); /* A &= 1 */ *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1); } @@ -772,34 +753,27 @@ static bool convert_bpf_extensions(struct sock_filter *fp, case SKF_AD_OFF + SKF_AD_CPU: case SKF_AD_OFF + SKF_AD_RANDOM: /* arg1 = ctx */ - *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG1, BPF_REG_CTX); - insn++; - + *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX); /* arg2 = A */ - *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG2, BPF_REG_A); - insn++; - + *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A); /* arg3 = X */ - *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG3, BPF_REG_X); - insn++; - + *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X); /* Emit call(ctx, arg2=A, arg3=X) */ - insn->code = BPF_JMP | BPF_CALL; switch (fp->k) { case SKF_AD_OFF + SKF_AD_PAY_OFFSET: - insn->imm = __skb_get_pay_offset - __bpf_call_base; + *insn = BPF_EMIT_CALL(__skb_get_pay_offset); break; case SKF_AD_OFF + SKF_AD_NLATTR: - insn->imm = __skb_get_nlattr - __bpf_call_base; + *insn = BPF_EMIT_CALL(__skb_get_nlattr); break; case SKF_AD_OFF + SKF_AD_NLATTR_NEST: - insn->imm = __skb_get_nlattr_nest - __bpf_call_base; + *insn = BPF_EMIT_CALL(__skb_get_nlattr_nest); break; case SKF_AD_OFF + SKF_AD_CPU: - insn->imm = __get_raw_cpu_id - __bpf_call_base; + *insn = BPF_EMIT_CALL(__get_raw_cpu_id); break; case SKF_AD_OFF + SKF_AD_RANDOM: - insn->imm = __get_random_u32 - __bpf_call_base; + *insn = BPF_EMIT_CALL(__get_random_u32); break; } break; @@ -871,9 +845,8 @@ do_pass: new_insn = new_prog; fp = prog; - if (new_insn) { - *new_insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_CTX, BPF_REG_ARG1); - } + if (new_insn) + *new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1); new_insn++; for (i = 0; i < len; fp++, i++) { @@ -921,17 +894,16 @@ do_pass: convert_bpf_extensions(fp, &insn)) break; - insn->code = fp->code; - insn->a_reg = BPF_REG_A; - insn->x_reg = BPF_REG_X; - insn->imm = fp->k; + *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k); break; - /* Jump opcodes map as-is, but offsets need adjustment. */ - case BPF_JMP | BPF_JA: - target = i + fp->k + 1; - insn->code = fp->code; -#define EMIT_JMP \ + /* Jump transformation cannot use BPF block macros + * everywhere as offset calculation and target updates + * require a bit more work than the rest, i.e. jump + * opcodes map as-is, but offsets need adjustment. + */ + +#define BPF_EMIT_JMP \ do { \ if (target >= len || target < 0) \ goto err; \ @@ -940,7 +912,10 @@ do_pass: insn->off -= insn - tmp_insns; \ } while (0) - EMIT_JMP; + case BPF_JMP | BPF_JA: + target = i + fp->k + 1; + insn->code = fp->code; + BPF_EMIT_JMP; break; case BPF_JMP | BPF_JEQ | BPF_K: @@ -956,10 +931,7 @@ do_pass: * immediate into tmp register and use it * in compare insn. 
*/ - insn->code = BPF_ALU | BPF_MOV | BPF_K; - insn->a_reg = BPF_REG_TMP; - insn->imm = fp->k; - insn++; + *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k); insn->a_reg = BPF_REG_A; insn->x_reg = BPF_REG_TMP; @@ -975,7 +947,7 @@ do_pass: if (fp->jf == 0) { insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; target = i + fp->jt + 1; - EMIT_JMP; + BPF_EMIT_JMP; break; } @@ -983,116 +955,94 @@ do_pass: if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) { insn->code = BPF_JMP | BPF_JNE | bpf_src; target = i + fp->jf + 1; - EMIT_JMP; + BPF_EMIT_JMP; break; } /* Other jumps are mapped into two insns: Jxx and JA. */ target = i + fp->jt + 1; insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; - EMIT_JMP; + BPF_EMIT_JMP; insn++; insn->code = BPF_JMP | BPF_JA; target = i + fp->jf + 1; - EMIT_JMP; + BPF_EMIT_JMP; break; /* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */ case BPF_LDX | BPF_MSH | BPF_B: /* tmp = A */ - *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_TMP, BPF_REG_A); - insn++; - + *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A); /* A = BPF_R0 = *(u8 *) (skb->data + K) */ - *insn = BPF_LD_ABS(BPF_B, fp->k); - insn++; - + *insn++ = BPF_LD_ABS(BPF_B, fp->k); /* A &= 0xf */ - *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf); - insn++; - + *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf); /* A <<= 2 */ - *insn = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2); - insn++; - + *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2); /* X = A */ - *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A); - insn++; - + *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); /* A = tmp */ - *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_TMP); + *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP); break; /* RET_K, RET_A are remaped into 2 insns. */ case BPF_RET | BPF_A: case BPF_RET | BPF_K: - insn->code = BPF_ALU | BPF_MOV | - (BPF_RVAL(fp->code) == BPF_K ? - BPF_K : BPF_X); - insn->a_reg = 0; - insn->x_reg = BPF_REG_A; - insn->imm = fp->k; - insn++; - + *insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ? + BPF_K : BPF_X, BPF_REG_0, + BPF_REG_A, fp->k); *insn = BPF_EXIT_INSN(); break; /* Store to stack. */ case BPF_ST: case BPF_STX: - insn->code = BPF_STX | BPF_MEM | BPF_W; - insn->a_reg = BPF_REG_FP; - insn->x_reg = fp->code == BPF_ST ? - BPF_REG_A : BPF_REG_X; - insn->off = -(BPF_MEMWORDS - fp->k) * 4; + *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) == + BPF_ST ? BPF_REG_A : BPF_REG_X, + -(BPF_MEMWORDS - fp->k) * 4); break; /* Load from stack. */ case BPF_LD | BPF_MEM: case BPF_LDX | BPF_MEM: - insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? - BPF_REG_A : BPF_REG_X; - insn->x_reg = BPF_REG_FP; - insn->off = -(BPF_MEMWORDS - fp->k) * 4; + *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ? + BPF_REG_A : BPF_REG_X, BPF_REG_FP, + -(BPF_MEMWORDS - fp->k) * 4); break; /* A = K or X = K */ case BPF_LD | BPF_IMM: case BPF_LDX | BPF_IMM: - insn->code = BPF_ALU | BPF_MOV | BPF_K; - insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? - BPF_REG_A : BPF_REG_X; - insn->imm = fp->k; + *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ? 
+ BPF_REG_A : BPF_REG_X, fp->k); break; /* X = A */ case BPF_MISC | BPF_TAX: - *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A); + *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); break; /* A = X */ case BPF_MISC | BPF_TXA: - *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_X); + *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X); break; /* A = skb->len or X = skb->len */ case BPF_LD | BPF_W | BPF_LEN: case BPF_LDX | BPF_W | BPF_LEN: - insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? - BPF_REG_A : BPF_REG_X; - insn->x_reg = BPF_REG_CTX; - insn->off = offsetof(struct sk_buff, len); + *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ? + BPF_REG_A : BPF_REG_X, BPF_REG_CTX, + offsetof(struct sk_buff, len)); break; - /* access seccomp_data fields */ + /* Access seccomp_data fields. */ case BPF_LDX | BPF_ABS | BPF_W: /* A = *(u32 *) (ctx + K) */ *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k); break; + /* Unkown instruction. */ default: goto err; } @@ -1101,7 +1051,6 @@ do_pass: if (new_prog) memcpy(new_insn, tmp_insns, sizeof(*insn) * (insn - tmp_insns)); - new_insn += insn - tmp_insns; } @@ -1116,7 +1065,6 @@ do_pass: new_flen = new_insn - new_prog; if (pass > 2) goto err; - goto do_pass; } -- cgit v1.2.3-59-g8ed1b From 670e5b8eaf85704742bc3cb1df51fdd3ce08fc15 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 28 May 2014 18:44:46 -0700 Subject: net: Add support for device specific address syncing This change provides a function to be used in order to break the ndo_set_rx_mode call into a set of address add and remove calls. The code is based on the implementation of dev_uc_sync/dev_mc_sync. Since they essentially do the same thing but with only one dev I simply named my functions __dev_uc_sync/__dev_mc_sync. I also implemented an unsync version of the functions as well to allow for cleanup on close. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- include/linux/netdevice.h | 73 ++++++++++++++++++++++++++++++++++++++++ net/core/dev_addr_lists.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 158 insertions(+) (limited to 'net/core') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 2db1610bf109..774e5391eb8e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3003,6 +3003,15 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list, struct netdev_hw_addr_list *from_list, int addr_len); void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, struct netdev_hw_addr_list *from_list, int addr_len); +int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, + const unsigned char *)); +void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *)); void __hw_addr_init(struct netdev_hw_addr_list *list); /* Functions used for device addresses handling */ @@ -3023,6 +3032,38 @@ void dev_uc_unsync(struct net_device *to, struct net_device *from); void dev_uc_flush(struct net_device *dev); void dev_uc_init(struct net_device *dev); +/** + * __dev_uc_sync - Synchonize device's unicast list + * @dev: device to sync + * @sync: function to call if address should be added + * @unsync: function to call if address should be removed + * + * Add newly added addresses to the interface, and release + * addresses that have been deleted. 
+ **/ +static inline int __dev_uc_sync(struct net_device *dev, + int (*sync)(struct net_device *, + const unsigned char *), + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); +} + +/** + * __dev_uc_unsync - Remove synchonized addresses from device + * @dev: device to sync + * @unsync: function to call if address should be removed + * + * Remove all addresses that were added to the device by dev_uc_sync(). + **/ +static inline void __dev_uc_unsync(struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + __hw_addr_unsync_dev(&dev->uc, dev, unsync); +} + /* Functions used for multicast addresses handling */ int dev_mc_add(struct net_device *dev, const unsigned char *addr); int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); @@ -3035,6 +3076,38 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from); void dev_mc_flush(struct net_device *dev); void dev_mc_init(struct net_device *dev); +/** + * __dev_mc_sync - Synchonize device's multicast list + * @dev: device to sync + * @sync: function to call if address should be added + * @unsync: function to call if address should be removed + * + * Add newly added addresses to the interface, and release + * addresses that have been deleted. + **/ +static inline int __dev_mc_sync(struct net_device *dev, + int (*sync)(struct net_device *, + const unsigned char *), + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); +} + +/** + * __dev_mc_unsync - Remove synchonized addresses from device + * @dev: device to sync + * @unsync: function to call if address should be removed + * + * Remove all addresses that were added to the device by dev_mc_sync(). + **/ +static inline void __dev_mc_unsync(struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + __hw_addr_unsync_dev(&dev->mc, dev, unsync); +} + /* Functions used for secondary unicast and multicast support */ void dev_set_rx_mode(struct net_device *dev); void __dev_set_rx_mode(struct net_device *dev); diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 329d5794e7dc..b6b230600b97 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -225,6 +225,91 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, } EXPORT_SYMBOL(__hw_addr_unsync); +/** + * __hw_addr_sync_dev - Synchonize device's multicast list + * @list: address list to syncronize + * @dev: device to sync + * @sync: function to call if address should be added + * @unsync: function to call if address should be removed + * + * This funciton is intended to be called from the ndo_set_rx_mode + * function of devices that require explicit address add/remove + * notifications. The unsync function may be NULL in which case + * the addresses requiring removal will simply be removed without + * any notification to the device. 
+ **/ +int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + int err; + + /* first go through and flush out any stale entries */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { + if (!ha->sync_cnt || ha->refcount != 1) + continue; + + /* if unsync is defined and fails defer unsyncing address */ + if (unsync && unsync(dev, ha->addr)) + continue; + + ha->sync_cnt--; + __hw_addr_del_entry(list, ha, false, false); + } + + /* go through and sync new entries to the list */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { + if (ha->sync_cnt) + continue; + + err = sync(dev, ha->addr); + if (err) + return err; + + ha->sync_cnt++; + ha->refcount++; + } + + return 0; +} +EXPORT_SYMBOL(__hw_addr_sync_dev); + +/** + * __hw_addr_unsync_dev - Remove synchonized addresses from device + * @list: address list to remove syncronized addresses from + * @dev: device to sync + * @unsync: function to call if address should be removed + * + * Remove all addresses that were added to the device by __hw_addr_sync_dev(). + * This function is intended to be called from the ndo_stop or ndo_open + * functions on devices that require explicit address add/remove + * notifications. If the unsync function pointer is NULL then this function + * can be used to just reset the sync_cnt for the addresses in the list. + **/ +void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + + list_for_each_entry_safe(ha, tmp, &list->list, list) { + if (!ha->sync_cnt) + continue; + + /* if unsync is defined and fails defer unsyncing address */ + if (unsync && unsync(dev, ha->addr)) + continue; + + ha->sync_cnt--; + __hw_addr_del_entry(list, ha, false, false); + } +} +EXPORT_SYMBOL(__hw_addr_unsync_dev); + static void __hw_addr_flush(struct netdev_hw_addr_list *list) { struct netdev_hw_addr *ha, *tmp; -- cgit v1.2.3-59-g8ed1b From 73f156a6e8c1074ac6327e0abd1169e95eb66463 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 2 Jun 2014 05:26:03 -0700 Subject: inetpeer: get rid of ip_id_count Ideally, we would need to generate IP ID using a per destination IP generator. linux kernels used inet_peer cache for this purpose, but this had a huge cost on servers disabling MTU discovery. 1) each inet_peer struct consumes 192 bytes 2) inetpeer cache uses a binary tree of inet_peer structs, with a nominal size of ~66000 elements under load. 3) lookups in this tree are hitting a lot of cache lines, as tree depth is about 20. 4) If server deals with many tcp flows, we have a high probability of not finding the inet_peer, allocating a fresh one, inserting it in the tree with same initial ip_id_count, (cf secure_ip_id()) 5) We garbage collect inet_peer aggressively. IP ID generation do not have to be 'perfect' Goal is trying to avoid duplicates in a short period of time, so that reassembly units have a chance to complete reassembly of fragments belonging to one message before receiving other fragments with a recycled ID. We simply use an array of generators, and a Jenkin hash using the dst IP as a key. ipv6_select_ident() is put back into net/ipv6/ip6_output.c where it belongs (it is only used from this file) secure_ip_id() and secure_ipv6_id() no longer are needed. 
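As a rough illustration of the scheme (sketch only; the names mirror the ip_idents_reserve() and __ip_select_ident() helpers added by this patch, and the concrete values are invented), a three-segment GSO packet reserves three consecutive IDs from the generator slot picked by hashing the destination address:

        hash    = jhash_1word((__force u32)iph->daddr, ip_idents_hashrnd);
        id      = ip_idents_reserve(hash, 3);   /* e.g. returns 100, slot advances to 103 */
        iph->id = htons(id);                    /* the segments then carry IDs 100, 101, 102 */

Duplicate IDs toward a given destination then require the shared 16-bit counter of that slot to wrap within a reassembly window, which is the property reassembly units actually rely on.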
Rename ip_select_ident_more() to ip_select_ident_segs() to avoid unnecessary decrement/increment of the number of segments. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ppp/pptp.c | 2 +- include/net/inetpeer.h | 23 +++------------------ include/net/ip.h | 40 ++++++++++++++++++++---------------- include/net/ipv6.h | 2 -- include/net/secure_seq.h | 2 -- net/core/secure_seq.c | 25 ----------------------- net/ipv4/igmp.c | 4 ++-- net/ipv4/inetpeer.c | 18 ----------------- net/ipv4/ip_output.c | 7 +++---- net/ipv4/ip_tunnel_core.c | 2 +- net/ipv4/ipmr.c | 2 +- net/ipv4/raw.c | 2 +- net/ipv4/route.c | 45 +++++++++++++++-------------------------- net/ipv4/xfrm4_mode_tunnel.c | 2 +- net/ipv6/ip6_output.c | 12 +++++++++++ net/ipv6/output_core.c | 30 --------------------------- net/netfilter/ipvs/ip_vs_xmit.c | 2 +- 17 files changed, 65 insertions(+), 155 deletions(-) (limited to 'net/core') diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 01805319e1e0..1aff970be33e 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) nf_reset(skb); skb->ip_summed = CHECKSUM_NONE; - ip_select_ident(skb, &rt->dst, NULL); + ip_select_ident(skb, NULL); ip_send_check(iph); ip_local_out(skb); diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 6efe73c79c52..823ec7bb9c67 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h @@ -41,14 +41,13 @@ struct inet_peer { struct rcu_head gc_rcu; }; /* - * Once inet_peer is queued for deletion (refcnt == -1), following fields - * are not available: rid, ip_id_count + * Once inet_peer is queued for deletion (refcnt == -1), following field + * is not available: rid * We can share memory with rcu_head to help keep inet_peer small. 
*/ union { struct { atomic_t rid; /* Frag reception counter */ - atomic_t ip_id_count; /* IP ID for the next packet */ }; struct rcu_head rcu; struct inet_peer *gc_next; @@ -165,7 +164,7 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout); void inetpeer_invalidate_tree(struct inet_peer_base *); /* - * temporary check to make sure we dont access rid, ip_id_count, tcp_ts, + * temporary check to make sure we dont access rid, tcp_ts, * tcp_ts_stamp if no refcount is taken on inet_peer */ static inline void inet_peer_refcheck(const struct inet_peer *p) @@ -173,20 +172,4 @@ static inline void inet_peer_refcheck(const struct inet_peer *p) WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0); } - -/* can be called with or without local BH being disabled */ -static inline int inet_getid(struct inet_peer *p, int more) -{ - int old, new; - more++; - inet_peer_refcheck(p); - do { - old = atomic_read(&p->ip_id_count); - new = old + more; - if (!new) - new = 1; - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old); - return new; -} - #endif /* _NET_INETPEER_H */ diff --git a/include/net/ip.h b/include/net/ip.h index 2e4947895d75..0e795df05ec9 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -309,9 +309,19 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) } } -void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more); +#define IP_IDENTS_SZ 2048u +extern atomic_t *ip_idents; -static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk) +static inline u32 ip_idents_reserve(u32 hash, int segs) +{ + atomic_t *id_ptr = ip_idents + hash % IP_IDENTS_SZ; + + return atomic_add_return(segs, id_ptr) - segs; +} + +void __ip_select_ident(struct iphdr *iph, int segs); + +static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs) { struct iphdr *iph = ip_hdr(skb); @@ -321,24 +331,20 @@ static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, s * does not change, they drop every other packet in * a TCP stream using header compression. */ - iph->id = (sk && inet_sk(sk)->inet_daddr) ? 
- htons(inet_sk(sk)->inet_id++) : 0; - } else - __ip_select_ident(iph, dst, 0); -} - -static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more) -{ - struct iphdr *iph = ip_hdr(skb); - - if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) { if (sk && inet_sk(sk)->inet_daddr) { iph->id = htons(inet_sk(sk)->inet_id); - inet_sk(sk)->inet_id += 1 + more; - } else + inet_sk(sk)->inet_id += segs; + } else { iph->id = 0; - } else - __ip_select_ident(iph, dst, more); + } + } else { + __ip_select_ident(iph, segs); + } +} + +static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk) +{ + ip_select_ident_segs(skb, sk, 1); } static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index ba810d0546bc..574337fe72dd 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -668,8 +668,6 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); } -void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt); - int ip6_dst_hoplimit(struct dst_entry *dst); static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6, diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h index f257486f17be..3f36d45b714a 100644 --- a/include/net/secure_seq.h +++ b/include/net/secure_seq.h @@ -3,8 +3,6 @@ #include -__u32 secure_ip_id(__be32 daddr); -__u32 secure_ipv6_id(const __be32 daddr[4]); u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport); diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index 897da56f3aff..ba71212f0251 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -85,31 +85,6 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral); #endif #ifdef CONFIG_INET -__u32 secure_ip_id(__be32 daddr) -{ - u32 hash[MD5_DIGEST_WORDS]; - - net_secret_init(); - hash[0] = (__force __u32) daddr; - hash[1] = net_secret[13]; - hash[2] = net_secret[14]; - hash[3] = net_secret[15]; - - md5_transform(hash, net_secret); - - return hash[0]; -} - -__u32 secure_ipv6_id(const __be32 daddr[4]) -{ - __u32 hash[4]; - - net_secret_init(); - memcpy(hash, daddr, 16); - md5_transform(hash, net_secret); - - return hash[0]; -} __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 17d34e3c2ac3..6748d420f714 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -369,7 +369,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) pip->saddr = fl4.saddr; pip->protocol = IPPROTO_IGMP; pip->tot_len = 0; /* filled in later */ - ip_select_ident(skb, &rt->dst, NULL); + ip_select_ident(skb, NULL); ((u8 *)&pip[1])[0] = IPOPT_RA; ((u8 *)&pip[1])[1] = 4; ((u8 *)&pip[1])[2] = 0; @@ -714,7 +714,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, iph->daddr = dst; iph->saddr = fl4.saddr; iph->protocol = IPPROTO_IGMP; - ip_select_ident(skb, &rt->dst, NULL); + ip_select_ident(skb, NULL); ((u8 *)&iph[1])[0] = IPOPT_RA; ((u8 *)&iph[1])[1] = 4; ((u8 *)&iph[1])[2] = 0; diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index c98cf141f4ed..4ced1b9a97f0 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -26,20 +26,7 @@ * Theory of operations. * We keep one entry for each peer IP address. 
The nodes contains long-living * information about the peer which doesn't depend on routes. - * At this moment this information consists only of ID field for the next - * outgoing IP packet. This field is incremented with each packet as encoded - * in inet_getid() function (include/net/inetpeer.h). - * At the moment of writing this notes identifier of IP packets is generated - * to be unpredictable using this code only for packets subjected - * (actually or potentially) to defragmentation. I.e. DF packets less than - * PMTU in size when local fragmentation is disabled use a constant ID and do - * not use this code (see ip_select_ident() in include/net/ip.h). * - * Route cache entries hold references to our nodes. - * New cache entries get references via lookup by destination IP address in - * the avl tree. The reference is grabbed only when it's needed i.e. only - * when we try to output IP packet which needs an unpredictable ID (see - * __ip_select_ident() in net/ipv4/route.c). * Nodes are removed only when reference counter goes to 0. * When it's happened the node may be removed when a sufficient amount of * time has been passed since its last use. The less-recently-used entry can @@ -62,7 +49,6 @@ * refcnt: atomically against modifications on other CPU; * usually under some other lock to prevent node disappearing * daddr: unchangeable - * ip_id_count: atomic value (no lock needed) */ static struct kmem_cache *peer_cachep __read_mostly; @@ -497,10 +483,6 @@ relookup: p->daddr = *daddr; atomic_set(&p->refcnt, 1); atomic_set(&p->rid, 0); - atomic_set(&p->ip_id_count, - (daddr->family == AF_INET) ? - secure_ip_id(daddr->addr.a4) : - secure_ipv6_id(daddr->addr.a6)); p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; p->rate_tokens = 0; /* 60*HZ is arbitrary, but chosen enough high so that the first diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 6e231ab58d65..8d3b6b0e9857 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -148,7 +148,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr); iph->saddr = saddr; iph->protocol = sk->sk_protocol; - ip_select_ident(skb, &rt->dst, sk); + ip_select_ident(skb, sk); if (opt && opt->opt.optlen) { iph->ihl += opt->opt.optlen>>2; @@ -430,8 +430,7 @@ packet_routed: ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0); } - ip_select_ident_more(skb, &rt->dst, sk, - (skb_shinfo(skb)->gso_segs ?: 1) - 1); + ip_select_ident_segs(skb, sk, skb_shinfo(skb)->gso_segs ?: 1); /* TODO : should we use skb->sk here instead of sk ? 
*/ skb->priority = sk->sk_priority; @@ -1379,7 +1378,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk, iph->ttl = ttl; iph->protocol = sk->sk_protocol; ip_copy_addrs(iph, fl4); - ip_select_ident(skb, &rt->dst, sk); + ip_select_ident(skb, sk); if (opt) { iph->ihl += opt->optlen>>2; diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index bcf206c79005..847e69cbff7e 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -74,7 +74,7 @@ int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, iph->daddr = dst; iph->saddr = src; iph->ttl = ttl; - __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1); + __ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1); err = ip_local_out_sk(sk, skb); if (unlikely(net_xmit_eval(err))) diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 2bc9cc47f246..65bcaa789043 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1663,7 +1663,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) iph->protocol = IPPROTO_IPIP; iph->ihl = 5; iph->tot_len = htons(skb->len); - ip_select_ident(skb, skb_dst(skb), NULL); + ip_select_ident(skb, NULL); ip_send_check(iph); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index a9dbe58bdfe7..2c65160565e1 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -389,7 +389,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, iph->check = 0; iph->tot_len = htons(length); if (!iph->id) - ip_select_ident(skb, &rt->dst, NULL); + ip_select_ident(skb, NULL); iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 4154eb76b0ad..082239ffe34a 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -89,6 +89,7 @@ #include #include #include +#include #include #include #include @@ -456,39 +457,19 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, return neigh_create(&arp_tbl, pkey, dev); } -/* - * Peer allocation may fail only in serious out-of-memory conditions. However - * we still can generate some output. - * Random ID selection looks a bit dangerous because we have no chances to - * select ID being unique in a reasonable period of time. - * But broken packet identifier may be better than no packet at all. 
- */ -static void ip_select_fb_ident(struct iphdr *iph) -{ - static DEFINE_SPINLOCK(ip_fb_id_lock); - static u32 ip_fallback_id; - u32 salt; +atomic_t *ip_idents __read_mostly; +EXPORT_SYMBOL(ip_idents); - spin_lock_bh(&ip_fb_id_lock); - salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr); - iph->id = htons(salt & 0xFFFF); - ip_fallback_id = salt; - spin_unlock_bh(&ip_fb_id_lock); -} - -void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more) +void __ip_select_ident(struct iphdr *iph, int segs) { - struct net *net = dev_net(dst->dev); - struct inet_peer *peer; + static u32 ip_idents_hashrnd __read_mostly; + u32 hash, id; - peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1); - if (peer) { - iph->id = htons(inet_getid(peer, more)); - inet_putpeer(peer); - return; - } + net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd)); - ip_select_fb_ident(iph); + hash = jhash_1word((__force u32)iph->daddr, ip_idents_hashrnd); + id = ip_idents_reserve(hash, segs); + iph->id = htons(id); } EXPORT_SYMBOL(__ip_select_ident); @@ -2711,6 +2692,12 @@ int __init ip_rt_init(void) { int rc = 0; + ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL); + if (!ip_idents) + panic("IP: failed to allocate ip_idents\n"); + + prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents)); + #ifdef CONFIG_IP_ROUTE_CLASSID ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct)); if (!ip_rt_acct) diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index 05f2b484954f..91771a7c802f 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c @@ -58,12 +58,12 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ? 
0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF)); - ip_select_ident(skb, dst->child, NULL); top_iph->ttl = ip4_dst_hoplimit(dst->child); top_iph->saddr = x->props.saddr.a4; top_iph->daddr = x->id.daddr.a4; + ip_select_ident(skb, NULL); return 0; } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 85aaeca1f7f3..cb9df0eb4023 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -537,6 +537,18 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) skb_copy_secmark(to, from); } +static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) +{ + static u32 ip6_idents_hashrnd __read_mostly; + u32 hash, id; + + net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd)); + + hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd); + id = ip_idents_reserve(hash, 1); + fhdr->identification = htonl(id); +} + int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) { struct sk_buff *frag; diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index 6313abd53c9d..6179ac186ab9 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -8,36 +8,6 @@ #include #include -void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) -{ - static atomic_t ipv6_fragmentation_id; - struct in6_addr addr; - int old, new; - -#if IS_ENABLED(CONFIG_IPV6) - struct inet_peer *peer; - struct net *net; - - net = dev_net(rt->dst.dev); - peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1); - if (peer) { - fhdr->identification = htonl(inet_getid(peer, 0)); - inet_putpeer(peer); - return; - } -#endif - do { - old = atomic_read(&ipv6_fragmentation_id); - new = old + 1; - if (!new) - new = 1; - } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old); - - addr = rt->rt6i_dst.addr; - addr.s6_addr32[0] ^= (__force __be32)new; - fhdr->identification = htonl(secure_ipv6_id(addr.s6_addr32)); -} -EXPORT_SYMBOL(ipv6_select_ident); int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) { diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 487b55e04337..73ba1cc7a88d 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -883,7 +883,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, iph->daddr = cp->daddr.ip; iph->saddr = saddr; iph->ttl = old_iph->ttl; - ip_select_ident(skb, &rt->dst, NULL); + ip_select_ident(skb, NULL); /* Another hack: avoid icmp_send in ip_fragment */ skb->ignore_df = 1; -- cgit v1.2.3-59-g8ed1b From fe62d001372388abb15a324148c913f9b43722a8 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 15 May 2014 01:25:27 +0100 Subject: ethtool: Replace ethtool_ops::{get,set}_rxfh_indir() with {get,set}_rxfh() ETHTOOL_{G,S}RXFHINDIR and ETHTOOL_{G,S}RSSH should work for drivers regardless of whether they expose the hash key, unless you try to set a hash key for a driver that doesn't expose it. 
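For a driver that only exposes an indirection table, the converted hooks end up looking roughly like this (hypothetical sketch; the foo_* names and FOO_RETA_SIZE are illustrative and not taken from any in-tree driver). The core passes a NULL key when only the table is read or written through the old ETHTOOL_{G,S}RXFHINDIR path, so such a driver simply ignores it:

        static int foo_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
        {
                struct foo_priv *priv = netdev_priv(dev);
                unsigned int i;

                /* no get_rxfh_key_size hook, so key is NULL/unused here */
                for (i = 0; i < FOO_RETA_SIZE; i++)
                        indir[i] = priv->reta[i];
                return 0;
        }

        static const struct ethtool_ops foo_ethtool_ops = {
                .get_rxfh_indir_size    = foo_get_rxfh_indir_size,
                .get_rxfh               = foo_get_rxfh,
                .set_rxfh               = foo_set_rxfh,
        };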
Signed-off-by: Ben Hutchings Acked-by: Jeff Kirsher --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 15 ++++++++------- drivers/net/ethernet/broadcom/tg3.c | 8 ++++---- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 8 ++++---- drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 15 +++++++++------ drivers/net/ethernet/intel/igb/igb_ethtool.c | 9 +++++---- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 10 +++++----- drivers/net/ethernet/sfc/ethtool.c | 10 +++++----- drivers/net/vmxnet3/vmxnet3_ethtool.c | 8 ++++---- include/linux/ethtool.h | 6 ------ net/core/ethtool.c | 8 ++++---- 10 files changed, 48 insertions(+), 49 deletions(-) (limited to 'net/core') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 03224090ecf9..af138f8aa361 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -3316,7 +3316,7 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev) return T_ETH_INDIRECTION_TABLE_SIZE; } -static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir) +static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key) { struct bnx2x *bp = netdev_priv(dev); u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; @@ -3340,14 +3340,15 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir) return 0; } -static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir) +static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key) { struct bnx2x *bp = netdev_priv(dev); size_t i; for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { /* - * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy() + * The same as in bnx2x_get_rxfh: we can't use a memcpy() * as an internal storage of an indirection table is a u8 array * while indir->ring_index points to an array of u32. 
* @@ -3471,8 +3472,8 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .get_rxnfc = bnx2x_get_rxnfc, .set_rxnfc = bnx2x_set_rxnfc, .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, - .get_rxfh_indir = bnx2x_get_rxfh_indir, - .set_rxfh_indir = bnx2x_set_rxfh_indir, + .get_rxfh = bnx2x_get_rxfh, + .set_rxfh = bnx2x_set_rxfh, .get_channels = bnx2x_get_channels, .set_channels = bnx2x_set_channels, .get_module_info = bnx2x_get_module_info, @@ -3498,8 +3499,8 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = { .get_rxnfc = bnx2x_get_rxnfc, .set_rxnfc = bnx2x_set_rxnfc, .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, - .get_rxfh_indir = bnx2x_get_rxfh_indir, - .set_rxfh_indir = bnx2x_set_rxfh_indir, + .get_rxfh = bnx2x_get_rxfh, + .set_rxfh = bnx2x_set_rxfh, .get_channels = bnx2x_get_channels, .set_channels = bnx2x_set_channels, }; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ccd90156aebc..8c2314ed260c 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -12501,7 +12501,7 @@ static u32 tg3_get_rxfh_indir_size(struct net_device *dev) return size; } -static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir) +static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key) { struct tg3 *tp = netdev_priv(dev); int i; @@ -12512,7 +12512,7 @@ static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir) return 0; } -static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir) +static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key) { struct tg3 *tp = netdev_priv(dev); size_t i; @@ -14044,8 +14044,8 @@ static const struct ethtool_ops tg3_ethtool_ops = { .get_sset_count = tg3_get_sset_count, .get_rxnfc = tg3_get_rxnfc, .get_rxfh_indir_size = tg3_get_rxfh_indir_size, - .get_rxfh_indir = tg3_get_rxfh_indir, - .set_rxfh_indir = tg3_set_rxfh_indir, + .get_rxfh = tg3_get_rxfh, + .set_rxfh = tg3_set_rxfh, .get_channels = tg3_get_channels, .set_channels = tg3_set_channels, .get_ts_info = tg3_get_ts_info, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 266a5bc6aedf..8cf6be93f491 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2739,7 +2739,7 @@ static u32 get_rss_table_size(struct net_device *dev) return pi->rss_size; } -static int get_rss_table(struct net_device *dev, u32 *p) +static int get_rss_table(struct net_device *dev, u32 *p, u8 *key) { const struct port_info *pi = netdev_priv(dev); unsigned int n = pi->rss_size; @@ -2749,7 +2749,7 @@ static int get_rss_table(struct net_device *dev, u32 *p) return 0; } -static int set_rss_table(struct net_device *dev, const u32 *p) +static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key) { unsigned int i; struct port_info *pi = netdev_priv(dev); @@ -2851,8 +2851,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .set_wol = set_wol, .get_rxnfc = get_rxnfc, .get_rxfh_indir_size = get_rss_table_size, - .get_rxfh_indir = get_rss_table, - .set_rxfh_indir = set_rss_table, + .get_rxfh = get_rss_table, + .set_rxfh = set_rss_table, .flash_device = set_flash, }; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 77e786d2d0e0..dbc8986c2dae 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -626,13 +626,14 @@ static u32 
i40evf_get_rxfh_indir_size(struct net_device *netdev) } /** - * i40evf_get_rxfh_indir - get the rx flow hash indirection table + * i40evf_get_rxfh - get the rx flow hash indirection table * @netdev: network interface device structure * @indir: indirection table + * @key: hash key (will be %NULL until get_rxfh_key_size is implemented) * * Reads the indirection table directly from the hardware. Always returns 0. **/ -static int i40evf_get_rxfh_indir(struct net_device *netdev, u32 *indir) +static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) { struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_hw *hw = &adapter->hw; @@ -650,14 +651,16 @@ static int i40evf_get_rxfh_indir(struct net_device *netdev, u32 *indir) } /** - * i40evf_set_rxfh_indir - set the rx flow hash indirection table + * i40evf_set_rxfh - set the rx flow hash indirection table * @netdev: network interface device structure * @indir: indirection table + * @key: hash key (will be %NULL until get_rxfh_key_size is implemented) * * Returns -EINVAL if the table specifies an inavlid queue id, otherwise * returns 0 after programming the table. **/ -static int i40evf_set_rxfh_indir(struct net_device *netdev, const u32 *indir) +static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key) { struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_hw *hw = &adapter->hw; @@ -691,8 +694,8 @@ static struct ethtool_ops i40evf_ethtool_ops = { .get_rxnfc = i40evf_get_rxnfc, .set_rxnfc = i40evf_set_rxnfc, .get_rxfh_indir_size = i40evf_get_rxfh_indir_size, - .get_rxfh_indir = i40evf_get_rxfh_indir, - .set_rxfh_indir = i40evf_set_rxfh_indir, + .get_rxfh = i40evf_get_rxfh, + .set_rxfh = i40evf_set_rxfh, .get_channels = i40evf_get_channels, }; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index a84297c85fb1..d8bbcf1873ca 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2830,7 +2830,7 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev) return IGB_RETA_SIZE; } -static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir) +static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) { struct igb_adapter *adapter = netdev_priv(netdev); int i; @@ -2876,7 +2876,8 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter) } } -static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir) +static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -3025,8 +3026,8 @@ static const struct ethtool_ops igb_ethtool_ops = { .get_module_info = igb_get_module_info, .get_module_eeprom = igb_get_module_eeprom, .get_rxfh_indir_size = igb_get_rxfh_indir_size, - .get_rxfh_indir = igb_get_rxfh_indir, - .set_rxfh_indir = igb_set_rxfh_indir, + .get_rxfh = igb_get_rxfh, + .set_rxfh = igb_set_rxfh, .get_channels = igb_get_channels, .set_channels = igb_set_channels, .begin = igb_ethtool_begin, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index a72d99fd7a2d..263a1c7a3370 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -564,7 +564,7 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev) return priv->rx_ring_num; } -static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 
*ring_index) +static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_rss_map *rss_map = &priv->rss_map; @@ -582,8 +582,8 @@ static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index) return err; } -static int mlx4_en_set_rxfh_indir(struct net_device *dev, - const u32 *ring_index) +static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, + const u8 *key) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; @@ -1224,8 +1224,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = { .get_rxnfc = mlx4_en_get_rxnfc, .set_rxnfc = mlx4_en_set_rxnfc, .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size, - .get_rxfh_indir = mlx4_en_get_rxfh_indir, - .set_rxfh_indir = mlx4_en_set_rxfh_indir, + .get_rxfh = mlx4_en_get_rxfh, + .set_rxfh = mlx4_en_set_rxfh, .get_channels = mlx4_en_get_channels, .set_channels = mlx4_en_set_channels, .get_ts_info = mlx4_en_get_ts_info, diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 0de8b07c24c2..74739c4b9997 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -1033,7 +1033,7 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) 0 : ARRAY_SIZE(efx->rx_indir_table)); } -static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir) +static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key) { struct efx_nic *efx = netdev_priv(net_dev); @@ -1041,8 +1041,8 @@ static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir) return 0; } -static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, - const u32 *indir) +static int efx_ethtool_set_rxfh(struct net_device *net_dev, + const u32 *indir, const u8 *key) { struct efx_nic *efx = netdev_priv(net_dev); @@ -1125,8 +1125,8 @@ const struct ethtool_ops efx_ethtool_ops = { .get_rxnfc = efx_ethtool_get_rxnfc, .set_rxnfc = efx_ethtool_set_rxnfc, .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, - .get_rxfh_indir = efx_ethtool_get_rxfh_indir, - .set_rxfh_indir = efx_ethtool_set_rxfh_indir, + .get_rxfh = efx_ethtool_get_rxfh, + .set_rxfh = efx_ethtool_set_rxfh, .get_ts_info = efx_ethtool_get_ts_info, .get_module_info = efx_ethtool_get_module_info, .get_module_eeprom = efx_ethtool_get_module_eeprom, diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 00e120296e92..9396cca93b09 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -579,7 +579,7 @@ vmxnet3_get_rss_indir_size(struct net_device *netdev) } static int -vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p) +vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; @@ -592,7 +592,7 @@ vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p) } static int -vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p) +vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key) { unsigned int i; unsigned long flags; @@ -628,8 +628,8 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = { .get_rxnfc = vmxnet3_get_rxnfc, #ifdef VMXNET3_RSS .get_rxfh_indir_size = vmxnet3_get_rss_indir_size, - .get_rxfh_indir = vmxnet3_get_rss_indir, - .set_rxfh_indir = vmxnet3_set_rss_indir, + .get_rxfh = vmxnet3_get_rss, + .set_rxfh = vmxnet3_set_rss, #endif }; diff --git 
a/include/linux/ethtool.h b/include/linux/ethtool.h index 874fde01d398..e658229fee39 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -158,15 +158,11 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) * Returns zero if not supported for this specific device. * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. * Returns zero if not supported for this specific device. - * @get_rxfh_indir: Get the contents of the RX flow hash indirection table. - * Will not be called if @get_rxfh_indir_size returns zero. * @get_rxfh: Get the contents of the RX flow hash indirection table and hash * key. * Will only be called if one or both of @get_rxfh_indir_size and * @get_rxfh_key_size are implemented and return non-zero. * Returns a negative error code or zero. - * @set_rxfh_indir: Set the contents of the RX flow hash indirection table. - * Will not be called if @get_rxfh_indir_size returns zero. * @set_rxfh: Set the contents of the RX flow hash indirection table and/or * hash key. In case only the indirection table or hash key is to be * changed, the other argument will be %NULL. @@ -248,8 +244,6 @@ struct ethtool_ops { int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key); int (*set_rxfh)(struct net_device *, const u32 *indir, const u8 *key); - int (*get_rxfh_indir)(struct net_device *, u32 *); - int (*set_rxfh_indir)(struct net_device *, const u32 *); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); diff --git a/net/core/ethtool.c b/net/core/ethtool.c index b8857348bdf3..8ae452afb545 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -582,7 +582,7 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, int ret; if (!dev->ethtool_ops->get_rxfh_indir_size || - !dev->ethtool_ops->get_rxfh_indir) + !dev->ethtool_ops->get_rxfh) return -EOPNOTSUPP; dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev); if (dev_size == 0) @@ -608,7 +608,7 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, if (!indir) return -ENOMEM; - ret = dev->ethtool_ops->get_rxfh_indir(dev, indir); + ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL); if (ret) goto out; @@ -632,7 +632,7 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev, int ret; u32 ringidx_offset = offsetof(struct ethtool_rxfh_indir, ring_index[0]); - if (!ops->get_rxfh_indir_size || !ops->set_rxfh_indir || + if (!ops->get_rxfh_indir_size || !ops->set_rxfh || !ops->get_rxnfc) return -EOPNOTSUPP; @@ -669,7 +669,7 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev, goto out; } - ret = ops->set_rxfh_indir(dev, indir); + ret = ops->set_rxfh(dev, indir, NULL); out: kfree(indir); -- cgit v1.2.3-59-g8ed1b From f062a3844845d267e3716cbc188ad502a15898b7 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 15 May 2014 16:28:07 +0100 Subject: ethtool: Check that reserved fields of struct ethtool_rxfh are 0 We should fail rather than silently ignoring use of these extensions. 
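Userspace consequently has to zero the whole request before filling in the members it uses, e.g. (illustrative only; error handling and the SIOCETHTOOL plumbing are omitted):

        struct ethtool_rxfh *rxfh;
        size_t len = sizeof(*rxfh) + indir_size * sizeof(__u32) + key_size;

        rxfh = calloc(1, len);                  /* rss_context and rsvd[] stay zero */
        rxfh->cmd = ETHTOOL_GRSSH;
        rxfh->indir_size = indir_size;
        rxfh->key_size = key_size;

A request that carries a non-zero rss_context or reserved word now fails with EINVAL instead of having those fields silently ignored.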
Signed-off-by: Ben Hutchings --- net/core/ethtool.c | 89 ++++++++++++++++++++++-------------------------------- 1 file changed, 36 insertions(+), 53 deletions(-) (limited to 'net/core') diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 8ae452afb545..17cb912793fa 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -681,11 +681,11 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev, { int ret; const struct ethtool_ops *ops = dev->ethtool_ops; - u32 user_indir_size = 0, user_key_size = 0; + u32 user_indir_size, user_key_size; u32 dev_indir_size = 0, dev_key_size = 0; + struct ethtool_rxfh rxfh; u32 total_size; - u32 indir_offset, indir_bytes; - u32 key_offset; + u32 indir_bytes; u32 *indir = NULL; u8 *hkey = NULL; u8 *rss_config; @@ -697,33 +697,24 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev, if (ops->get_rxfh_indir_size) dev_indir_size = ops->get_rxfh_indir_size(dev); - - indir_offset = offsetof(struct ethtool_rxfh, indir_size); - - if (copy_from_user(&user_indir_size, - useraddr + indir_offset, - sizeof(user_indir_size))) - return -EFAULT; - - if (copy_to_user(useraddr + indir_offset, - &dev_indir_size, sizeof(dev_indir_size))) - return -EFAULT; - if (ops->get_rxfh_key_size) dev_key_size = ops->get_rxfh_key_size(dev); if ((dev_key_size + dev_indir_size) == 0) return -EOPNOTSUPP; - key_offset = offsetof(struct ethtool_rxfh, key_size); - - if (copy_from_user(&user_key_size, - useraddr + key_offset, - sizeof(user_key_size))) + if (copy_from_user(&rxfh, useraddr, sizeof(rxfh))) return -EFAULT; + user_indir_size = rxfh.indir_size; + user_key_size = rxfh.key_size; - if (copy_to_user(useraddr + key_offset, - &dev_key_size, sizeof(dev_key_size))) + /* Check that reserved fields are 0 for now */ + if (rxfh.rss_context || rxfh.rsvd[0] || rxfh.rsvd[1]) + return -EINVAL; + + rxfh.indir_size = dev_indir_size; + rxfh.key_size = dev_key_size; + if (copy_to_user(useraddr, &rxfh, sizeof(rxfh))) return -EFAULT; /* If the user buffer size is 0, this is just a query for the @@ -768,12 +759,11 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, int ret; const struct ethtool_ops *ops = dev->ethtool_ops; struct ethtool_rxnfc rx_rings; - u32 user_indir_size = 0, dev_indir_size = 0, i; - u32 user_key_size = 0, dev_key_size = 0; + struct ethtool_rxfh rxfh; + u32 dev_indir_size = 0, dev_key_size = 0, i; u32 *indir = NULL, indir_bytes = 0; u8 *hkey = NULL; u8 *rss_config; - u32 indir_offset, key_offset; u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]); if (!(ops->get_rxfh_indir_size || ops->get_rxfh_key_size) || @@ -782,40 +772,33 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, if (ops->get_rxfh_indir_size) dev_indir_size = ops->get_rxfh_indir_size(dev); - - indir_offset = offsetof(struct ethtool_rxfh, indir_size); - if (copy_from_user(&user_indir_size, - useraddr + indir_offset, - sizeof(user_indir_size))) - return -EFAULT; - if (ops->get_rxfh_key_size) dev_key_size = dev->ethtool_ops->get_rxfh_key_size(dev); - if ((dev_key_size + dev_indir_size) == 0) return -EOPNOTSUPP; - key_offset = offsetof(struct ethtool_rxfh, key_size); - if (copy_from_user(&user_key_size, - useraddr + key_offset, - sizeof(user_key_size))) + if (copy_from_user(&rxfh, useraddr, sizeof(rxfh))) return -EFAULT; + /* Check that reserved fields are 0 for now */ + if (rxfh.rss_context || rxfh.rsvd[0] || rxfh.rsvd[1]) + return -EINVAL; + /* If either indir or hash key is valid, proceed further. 
* It is not valid to request that both be unchanged. */ - if ((user_indir_size && - user_indir_size != ETH_RXFH_INDIR_NO_CHANGE && - user_indir_size != dev_indir_size) || - (user_key_size && (user_key_size != dev_key_size)) || - (user_indir_size == ETH_RXFH_INDIR_NO_CHANGE && - user_key_size == 0)) + if ((rxfh.indir_size && + rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE && + rxfh.indir_size != dev_indir_size) || + (rxfh.key_size && (rxfh.key_size != dev_key_size)) || + (rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE && + rxfh.key_size == 0)) return -EINVAL; - if (user_indir_size != ETH_RXFH_INDIR_NO_CHANGE) + if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) indir_bytes = dev_indir_size * sizeof(indir[0]); - rss_config = kzalloc(indir_bytes + user_key_size, GFP_USER); + rss_config = kzalloc(indir_bytes + rxfh.key_size, GFP_USER); if (!rss_config) return -ENOMEM; @@ -824,29 +807,29 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, if (ret) goto out; - /* user_indir_size == 0 means reset the indir table to default. - * user_indir_size == ETH_RXFH_INDIR_NO_CHANGE means leave it unchanged. + /* rxfh.indir_size == 0 means reset the indir table to default. + * rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE means leave it unchanged. */ - if (user_indir_size && - user_indir_size != ETH_RXFH_INDIR_NO_CHANGE) { + if (rxfh.indir_size && + rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) { indir = (u32 *)rss_config; ret = ethtool_copy_validate_indir(indir, useraddr + rss_cfg_offset, &rx_rings, - user_indir_size); + rxfh.indir_size); if (ret) goto out; - } else if (user_indir_size == 0) { + } else if (rxfh.indir_size == 0) { indir = (u32 *)rss_config; for (i = 0; i < dev_indir_size; i++) indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data); } - if (user_key_size) { + if (rxfh.key_size) { hkey = rss_config + indir_bytes; if (copy_from_user(hkey, useraddr + rss_cfg_offset + indir_bytes, - user_key_size)) { + rxfh.key_size)) { ret = -EFAULT; goto out; } -- cgit v1.2.3-59-g8ed1b From e51fb152318ee6502a2d224771b0bbbbda046128 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Tue, 3 Jun 2014 16:40:47 -0700 Subject: rtnetlink: fix a memory leak when ->newlink fails It is possible that ->newlink() fails before registering the device, in this case we should just free it, it's safe to call free_netdev(). Fixes: commit 0e0eee2465df77bcec2 (net: correct error path in rtnl_newlink()) Cc: David S. Miller Cc: Eric Dumazet Signed-off-by: Cong Wang Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'net/core') diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 2d8d8fcfa060..f4e9037f9a0c 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2019,11 +2019,15 @@ replay: if (ops->newlink) { err = ops->newlink(net, dev, tb, data); /* Drivers should call free_netdev() in ->destructor - * and unregister it on failure so that device could be - * finally freed in rtnl_unlock. + * and unregister it on failure after registration + * so that device could be finally freed in rtnl_unlock. 
*/ - if (err < 0) + if (err < 0) { + /* If device is not registered at all, free it now */ + if (dev->reg_state == NETREG_UNINITIALIZED) + free_netdev(dev); goto out; + } } else { err = register_netdevice(dev); if (err < 0) { -- cgit v1.2.3-59-g8ed1b From 92ff71b8fe9cd9c673615fc6f3870af7376d7c84 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Tue, 3 Jun 2014 17:11:54 -0700 Subject: net: remove some unless free on failure in alloc_netdev_mqs() When we jump to free_pcpu on failure in alloc_netdev_mqs() rx and tx queues are not yet allocated, so no need to free them. Cc: David S. Miller Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- net/core/dev.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'net/core') diff --git a/net/core/dev.c b/net/core/dev.c index 0355ca5d2924..1ba2cfe3f8e8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6503,11 +6503,6 @@ free_all: free_pcpu: free_percpu(dev->pcpu_refcnt); - netif_free_tx_queues(dev); -#ifdef CONFIG_SYSFS - kfree(dev->_rx); -#endif - free_dev: netdev_freemem(dev); return NULL; -- cgit v1.2.3-59-g8ed1b From 7e2b10c1e52ca37fb522be49f4be367f9311d0cd Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Wed, 4 Jun 2014 17:20:02 -0700 Subject: net: Support for multiple checksums with gso When creating a GSO packet segment we may need to set more than one checksum in the packet (for instance a TCP checksum and UDP checksum for VXLAN encapsulation). To be efficient, we want to do checksum calculation for any part of the packet at most once. This patch adds csum_start offset to skb_gso_cb. This tracks the starting offset for skb->csum which is initially set in skb_segment. When a protocol needs to compute a transport checksum it calls gso_make_checksum which computes the checksum value from the start of transport header to csum_start and then adds in skb->csum to get the full checksum. skb->csum and csum_start are then updated to reflect the checksum of the resultant packet starting from the transport header. This patch also adds a flag to skbuff, encap_hdr_csum, which is set in *gso_segment fucntions to indicate that a tunnel protocol needs checksum calculation Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- include/linux/skbuff.h | 26 +++++++++++++++++++++++++- net/core/skbuff.c | 8 +++++++- net/ipv4/ip_tunnel_core.c | 8 ++++++++ 3 files changed, 40 insertions(+), 2 deletions(-) (limited to 'net/core') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7a9beeb1c458..d8d397acb52c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -567,7 +567,8 @@ struct sk_buff { * headers if needed */ __u8 encapsulation:1; - /* 6/8 bit hole (depending on ndisc_nodetype presence) */ + __u8 encap_hdr_csum:1; + /* 5/7 bit hole (depending on ndisc_nodetype presence) */ kmemcheck_bitfield_end(flags2); #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL @@ -2988,6 +2989,7 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb) struct skb_gso_cb { int mac_offset; int encap_level; + __u16 csum_start; }; #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb) @@ -3012,6 +3014,28 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) return 0; } +/* Compute the checksum for a gso segment. First compute the checksum value + * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and + * then add in skb->csum (checksum from csum_start to end of packet). 
+ * skb->csum and csum_start are then updated to reflect the checksum of the + * resultant packet starting from the transport header-- the resultant checksum + * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo + * header. + */ +static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) +{ + int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) - + skb_transport_offset(skb); + __u16 csum; + + csum = csum_fold(csum_partial(skb_transport_header(skb), + plen, skb->csum)); + skb->csum = res; + SKB_GSO_CB(skb)->csum_start -= plen; + + return csum; +} + static inline bool skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->gso_size; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 3f6c7e8be8a4..05f4bef2ce12 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2885,7 +2885,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, if (unlikely(!proto)) return ERR_PTR(-EINVAL); - csum = !!can_checksum_protocol(features, proto); + csum = !head_skb->encap_hdr_csum && + !!can_checksum_protocol(features, proto); + __skb_push(head_skb, doffset); headroom = skb_headroom(head_skb); pos = skb_headlen(head_skb); @@ -2983,6 +2985,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, nskb->csum = skb_copy_and_csum_bits(head_skb, offset, skb_put(nskb, len), len, 0); + SKB_GSO_CB(nskb)->csum_start = + skb_headroom(nskb) + offset; continue; } @@ -3052,6 +3056,8 @@ perform_csum_check: nskb->csum = skb_checksum(nskb, doffset, nskb->len - doffset, 0); nskb->ip_summed = CHECKSUM_NONE; + SKB_GSO_CB(nskb)->csum_start = + skb_headroom(nskb) + doffset; } } while ((offset += len) < head_skb->len); diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 847e69cbff7e..f4c987bb7e94 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -135,6 +135,14 @@ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, return skb; } + /* If packet is not gso and we are resolving any partial checksum, + * clear encapsulation flag. This allows setting CHECKSUM_PARTIAL + * on the outer header without confusing devices that implement + * NETIF_F_IP_CSUM with encapsulation. + */ + if (csum_help) + skb->encapsulation = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) { err = skb_checksum_help(skb); if (unlikely(err)) -- cgit v1.2.3-59-g8ed1b From 4cb28970a23ff209199b0a4358d68efe82c8f493 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Mon, 2 Jun 2014 15:55:22 -0700 Subject: net: use the new API kvfree() It is available since v3.15-rc5. Cc: Pablo Neira Ayuso Cc: "David S. Miller" Signed-off-by: Cong Wang Signed-off-by: David S. 
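The arithmetic behind gso_make_checksum() above can be seen in a small stand-alone sketch (plain user-space C, not kernel code): a one's-complement sum taken over the trailing part of a segment can later be extended over the leading headers, so no byte is ever summed twice. The buffer contents and the 20-byte split point are arbitrary example values; the split is kept 2-byte aligned so the word sums add directly.

#include <stdint.h>
#include <stdio.h>

static uint32_t csum_add(uint32_t sum, const uint8_t *buf, size_t len)
{
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)buf[i] << 8 | buf[i + 1];
    if (len & 1)
        sum += (uint32_t)buf[len - 1] << 8;
    return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    uint8_t seg[64];
    size_t split = 20;          /* stands in for csum_start */
    uint32_t sum;
    size_t i;

    for (i = 0; i < sizeof(seg); i++)
        seg[i] = (uint8_t)(i * 7 + 3);

    /* Tail first (the part cached like skb->csum) ... */
    sum = csum_add(0, seg + split, sizeof(seg) - split);
    /* ... then extended over the headers in front of it. */
    sum = csum_add(sum, seg, split);

    printf("split sum %#06x, whole sum %#06x\n",
           csum_fold(sum), csum_fold(csum_add(0, seg, sizeof(seg))));
    return 0;
}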
Miller --- net/core/dev.c | 10 ++-------- net/ipv4/tcp_metrics.c | 5 +---- net/netfilter/ipset/ip_set_core.c | 5 +---- net/netfilter/nft_hash.c | 5 +---- net/netfilter/xt_recent.c | 5 +---- net/sched/sch_choke.c | 7 +------ net/sched/sch_fq.c | 5 +---- net/sched/sch_fq_codel.c | 7 +------ net/sched/sch_hhf.c | 7 +------ net/sched/sch_netem.c | 7 +------ net/sched/sch_sfq.c | 7 +------ 11 files changed, 12 insertions(+), 58 deletions(-) (limited to 'net/core') diff --git a/net/core/dev.c b/net/core/dev.c index 5367bfba0947..a9a08e4a4857 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5933,10 +5933,7 @@ static void netdev_init_one_queue(struct net_device *dev, static void netif_free_tx_queues(struct net_device *dev) { - if (is_vmalloc_addr(dev->_tx)) - vfree(dev->_tx); - else - kfree(dev->_tx); + kvfree(dev->_tx); } static int netif_alloc_netdev_queues(struct net_device *dev) @@ -6410,10 +6407,7 @@ void netdev_freemem(struct net_device *dev) { char *addr = (char *)dev - dev->padded; - if (is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); + kvfree(addr); } /** diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index dcaf72f10216..4fe041805989 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -1159,10 +1159,7 @@ static void __net_exit tcp_net_metrics_exit(struct net *net) tm = next; } } - if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash)) - vfree(net->ipv4.tcp_metrics_hash); - else - kfree(net->ipv4.tcp_metrics_hash); + kvfree(net->ipv4.tcp_metrics_hash); } static __net_initdata struct pernet_operations tcp_net_metrics_ops = { diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 117208321f16..ec8114fae50b 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -271,10 +271,7 @@ ip_set_free(void *members) { pr_debug("%p: free with %s\n", members, is_vmalloc_addr(members) ? 
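All of the call sites converted by this patch share the same shape, roughly the illustrative sketch below: the buffer may have come from either the slab allocator or vmalloc(), and kvfree() hides the is_vmalloc_addr() test each of them used to open-code. The example_* names are made up.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *example_table_alloc(size_t sz)
{
    void *p = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

    return p ? p : vzalloc(sz);
}

static void example_table_free(void *p)
{
    kvfree(p);  /* safe for both kzalloc()ed and vzalloc()ed memory */
}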
"vfree" : "kfree"); - if (is_vmalloc_addr(members)) - vfree(members); - else - kfree(members); + kvfree(members); } EXPORT_SYMBOL_GPL(ip_set_free); diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index 1dfeb6786832..4080ed6a072b 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -76,10 +76,7 @@ static bool nft_hash_lookup(const struct nft_set *set, static void nft_hash_tbl_free(const struct nft_hash_table *tbl) { - if (is_vmalloc_addr(tbl)) - vfree(tbl); - else - kfree(tbl); + kvfree(tbl); } static unsigned int nft_hash_tbl_size(unsigned int nelem) diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 1e657cf715c4..a9faae89f955 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -313,10 +313,7 @@ out: static void recent_table_free(void *addr) { - if (is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); + kvfree(addr); } static int recent_mt_check(const struct xt_mtchk_param *par, diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index 2aee02802c27..ed30e436128b 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -391,12 +391,7 @@ static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = { static void choke_free(void *addr) { - if (addr) { - if (is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); - } + kvfree(addr); } static int choke_change(struct Qdisc *sch, struct nlattr *opt) diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 23c682b42f99..ba32c2b005d0 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -591,10 +591,7 @@ static void *fq_alloc_node(size_t sz, int node) static void fq_free(void *addr) { - if (addr && is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); + kvfree(addr); } static int fq_resize(struct Qdisc *sch, u32 log) diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 0bf432c782c1..063b726bf1f8 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -365,12 +365,7 @@ static void *fq_codel_zalloc(size_t sz) static void fq_codel_free(void *addr) { - if (addr) { - if (is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); - } + kvfree(addr); } static void fq_codel_destroy(struct Qdisc *sch) diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index 6aab8619bbb0..d85b6812a7d4 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -494,12 +494,7 @@ static void *hhf_zalloc(size_t sz) static void hhf_free(void *addr) { - if (addr) { - if (is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); - } + kvfree(addr); } static void hhf_destroy(struct Qdisc *sch) diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index f1669a00f571..111d70fddaea 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -648,12 +648,7 @@ static void netem_reset(struct Qdisc *sch) static void dist_free(struct disttable *d) { - if (d) { - if (is_vmalloc_addr(d)) - vfree(d); - else - kfree(d); - } + kvfree(d); } /* diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 87317ff0b4ec..1af2f73906d0 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -716,12 +716,7 @@ static void *sfq_alloc(size_t sz) static void sfq_free(void *addr) { - if (addr) { - if (is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); - } + kvfree(addr); } static void sfq_destroy(struct Qdisc *sch) -- cgit v1.2.3-59-g8ed1b From 3b392ddba25a95dcf5fb30b33358961c49dd5cfc Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Wed, 4 Jun 2014 08:53:17 +0900 Subject: MPLS: Use mpls_features to activate software 
MPLS GSO segmentation If an MPLS packet requires segmentation then use mpls_features to determine if the software implementation should be used. As no driver advertises MPLS GSO segmentation this will always be the case. I had not noticed that this was necessary before as software MPLS GSO segmentation was already being used in my test environment. I believe that the reason for that is the skbs in question always had fragments and the driver I used does not advertise NETIF_F_FRAGLIST (which seems to be the case for most drivers). Thus software segmentation was activated by skb_gso_ok(). This introduces the overhead of an extra call to skb_network_protocol() in the case where where CONFIG_NET_MPLS_GSO is set and skb->ip_summed == CHECKSUM_NONE. Thanks to Jesse Gross for prompting me to investigate this. Signed-off-by: Simon Horman Acked-by: YAMAMOTO Takashi Acked-by: Thomas Graf Signed-off-by: David S. Miller --- net/core/dev.c | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) (limited to 'net/core') diff --git a/net/core/dev.c b/net/core/dev.c index a9a08e4a4857..ed8fe62d41af 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2513,13 +2513,39 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) return 0; } +/* If MPLS offload request, verify we are testing hardware MPLS features + * instead of standard features for the netdev. + */ +#ifdef CONFIG_NET_MPLS_GSO +static netdev_features_t net_mpls_features(struct sk_buff *skb, + netdev_features_t features, + __be16 type) +{ + if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC)) + features &= skb->dev->mpls_features; + + return features; +} +#else +static netdev_features_t net_mpls_features(struct sk_buff *skb, + netdev_features_t features, + __be16 type) +{ + return features; +} +#endif + static netdev_features_t harmonize_features(struct sk_buff *skb, netdev_features_t features) { int tmp; + __be16 type; + + type = skb_network_protocol(skb, &tmp); + features = net_mpls_features(skb, features, type); if (skb->ip_summed != CHECKSUM_NONE && - !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) { + !can_checksum_protocol(features, type)) { features &= ~NETIF_F_ALL_CSUM; } else if (illegal_highdma(skb->dev, skb)) { features &= ~NETIF_F_SG; -- cgit v1.2.3-59-g8ed1b From 0dcceabb0c1bf2d4c12a748df9933fad303072a7 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 5 Jun 2014 14:39:36 -0700 Subject: net: filter: fix SKF_AD_PKTTYPE extension on big-endian BPF classic->internal converter broke SKF_AD_PKTTYPE extension, since pkt_type_offset() was failing to find skb->pkt_type field which is defined as: __u8 pkt_type:3, fclone:2, ipvs_property:1, peeked:1, nf_trace:1; Fix it by searching for 3 most significant bits and shift them by 5 at run-time Fixes: bd4cf0ed331a ("net: filter: rework/optimize internal BPF interpreter's instruction set") Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Tested-by: Daniel Borkmann Signed-off-by: David S. Miller --- net/core/filter.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'net/core') diff --git a/net/core/filter.c b/net/core/filter.c index 4aec7b93f1a9..ab3c74e49f07 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -578,7 +578,11 @@ EXPORT_SYMBOL_GPL(sk_run_filter_int_skb); * to make sure its still a 3bit field starting at a byte boundary; * taken from arch/x86/net/bpf_jit_comp.c. 
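/* Hedged sketch (not part of the MPLS patch above) of the other side of the
 * mpls_features check: net_mpls_features() masks the offered features down to
 * whatever dev->mpls_features declares for ETH_P_MPLS_UC/MC frames, so with
 * the default value segmentation and checksumming of MPLS packets fall back
 * to software.  A driver that really could offload MPLS would have to opt in
 * roughly like this from its setup/probe path; the function name and the
 * chosen feature bits are only examples.
 */
#include <linux/netdevice.h>

static void example_enable_mpls_offload(struct net_device *dev)
{
    /* Feature bits the hardware can also apply beneath an MPLS label
     * stack; anything left out is masked off for MPLS packets. */
    dev->mpls_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
}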
*/ +#ifdef __BIG_ENDIAN_BITFIELD +#define PKT_TYPE_MAX (7 << 5) +#else #define PKT_TYPE_MAX 7 +#endif static unsigned int pkt_type_offset(void) { struct sk_buff skb_probe = { .pkt_type = ~0, }; @@ -685,6 +689,13 @@ static bool convert_bpf_extensions(struct sock_filter *fp, insn->code = BPF_ALU | BPF_AND | BPF_K; insn->a_reg = A_REG; insn->imm = PKT_TYPE_MAX; +#ifdef __BIG_ENDIAN_BITFIELD + insn++; + + insn->code = BPF_ALU | BPF_RSH | BPF_K; + insn->a_reg = A_REG; + insn->imm = 5; +#endif break; case SKF_AD_OFF + SKF_AD_IFINDEX: -- cgit v1.2.3-59-g8ed1b From 87757a917b0b3c0787e0563c679762152be81312 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 6 Jun 2014 06:44:03 -0700 Subject: net: force a list_del() in unregister_netdevice_many() unregister_netdevice_many() API is error prone and we had too many bugs because of dangling LIST_HEAD on stacks. See commit f87e6f47933e3e ("net: dont leave active on stack LIST_HEAD") In fact, instead of making sure no caller leaves an active list_head, just force a list_del() in the callee. No one seems to need to access the list after unregister_netdevice_many() Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/macvlan.c | 1 - net/core/dev.c | 5 ++++- net/core/rtnetlink.c | 1 - net/mac80211/iface.c | 1 - 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'net/core') diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index d53e299ae1d9..7eec598c5cb6 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1036,7 +1036,6 @@ static int macvlan_device_event(struct notifier_block *unused, list_for_each_entry_safe(vlan, next, &port->vlans, list) vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill); unregister_netdevice_many(&list_kill); - list_del(&list_kill); break; case NETDEV_PRE_TYPE_CHANGE: /* Forbid underlaying device to change its type. */ diff --git a/net/core/dev.c b/net/core/dev.c index fb8b0546485b..a30bef1882f5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6613,6 +6613,9 @@ EXPORT_SYMBOL(unregister_netdevice_queue); /** * unregister_netdevice_many - unregister many devices * @head: list of devices + * + * Note: As most callers use a stack allocated list_head, + * we force a list_del() to make sure stack wont be corrupted later. 
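/* User-space sketch of what the converted SKF_AD_PKTTYPE sequence above now
 * computes: on big-endian bitfield layouts the 3-bit pkt_type field sits in
 * the top bits of its byte, so the value is recovered with a mask of 7 << 5
 * followed by a right shift of 5; on little-endian layouts the plain low-bit
 * mask is enough.  The flag bytes below are made-up examples.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int decode_pkt_type(uint8_t flags, int big_endian_bitfield)
{
    if (big_endian_bitfield)
        return (flags & (7 << 5)) >> 5;     /* bits 7..5 */
    return flags & 7;                       /* bits 2..0 */
}

int main(void)
{
    /* pkt_type == PACKET_BROADCAST (1) with one neighbouring flag set. */
    printf("little-endian: %u, big-endian: %u\n",
           decode_pkt_type(0x09, 0), decode_pkt_type(0x29, 1));
    return 0;
}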
*/ void unregister_netdevice_many(struct list_head *head) { @@ -6622,6 +6625,7 @@ void unregister_netdevice_many(struct list_head *head) rollback_registered_many(head); list_for_each_entry(dev, head, unreg_list) net_set_todo(dev); + list_del(head); } } EXPORT_SYMBOL(unregister_netdevice_many); @@ -7077,7 +7081,6 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list) } } unregister_netdevice_many(&dev_kill_list); - list_del(&dev_kill_list); rtnl_unlock(); } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index f4e9037f9a0c..fbdb1556b0db 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1744,7 +1744,6 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh) ops->dellink(dev, &list_kill); unregister_netdevice_many(&list_kill); - list_del(&list_kill); return 0; } diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index b8d331e7d883..34799e06ee01 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1758,7 +1758,6 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local) } mutex_unlock(&local->iflist_mtx); unregister_netdevice_many(&unreg_list); - list_del(&unreg_list); list_for_each_entry_safe(sdata, tmp, &wdev_list, list) { list_del(&sdata->list); -- cgit v1.2.3-59-g8ed1b From e430f34ee5192c84bcabd3c79ab7e2388b5eec74 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Fri, 6 Jun 2014 14:46:06 -0700 Subject: net: filter: cleanup A/X name usage The macro 'A' used in internal BPF interpreter: #define A regs[insn->a_reg] was easily confused with the name of classic BPF register 'A', since 'A' would mean two different things depending on context. This patch is trying to clean up the naming and clarify its usage in the following way: - A and X are names of two classic BPF registers - BPF_REG_A denotes internal BPF register R0 used to map classic register A in internal BPF programs generated from classic - BPF_REG_X denotes internal BPF register R7 used to map classic register X in internal BPF programs generated from classic - internal BPF instruction format: struct sock_filter_int { __u8 code; /* opcode */ __u8 dst_reg:4; /* dest register */ __u8 src_reg:4; /* source register */ __s16 off; /* signed offset */ __s32 imm; /* signed immediate constant */ }; - BPF_X/BPF_K is 1 bit used to encode source operand of instruction In classic: BPF_X - means use register X as source operand BPF_K - means use 32-bit immediate as source operand In internal: BPF_X - means use 'src_reg' register as source operand BPF_K - means use 32-bit immediate as source operand Suggested-by: Chema Gonzalez Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Acked-by: Chema Gonzalez Signed-off-by: David S. Miller --- Documentation/networking/filter.txt | 2 +- arch/x86/net/bpf_jit_comp.c | 260 ++++++++++++++++++------------------ include/linux/filter.h | 156 ++++++++++++---------- net/core/filter.c | 198 +++++++++++++-------------- 4 files changed, 314 insertions(+), 302 deletions(-) (limited to 'net/core') diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index 58c443926647..9f49b8690500 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -805,7 +805,7 @@ to seccomp_data, for converted BPF filters R1 points to a skb. 
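With the list_del() now done by the callee, a caller of unregister_netdevice_many() reduces to the minimal kernel-side sketch below (illustrative; example_remove_group() is a hypothetical helper): the on-stack list head can simply go out of scope after the call.

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void example_remove_group(struct net_device **devs, int n)
{
    LIST_HEAD(kill_list);
    int i;

    ASSERT_RTNL();
    for (i = 0; i < n; i++)
        unregister_netdevice_queue(devs[i], &kill_list);
    unregister_netdevice_many(&kill_list);
    /* no trailing list_del(&kill_list) needed any more */
}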
A program, that is translated internally consists of the following elements: - op:16, jt:8, jf:8, k:32 ==> op:8, a_reg:4, x_reg:4, off:16, imm:32 + op:16, jt:8, jf:8, k:32 ==> op:8, dst_reg:4, src_reg:4, off:16, imm:32 So far 87 internal BPF instructions were implemented. 8-bit 'op' opcode field has room for new instructions. Some of them may use 16/24/32 byte encoding. New diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 080f3f071bb0..99bef86ed6df 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -64,10 +64,10 @@ static inline bool is_simm32(s64 value) return value == (s64) (s32) value; } -/* mov A, X */ -#define EMIT_mov(A, X) \ - do {if (A != X) \ - EMIT3(add_2mod(0x48, A, X), 0x89, add_2reg(0xC0, A, X)); \ +/* mov dst, src */ +#define EMIT_mov(DST, SRC) \ + do {if (DST != SRC) \ + EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \ } while (0) static int bpf_size_to_x86_bytes(int bpf_size) @@ -194,16 +194,16 @@ static inline u8 add_2mod(u8 byte, u32 r1, u32 r2) return byte; } -/* encode dest register 'a_reg' into x64 opcode 'byte' */ -static inline u8 add_1reg(u8 byte, u32 a_reg) +/* encode 'dst_reg' register into x64 opcode 'byte' */ +static inline u8 add_1reg(u8 byte, u32 dst_reg) { - return byte + reg2hex[a_reg]; + return byte + reg2hex[dst_reg]; } -/* encode dest 'a_reg' and src 'x_reg' registers into x64 opcode 'byte' */ -static inline u8 add_2reg(u8 byte, u32 a_reg, u32 x_reg) +/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */ +static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) { - return byte + reg2hex[a_reg] + (reg2hex[x_reg] << 3); + return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3); } struct jit_context { @@ -286,9 +286,9 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, } for (i = 0; i < insn_cnt; i++, insn++) { - const s32 K = insn->imm; - u32 a_reg = insn->a_reg; - u32 x_reg = insn->x_reg; + const s32 imm32 = insn->imm; + u32 dst_reg = insn->dst_reg; + u32 src_reg = insn->src_reg; u8 b1 = 0, b2 = 0, b3 = 0; s64 jmp_offset; u8 jmp_cond; @@ -315,32 +315,32 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, case BPF_XOR: b2 = 0x31; break; } if (BPF_CLASS(insn->code) == BPF_ALU64) - EMIT1(add_2mod(0x48, a_reg, x_reg)); - else if (is_ereg(a_reg) || is_ereg(x_reg)) - EMIT1(add_2mod(0x40, a_reg, x_reg)); - EMIT2(b2, add_2reg(0xC0, a_reg, x_reg)); + EMIT1(add_2mod(0x48, dst_reg, src_reg)); + else if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT1(add_2mod(0x40, dst_reg, src_reg)); + EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg)); break; - /* mov A, X */ + /* mov dst, src */ case BPF_ALU64 | BPF_MOV | BPF_X: - EMIT_mov(a_reg, x_reg); + EMIT_mov(dst_reg, src_reg); break; - /* mov32 A, X */ + /* mov32 dst, src */ case BPF_ALU | BPF_MOV | BPF_X: - if (is_ereg(a_reg) || is_ereg(x_reg)) - EMIT1(add_2mod(0x40, a_reg, x_reg)); - EMIT2(0x89, add_2reg(0xC0, a_reg, x_reg)); + if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT1(add_2mod(0x40, dst_reg, src_reg)); + EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg)); break; - /* neg A */ + /* neg dst */ case BPF_ALU | BPF_NEG: case BPF_ALU64 | BPF_NEG: if (BPF_CLASS(insn->code) == BPF_ALU64) - EMIT1(add_1mod(0x48, a_reg)); - else if (is_ereg(a_reg)) - EMIT1(add_1mod(0x40, a_reg)); - EMIT2(0xF7, add_1reg(0xD8, a_reg)); + EMIT1(add_1mod(0x48, dst_reg)); + else if (is_ereg(dst_reg)) + EMIT1(add_1mod(0x40, dst_reg)); + EMIT2(0xF7, add_1reg(0xD8, dst_reg)); break; case BPF_ALU | BPF_ADD | BPF_K: @@ -354,9 +354,9 
@@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, case BPF_ALU64 | BPF_OR | BPF_K: case BPF_ALU64 | BPF_XOR | BPF_K: if (BPF_CLASS(insn->code) == BPF_ALU64) - EMIT1(add_1mod(0x48, a_reg)); - else if (is_ereg(a_reg)) - EMIT1(add_1mod(0x40, a_reg)); + EMIT1(add_1mod(0x48, dst_reg)); + else if (is_ereg(dst_reg)) + EMIT1(add_1mod(0x40, dst_reg)); switch (BPF_OP(insn->code)) { case BPF_ADD: b3 = 0xC0; break; @@ -366,10 +366,10 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, case BPF_XOR: b3 = 0xF0; break; } - if (is_imm8(K)) - EMIT3(0x83, add_1reg(b3, a_reg), K); + if (is_imm8(imm32)) + EMIT3(0x83, add_1reg(b3, dst_reg), imm32); else - EMIT2_off32(0x81, add_1reg(b3, a_reg), K); + EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32); break; case BPF_ALU64 | BPF_MOV | BPF_K: @@ -377,23 +377,23 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, * use 'mov eax, imm32' (which zero-extends imm32) * to save 2 bytes */ - if (K < 0) { + if (imm32 < 0) { /* 'mov rax, imm32' sign extends imm32 */ - b1 = add_1mod(0x48, a_reg); + b1 = add_1mod(0x48, dst_reg); b2 = 0xC7; b3 = 0xC0; - EMIT3_off32(b1, b2, add_1reg(b3, a_reg), K); + EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32); break; } case BPF_ALU | BPF_MOV | BPF_K: /* mov %eax, imm32 */ - if (is_ereg(a_reg)) - EMIT1(add_1mod(0x40, a_reg)); - EMIT1_off32(add_1reg(0xB8, a_reg), K); + if (is_ereg(dst_reg)) + EMIT1(add_1mod(0x40, dst_reg)); + EMIT1_off32(add_1reg(0xB8, dst_reg), imm32); break; - /* A %= X, A /= X, A %= K, A /= K */ + /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */ case BPF_ALU | BPF_MOD | BPF_X: case BPF_ALU | BPF_DIV | BPF_X: case BPF_ALU | BPF_MOD | BPF_K: @@ -406,14 +406,14 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, EMIT1(0x52); /* push rdx */ if (BPF_SRC(insn->code) == BPF_X) - /* mov r11, X */ - EMIT_mov(AUX_REG, x_reg); + /* mov r11, src_reg */ + EMIT_mov(AUX_REG, src_reg); else - /* mov r11, K */ - EMIT3_off32(0x49, 0xC7, 0xC3, K); + /* mov r11, imm32 */ + EMIT3_off32(0x49, 0xC7, 0xC3, imm32); - /* mov rax, A */ - EMIT_mov(BPF_REG_0, a_reg); + /* mov rax, dst_reg */ + EMIT_mov(BPF_REG_0, dst_reg); /* xor edx, edx * equivalent to 'xor rdx, rdx', but one byte less @@ -421,7 +421,7 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, EMIT2(0x31, 0xd2); if (BPF_SRC(insn->code) == BPF_X) { - /* if (X == 0) return 0 */ + /* if (src_reg == 0) return 0 */ /* cmp r11, 0 */ EMIT4(0x49, 0x83, 0xFB, 0x00); @@ -457,8 +457,8 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, EMIT1(0x5A); /* pop rdx */ EMIT1(0x58); /* pop rax */ - /* mov A, r11 */ - EMIT_mov(a_reg, AUX_REG); + /* mov dst_reg, r11 */ + EMIT_mov(dst_reg, AUX_REG); break; case BPF_ALU | BPF_MUL | BPF_K: @@ -468,15 +468,15 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, EMIT1(0x50); /* push rax */ EMIT1(0x52); /* push rdx */ - /* mov r11, A */ - EMIT_mov(AUX_REG, a_reg); + /* mov r11, dst_reg */ + EMIT_mov(AUX_REG, dst_reg); if (BPF_SRC(insn->code) == BPF_X) - /* mov rax, X */ - EMIT_mov(BPF_REG_0, x_reg); + /* mov rax, src_reg */ + EMIT_mov(BPF_REG_0, src_reg); else - /* mov rax, K */ - EMIT3_off32(0x48, 0xC7, 0xC0, K); + /* mov rax, imm32 */ + EMIT3_off32(0x48, 0xC7, 0xC0, imm32); if (BPF_CLASS(insn->code) == BPF_ALU64) EMIT1(add_1mod(0x48, AUX_REG)); @@ -491,8 +491,8 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, EMIT1(0x5A); /* pop rdx */ EMIT1(0x58); /* pop rax */ - /* mov A, r11 */ - 
EMIT_mov(a_reg, AUX_REG); + /* mov dst_reg, r11 */ + EMIT_mov(dst_reg, AUX_REG); break; /* shifts */ @@ -503,39 +503,39 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, case BPF_ALU64 | BPF_RSH | BPF_K: case BPF_ALU64 | BPF_ARSH | BPF_K: if (BPF_CLASS(insn->code) == BPF_ALU64) - EMIT1(add_1mod(0x48, a_reg)); - else if (is_ereg(a_reg)) - EMIT1(add_1mod(0x40, a_reg)); + EMIT1(add_1mod(0x48, dst_reg)); + else if (is_ereg(dst_reg)) + EMIT1(add_1mod(0x40, dst_reg)); switch (BPF_OP(insn->code)) { case BPF_LSH: b3 = 0xE0; break; case BPF_RSH: b3 = 0xE8; break; case BPF_ARSH: b3 = 0xF8; break; } - EMIT3(0xC1, add_1reg(b3, a_reg), K); + EMIT3(0xC1, add_1reg(b3, dst_reg), imm32); break; case BPF_ALU | BPF_END | BPF_FROM_BE: - switch (K) { + switch (imm32) { case 16: /* emit 'ror %ax, 8' to swap lower 2 bytes */ EMIT1(0x66); - if (is_ereg(a_reg)) + if (is_ereg(dst_reg)) EMIT1(0x41); - EMIT3(0xC1, add_1reg(0xC8, a_reg), 8); + EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); break; case 32: /* emit 'bswap eax' to swap lower 4 bytes */ - if (is_ereg(a_reg)) + if (is_ereg(dst_reg)) EMIT2(0x41, 0x0F); else EMIT1(0x0F); - EMIT1(add_1reg(0xC8, a_reg)); + EMIT1(add_1reg(0xC8, dst_reg)); break; case 64: /* emit 'bswap rax' to swap 8 bytes */ - EMIT3(add_1mod(0x48, a_reg), 0x0F, - add_1reg(0xC8, a_reg)); + EMIT3(add_1mod(0x48, dst_reg), 0x0F, + add_1reg(0xC8, dst_reg)); break; } break; @@ -543,117 +543,117 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, case BPF_ALU | BPF_END | BPF_FROM_LE: break; - /* ST: *(u8*)(a_reg + off) = imm */ + /* ST: *(u8*)(dst_reg + off) = imm */ case BPF_ST | BPF_MEM | BPF_B: - if (is_ereg(a_reg)) + if (is_ereg(dst_reg)) EMIT2(0x41, 0xC6); else EMIT1(0xC6); goto st; case BPF_ST | BPF_MEM | BPF_H: - if (is_ereg(a_reg)) + if (is_ereg(dst_reg)) EMIT3(0x66, 0x41, 0xC7); else EMIT2(0x66, 0xC7); goto st; case BPF_ST | BPF_MEM | BPF_W: - if (is_ereg(a_reg)) + if (is_ereg(dst_reg)) EMIT2(0x41, 0xC7); else EMIT1(0xC7); goto st; case BPF_ST | BPF_MEM | BPF_DW: - EMIT2(add_1mod(0x48, a_reg), 0xC7); + EMIT2(add_1mod(0x48, dst_reg), 0xC7); st: if (is_imm8(insn->off)) - EMIT2(add_1reg(0x40, a_reg), insn->off); + EMIT2(add_1reg(0x40, dst_reg), insn->off); else - EMIT1_off32(add_1reg(0x80, a_reg), insn->off); + EMIT1_off32(add_1reg(0x80, dst_reg), insn->off); - EMIT(K, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); + EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); break; - /* STX: *(u8*)(a_reg + off) = x_reg */ + /* STX: *(u8*)(dst_reg + off) = src_reg */ case BPF_STX | BPF_MEM | BPF_B: /* emit 'mov byte ptr [rax + off], al' */ - if (is_ereg(a_reg) || is_ereg(x_reg) || + if (is_ereg(dst_reg) || is_ereg(src_reg) || /* have to add extra byte for x86 SIL, DIL regs */ - x_reg == BPF_REG_1 || x_reg == BPF_REG_2) - EMIT2(add_2mod(0x40, a_reg, x_reg), 0x88); + src_reg == BPF_REG_1 || src_reg == BPF_REG_2) + EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); else EMIT1(0x88); goto stx; case BPF_STX | BPF_MEM | BPF_H: - if (is_ereg(a_reg) || is_ereg(x_reg)) - EMIT3(0x66, add_2mod(0x40, a_reg, x_reg), 0x89); + if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89); else EMIT2(0x66, 0x89); goto stx; case BPF_STX | BPF_MEM | BPF_W: - if (is_ereg(a_reg) || is_ereg(x_reg)) - EMIT2(add_2mod(0x40, a_reg, x_reg), 0x89); + if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89); else EMIT1(0x89); goto stx; case BPF_STX | BPF_MEM | BPF_DW: - EMIT2(add_2mod(0x48, a_reg, x_reg), 0x89); + 
EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89); stx: if (is_imm8(insn->off)) - EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off); + EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off); else - EMIT1_off32(add_2reg(0x80, a_reg, x_reg), + EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), insn->off); break; - /* LDX: a_reg = *(u8*)(x_reg + off) */ + /* LDX: dst_reg = *(u8*)(src_reg + off) */ case BPF_LDX | BPF_MEM | BPF_B: /* emit 'movzx rax, byte ptr [rax + off]' */ - EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB6); + EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); goto ldx; case BPF_LDX | BPF_MEM | BPF_H: /* emit 'movzx rax, word ptr [rax + off]' */ - EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB7); + EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); goto ldx; case BPF_LDX | BPF_MEM | BPF_W: /* emit 'mov eax, dword ptr [rax+0x14]' */ - if (is_ereg(a_reg) || is_ereg(x_reg)) - EMIT2(add_2mod(0x40, x_reg, a_reg), 0x8B); + if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); else EMIT1(0x8B); goto ldx; case BPF_LDX | BPF_MEM | BPF_DW: /* emit 'mov rax, qword ptr [rax+0x14]' */ - EMIT2(add_2mod(0x48, x_reg, a_reg), 0x8B); + EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); ldx: /* if insn->off == 0 we can save one extra byte, but * special case of x86 r13 which always needs an offset * is not worth the hassle */ if (is_imm8(insn->off)) - EMIT2(add_2reg(0x40, x_reg, a_reg), insn->off); + EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off); else - EMIT1_off32(add_2reg(0x80, x_reg, a_reg), + EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), insn->off); break; - /* STX XADD: lock *(u32*)(a_reg + off) += x_reg */ + /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */ case BPF_STX | BPF_XADD | BPF_W: /* emit 'lock add dword ptr [rax + off], eax' */ - if (is_ereg(a_reg) || is_ereg(x_reg)) - EMIT3(0xF0, add_2mod(0x40, a_reg, x_reg), 0x01); + if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01); else EMIT2(0xF0, 0x01); goto xadd; case BPF_STX | BPF_XADD | BPF_DW: - EMIT3(0xF0, add_2mod(0x48, a_reg, x_reg), 0x01); + EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01); xadd: if (is_imm8(insn->off)) - EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off); + EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off); else - EMIT1_off32(add_2reg(0x80, a_reg, x_reg), + EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), insn->off); break; /* call */ case BPF_JMP | BPF_CALL: - func = (u8 *) __bpf_call_base + K; + func = (u8 *) __bpf_call_base + imm32; jmp_offset = func - (image + addrs[i]); if (ctx->seen_ld_abs) { EMIT2(0x41, 0x52); /* push %r10 */ @@ -663,9 +663,9 @@ xadd: if (is_imm8(insn->off)) */ jmp_offset += 4; } - if (!K || !is_simm32(jmp_offset)) { + if (!imm32 || !is_simm32(jmp_offset)) { pr_err("unsupported bpf func %d addr %p image %p\n", - K, func, image); + imm32, func, image); return -EINVAL; } EMIT1_off32(0xE8, jmp_offset); @@ -682,21 +682,21 @@ xadd: if (is_imm8(insn->off)) case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JSGT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: - /* cmp a_reg, x_reg */ - EMIT3(add_2mod(0x48, a_reg, x_reg), 0x39, - add_2reg(0xC0, a_reg, x_reg)); + /* cmp dst_reg, src_reg */ + EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39, + add_2reg(0xC0, dst_reg, src_reg)); goto emit_cond_jmp; case BPF_JMP | BPF_JSET | BPF_X: - /* test a_reg, x_reg */ - EMIT3(add_2mod(0x48, a_reg, x_reg), 0x85, - add_2reg(0xC0, a_reg, x_reg)); + /* test dst_reg, src_reg */ + EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85, + add_2reg(0xC0, dst_reg, 
src_reg)); goto emit_cond_jmp; case BPF_JMP | BPF_JSET | BPF_K: - /* test a_reg, imm32 */ - EMIT1(add_1mod(0x48, a_reg)); - EMIT2_off32(0xF7, add_1reg(0xC0, a_reg), K); + /* test dst_reg, imm32 */ + EMIT1(add_1mod(0x48, dst_reg)); + EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); goto emit_cond_jmp; case BPF_JMP | BPF_JEQ | BPF_K: @@ -705,13 +705,13 @@ xadd: if (is_imm8(insn->off)) case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JSGT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: - /* cmp a_reg, imm8/32 */ - EMIT1(add_1mod(0x48, a_reg)); + /* cmp dst_reg, imm8/32 */ + EMIT1(add_1mod(0x48, dst_reg)); - if (is_imm8(K)) - EMIT3(0x83, add_1reg(0xF8, a_reg), K); + if (is_imm8(imm32)) + EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); else - EMIT2_off32(0x81, add_1reg(0xF8, a_reg), K); + EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32); emit_cond_jmp: /* convert BPF opcode to x86 */ switch (BPF_OP(insn->code)) { @@ -773,27 +773,27 @@ emit_jmp: func = sk_load_word; goto common_load; case BPF_LD | BPF_ABS | BPF_W: - func = CHOOSE_LOAD_FUNC(K, sk_load_word); + func = CHOOSE_LOAD_FUNC(imm32, sk_load_word); common_load: ctx->seen_ld_abs = true; jmp_offset = func - (image + addrs[i]); if (!func || !is_simm32(jmp_offset)) { pr_err("unsupported bpf func %d addr %p image %p\n", - K, func, image); + imm32, func, image); return -EINVAL; } if (BPF_MODE(insn->code) == BPF_ABS) { /* mov %esi, imm32 */ - EMIT1_off32(0xBE, K); + EMIT1_off32(0xBE, imm32); } else { - /* mov %rsi, x_reg */ - EMIT_mov(BPF_REG_2, x_reg); - if (K) { - if (is_imm8(K)) + /* mov %rsi, src_reg */ + EMIT_mov(BPF_REG_2, src_reg); + if (imm32) { + if (is_imm8(imm32)) /* add %esi, imm8 */ - EMIT3(0x83, 0xC6, K); + EMIT3(0x83, 0xC6, imm32); else /* add %esi, imm32 */ - EMIT2_off32(0x81, 0xC6, K); + EMIT2_off32(0x81, 0xC6, imm32); } } /* skb pointer is in R6 (%rbx), it will be copied into @@ -808,13 +808,13 @@ common_load: ctx->seen_ld_abs = true; func = sk_load_half; goto common_load; case BPF_LD | BPF_ABS | BPF_H: - func = CHOOSE_LOAD_FUNC(K, sk_load_half); + func = CHOOSE_LOAD_FUNC(imm32, sk_load_half); goto common_load; case BPF_LD | BPF_IND | BPF_B: func = sk_load_byte; goto common_load; case BPF_LD | BPF_ABS | BPF_B: - func = CHOOSE_LOAD_FUNC(K, sk_load_byte); + func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte); goto common_load; case BPF_JMP | BPF_EXIT: diff --git a/include/linux/filter.h b/include/linux/filter.h index f0c2ad43b4af..a7e3c48d73a7 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -78,161 +78,173 @@ enum { /* Helper macros for filter block array initializers. 
*/ -/* ALU ops on registers, bpf_add|sub|...: A += X */ +/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ -#define BPF_ALU64_REG(OP, A, X) \ +#define BPF_ALU64_REG(OP, DST, SRC) \ ((struct sock_filter_int) { \ .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = 0, \ .imm = 0 }) -#define BPF_ALU32_REG(OP, A, X) \ +#define BPF_ALU32_REG(OP, DST, SRC) \ ((struct sock_filter_int) { \ .code = BPF_ALU | BPF_OP(OP) | BPF_X, \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = 0, \ .imm = 0 }) -/* ALU ops on immediates, bpf_add|sub|...: A += IMM */ +/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */ -#define BPF_ALU64_IMM(OP, A, IMM) \ +#define BPF_ALU64_IMM(OP, DST, IMM) \ ((struct sock_filter_int) { \ .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \ - .a_reg = A, \ - .x_reg = 0, \ + .dst_reg = DST, \ + .src_reg = 0, \ .off = 0, \ .imm = IMM }) -#define BPF_ALU32_IMM(OP, A, IMM) \ +#define BPF_ALU32_IMM(OP, DST, IMM) \ ((struct sock_filter_int) { \ .code = BPF_ALU | BPF_OP(OP) | BPF_K, \ - .a_reg = A, \ - .x_reg = 0, \ + .dst_reg = DST, \ + .src_reg = 0, \ .off = 0, \ .imm = IMM }) /* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */ -#define BPF_ENDIAN(TYPE, A, LEN) \ +#define BPF_ENDIAN(TYPE, DST, LEN) \ ((struct sock_filter_int) { \ .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \ - .a_reg = A, \ - .x_reg = 0, \ + .dst_reg = DST, \ + .src_reg = 0, \ .off = 0, \ .imm = LEN }) -/* Short form of mov, A = X */ +/* Short form of mov, dst_reg = src_reg */ -#define BPF_MOV64_REG(A, X) \ +#define BPF_MOV64_REG(DST, SRC) \ ((struct sock_filter_int) { \ .code = BPF_ALU64 | BPF_MOV | BPF_X, \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = 0, \ .imm = 0 }) -#define BPF_MOV32_REG(A, X) \ +#define BPF_MOV32_REG(DST, SRC) \ ((struct sock_filter_int) { \ .code = BPF_ALU | BPF_MOV | BPF_X, \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = 0, \ .imm = 0 }) -/* Short form of mov, A = IMM */ +/* Short form of mov, dst_reg = imm32 */ -#define BPF_MOV64_IMM(A, IMM) \ +#define BPF_MOV64_IMM(DST, IMM) \ ((struct sock_filter_int) { \ .code = BPF_ALU64 | BPF_MOV | BPF_K, \ - .a_reg = A, \ - .x_reg = 0, \ + .dst_reg = DST, \ + .src_reg = 0, \ .off = 0, \ .imm = IMM }) -#define BPF_MOV32_IMM(A, IMM) \ +#define BPF_MOV32_IMM(DST, IMM) \ ((struct sock_filter_int) { \ .code = BPF_ALU | BPF_MOV | BPF_K, \ - .a_reg = A, \ - .x_reg = 0, \ + .dst_reg = DST, \ + .src_reg = 0, \ .off = 0, \ .imm = IMM }) -/* Short form of mov based on type, BPF_X: A = X, BPF_K: A = IMM */ +/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */ -#define BPF_MOV64_RAW(TYPE, A, X, IMM) \ +#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \ ((struct sock_filter_int) { \ .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = 0, \ .imm = IMM }) -#define BPF_MOV32_RAW(TYPE, A, X, IMM) \ +#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \ ((struct sock_filter_int) { \ .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = 0, \ .imm = IMM }) -/* Direct packet access, R0 = *(uint *) (skb->data + OFF) */ +/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */ -#define BPF_LD_ABS(SIZE, OFF) \ +#define BPF_LD_ABS(SIZE, IMM) \ ((struct sock_filter_int) { \ .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \ - .a_reg = 0, 
\ - .x_reg = 0, \ + .dst_reg = 0, \ + .src_reg = 0, \ .off = 0, \ - .imm = OFF }) + .imm = IMM }) -/* Indirect packet access, R0 = *(uint *) (skb->data + X + OFF) */ +/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */ -#define BPF_LD_IND(SIZE, X, OFF) \ +#define BPF_LD_IND(SIZE, SRC, IMM) \ ((struct sock_filter_int) { \ .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \ - .a_reg = 0, \ - .x_reg = X, \ + .dst_reg = 0, \ + .src_reg = SRC, \ .off = 0, \ - .imm = OFF }) + .imm = IMM }) -/* Memory store, A = *(uint *) (X + OFF), and vice versa */ +/* Memory load, dst_reg = *(uint *) (src_reg + off16) */ -#define BPF_LDX_MEM(SIZE, A, X, OFF) \ +#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \ ((struct sock_filter_int) { \ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) -#define BPF_STX_MEM(SIZE, A, X, OFF) \ +/* Memory store, *(uint *) (dst_reg + off16) = src_reg */ + +#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \ ((struct sock_filter_int) { \ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) -/* Conditional jumps against registers, if (A 'op' X) goto pc + OFF */ +/* Memory store, *(uint *) (dst_reg + off16) = imm32 */ + +#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ + ((struct sock_filter_int) { \ + .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = IMM }) + +/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */ -#define BPF_JMP_REG(OP, A, X, OFF) \ +#define BPF_JMP_REG(OP, DST, SRC, OFF) \ ((struct sock_filter_int) { \ .code = BPF_JMP | BPF_OP(OP) | BPF_X, \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) -/* Conditional jumps against immediates, if (A 'op' IMM) goto pc + OFF */ +/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */ -#define BPF_JMP_IMM(OP, A, IMM, OFF) \ +#define BPF_JMP_IMM(OP, DST, IMM, OFF) \ ((struct sock_filter_int) { \ .code = BPF_JMP | BPF_OP(OP) | BPF_K, \ - .a_reg = A, \ - .x_reg = 0, \ + .dst_reg = DST, \ + .src_reg = 0, \ .off = OFF, \ .imm = IMM }) @@ -241,18 +253,18 @@ enum { #define BPF_EMIT_CALL(FUNC) \ ((struct sock_filter_int) { \ .code = BPF_JMP | BPF_CALL, \ - .a_reg = 0, \ - .x_reg = 0, \ + .dst_reg = 0, \ + .src_reg = 0, \ .off = 0, \ .imm = ((FUNC) - __bpf_call_base) }) /* Raw code statement block */ -#define BPF_RAW_INSN(CODE, A, X, OFF, IMM) \ +#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \ ((struct sock_filter_int) { \ .code = CODE, \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = OFF, \ .imm = IMM }) @@ -261,8 +273,8 @@ enum { #define BPF_EXIT_INSN() \ ((struct sock_filter_int) { \ .code = BPF_JMP | BPF_EXIT, \ - .a_reg = 0, \ - .x_reg = 0, \ + .dst_reg = 0, \ + .src_reg = 0, \ .off = 0, \ .imm = 0 }) @@ -287,8 +299,8 @@ enum { struct sock_filter_int { __u8 code; /* opcode */ - __u8 a_reg:4; /* dest register */ - __u8 x_reg:4; /* source register */ + __u8 dst_reg:4; /* dest register */ + __u8 src_reg:4; /* source register */ __s16 off; /* signed offset */ __s32 imm; /* signed immediate constant */ }; diff --git a/net/core/filter.c b/net/core/filter.c index 6bd2e350e751..b3f21751b238 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -59,12 +59,12 @@ #define BPF_R10 regs[BPF_REG_10] /* Named registers */ -#define A regs[insn->a_reg] -#define X 
regs[insn->x_reg] +#define DST regs[insn->dst_reg] +#define SRC regs[insn->src_reg] #define FP regs[BPF_REG_FP] #define ARG1 regs[BPF_REG_ARG1] #define CTX regs[BPF_REG_CTX] -#define K insn->imm +#define IMM insn->imm /* No hurry in this branch * @@ -264,7 +264,7 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *ins FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; ARG1 = (u64) (unsigned long) ctx; - /* Register for user BPF programs need to be reset first. */ + /* Registers used in classic BPF programs need to be reset first. */ regs[BPF_REG_A] = 0; regs[BPF_REG_X] = 0; @@ -274,16 +274,16 @@ select_insn: /* ALU */ #define ALU(OPCODE, OP) \ ALU64_##OPCODE##_X: \ - A = A OP X; \ + DST = DST OP SRC; \ CONT; \ ALU_##OPCODE##_X: \ - A = (u32) A OP (u32) X; \ + DST = (u32) DST OP (u32) SRC; \ CONT; \ ALU64_##OPCODE##_K: \ - A = A OP K; \ + DST = DST OP IMM; \ CONT; \ ALU_##OPCODE##_K: \ - A = (u32) A OP (u32) K; \ + DST = (u32) DST OP (u32) IMM; \ CONT; ALU(ADD, +) @@ -296,92 +296,92 @@ select_insn: ALU(MUL, *) #undef ALU ALU_NEG: - A = (u32) -A; + DST = (u32) -DST; CONT; ALU64_NEG: - A = -A; + DST = -DST; CONT; ALU_MOV_X: - A = (u32) X; + DST = (u32) SRC; CONT; ALU_MOV_K: - A = (u32) K; + DST = (u32) IMM; CONT; ALU64_MOV_X: - A = X; + DST = SRC; CONT; ALU64_MOV_K: - A = K; + DST = IMM; CONT; ALU64_ARSH_X: - (*(s64 *) &A) >>= X; + (*(s64 *) &DST) >>= SRC; CONT; ALU64_ARSH_K: - (*(s64 *) &A) >>= K; + (*(s64 *) &DST) >>= IMM; CONT; ALU64_MOD_X: - if (unlikely(X == 0)) + if (unlikely(SRC == 0)) return 0; - tmp = A; - A = do_div(tmp, X); + tmp = DST; + DST = do_div(tmp, SRC); CONT; ALU_MOD_X: - if (unlikely(X == 0)) + if (unlikely(SRC == 0)) return 0; - tmp = (u32) A; - A = do_div(tmp, (u32) X); + tmp = (u32) DST; + DST = do_div(tmp, (u32) SRC); CONT; ALU64_MOD_K: - tmp = A; - A = do_div(tmp, K); + tmp = DST; + DST = do_div(tmp, IMM); CONT; ALU_MOD_K: - tmp = (u32) A; - A = do_div(tmp, (u32) K); + tmp = (u32) DST; + DST = do_div(tmp, (u32) IMM); CONT; ALU64_DIV_X: - if (unlikely(X == 0)) + if (unlikely(SRC == 0)) return 0; - do_div(A, X); + do_div(DST, SRC); CONT; ALU_DIV_X: - if (unlikely(X == 0)) + if (unlikely(SRC == 0)) return 0; - tmp = (u32) A; - do_div(tmp, (u32) X); - A = (u32) tmp; + tmp = (u32) DST; + do_div(tmp, (u32) SRC); + DST = (u32) tmp; CONT; ALU64_DIV_K: - do_div(A, K); + do_div(DST, IMM); CONT; ALU_DIV_K: - tmp = (u32) A; - do_div(tmp, (u32) K); - A = (u32) tmp; + tmp = (u32) DST; + do_div(tmp, (u32) IMM); + DST = (u32) tmp; CONT; ALU_END_TO_BE: - switch (K) { + switch (IMM) { case 16: - A = (__force u16) cpu_to_be16(A); + DST = (__force u16) cpu_to_be16(DST); break; case 32: - A = (__force u32) cpu_to_be32(A); + DST = (__force u32) cpu_to_be32(DST); break; case 64: - A = (__force u64) cpu_to_be64(A); + DST = (__force u64) cpu_to_be64(DST); break; } CONT; ALU_END_TO_LE: - switch (K) { + switch (IMM) { case 16: - A = (__force u16) cpu_to_le16(A); + DST = (__force u16) cpu_to_le16(DST); break; case 32: - A = (__force u32) cpu_to_le32(A); + DST = (__force u32) cpu_to_le32(DST); break; case 64: - A = (__force u64) cpu_to_le64(A); + DST = (__force u64) cpu_to_le64(DST); break; } CONT; @@ -401,85 +401,85 @@ select_insn: insn += insn->off; CONT; JMP_JEQ_X: - if (A == X) { + if (DST == SRC) { insn += insn->off; CONT_JMP; } CONT; JMP_JEQ_K: - if (A == K) { + if (DST == IMM) { insn += insn->off; CONT_JMP; } CONT; JMP_JNE_X: - if (A != X) { + if (DST != SRC) { insn += insn->off; CONT_JMP; } CONT; JMP_JNE_K: - if (A != K) { + if (DST != IMM) { insn += 
insn->off; CONT_JMP; } CONT; JMP_JGT_X: - if (A > X) { + if (DST > SRC) { insn += insn->off; CONT_JMP; } CONT; JMP_JGT_K: - if (A > K) { + if (DST > IMM) { insn += insn->off; CONT_JMP; } CONT; JMP_JGE_X: - if (A >= X) { + if (DST >= SRC) { insn += insn->off; CONT_JMP; } CONT; JMP_JGE_K: - if (A >= K) { + if (DST >= IMM) { insn += insn->off; CONT_JMP; } CONT; JMP_JSGT_X: - if (((s64) A) > ((s64) X)) { + if (((s64) DST) > ((s64) SRC)) { insn += insn->off; CONT_JMP; } CONT; JMP_JSGT_K: - if (((s64) A) > ((s64) K)) { + if (((s64) DST) > ((s64) IMM)) { insn += insn->off; CONT_JMP; } CONT; JMP_JSGE_X: - if (((s64) A) >= ((s64) X)) { + if (((s64) DST) >= ((s64) SRC)) { insn += insn->off; CONT_JMP; } CONT; JMP_JSGE_K: - if (((s64) A) >= ((s64) K)) { + if (((s64) DST) >= ((s64) IMM)) { insn += insn->off; CONT_JMP; } CONT; JMP_JSET_X: - if (A & X) { + if (DST & SRC) { insn += insn->off; CONT_JMP; } CONT; JMP_JSET_K: - if (A & K) { + if (DST & IMM) { insn += insn->off; CONT_JMP; } @@ -488,15 +488,15 @@ select_insn: return BPF_R0; /* STX and ST and LDX*/ -#define LDST(SIZEOP, SIZE) \ - STX_MEM_##SIZEOP: \ - *(SIZE *)(unsigned long) (A + insn->off) = X; \ - CONT; \ - ST_MEM_##SIZEOP: \ - *(SIZE *)(unsigned long) (A + insn->off) = K; \ - CONT; \ - LDX_MEM_##SIZEOP: \ - A = *(SIZE *)(unsigned long) (X + insn->off); \ +#define LDST(SIZEOP, SIZE) \ + STX_MEM_##SIZEOP: \ + *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \ + CONT; \ + ST_MEM_##SIZEOP: \ + *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \ + CONT; \ + LDX_MEM_##SIZEOP: \ + DST = *(SIZE *)(unsigned long) (SRC + insn->off); \ CONT; LDST(B, u8) @@ -504,16 +504,16 @@ select_insn: LDST(W, u32) LDST(DW, u64) #undef LDST - STX_XADD_W: /* lock xadd *(u32 *)(A + insn->off) += X */ - atomic_add((u32) X, (atomic_t *)(unsigned long) - (A + insn->off)); + STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */ + atomic_add((u32) SRC, (atomic_t *)(unsigned long) + (DST + insn->off)); CONT; - STX_XADD_DW: /* lock xadd *(u64 *)(A + insn->off) += X */ - atomic64_add((u64) X, (atomic64_t *)(unsigned long) - (A + insn->off)); + STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */ + atomic64_add((u64) SRC, (atomic64_t *)(unsigned long) + (DST + insn->off)); CONT; - LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + K)) */ - off = K; + LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */ + off = IMM; load_word: /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are * only appearing in the programs where ctx == @@ -527,51 +527,51 @@ load_word: * BPF_R6-BPF_R9, and store return value into BPF_R0. 
* * Implicit input: - * ctx + * ctx == skb == BPF_R6 == CTX * * Explicit input: - * X == any register - * K == 32-bit immediate + * SRC == any register + * IMM == 32-bit immediate * * Output: * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness */ - ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp); + ptr = load_pointer((struct sk_buff *) CTX, off, 4, &tmp); if (likely(ptr != NULL)) { BPF_R0 = get_unaligned_be32(ptr); CONT; } return 0; - LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */ - off = K; + LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */ + off = IMM; load_half: - ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp); + ptr = load_pointer((struct sk_buff *) CTX, off, 2, &tmp); if (likely(ptr != NULL)) { BPF_R0 = get_unaligned_be16(ptr); CONT; } return 0; - LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */ - off = K; + LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */ + off = IMM; load_byte: - ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp); + ptr = load_pointer((struct sk_buff *) CTX, off, 1, &tmp); if (likely(ptr != NULL)) { BPF_R0 = *(u8 *)ptr; CONT; } return 0; - LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */ - off = K + X; + LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */ + off = IMM + SRC; goto load_word; - LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + X + K)) */ - off = K + X; + LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */ + off = IMM + SRC; goto load_half; - LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + X + K) */ - off = K + X; + LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */ + off = IMM + SRC; goto load_byte; default_label: @@ -675,7 +675,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp, case SKF_AD_OFF + SKF_AD_PROTOCOL: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); - /* A = *(u16 *) (ctx + offsetof(protocol)) */ + /* A = *(u16 *) (CTX + offsetof(protocol)) */ *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, offsetof(struct sk_buff, protocol)); /* A = ntohs(A) [emitting a nop or swap16] */ @@ -741,7 +741,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp, BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); - /* A = *(u16 *) (ctx + offsetof(vlan_tci)) */ + /* A = *(u16 *) (CTX + offsetof(vlan_tci)) */ *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, offsetof(struct sk_buff, vlan_tci)); if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) { @@ -760,13 +760,13 @@ static bool convert_bpf_extensions(struct sock_filter *fp, case SKF_AD_OFF + SKF_AD_NLATTR_NEST: case SKF_AD_OFF + SKF_AD_CPU: case SKF_AD_OFF + SKF_AD_RANDOM: - /* arg1 = ctx */ + /* arg1 = CTX */ *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX); /* arg2 = A */ *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A); /* arg3 = X */ *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X); - /* Emit call(ctx, arg2=A, arg3=X) */ + /* Emit call(arg1=CTX, arg2=A, arg3=X) */ switch (fp->k) { case SKF_AD_OFF + SKF_AD_PAY_OFFSET: *insn = BPF_EMIT_CALL(__skb_get_pay_offset); @@ -941,12 +941,12 @@ do_pass: */ *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k); - insn->a_reg = BPF_REG_A; - insn->x_reg = BPF_REG_TMP; + insn->dst_reg = BPF_REG_A; + insn->src_reg = BPF_REG_TMP; bpf_src = BPF_X; } else { - insn->a_reg = BPF_REG_A; - insn->x_reg = BPF_REG_X; + insn->dst_reg = BPF_REG_A; + insn->src_reg = BPF_REG_X; insn->imm = fp->k; bpf_src = BPF_SRC(fp->code); } -- cgit v1.2.3-59-g8ed1b From 5882a07c72093dc3a18e2d2b129fb200686bb6ee Mon Sep 17 
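At a use site, the renamed fields and helper macros read as in the short sketch below (illustrative only; it uses nothing beyond the BPF_REG_* names and macros shown above, and the struct is still called sock_filter_int at this point):

#include <linux/filter.h>

static void example_fill(struct sock_filter_int insn[2])
{
    /* "R0 = 42; return R0", spelled with the renamed helpers; the struct
     * fields are now insn->dst_reg / insn->src_reg instead of a_reg/x_reg. */
    insn[0] = BPF_MOV64_IMM(BPF_REG_0, 42);
    insn[1] = BPF_EXIT_INSN();
}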
00:00:00 2001 From: Wei-Chun Chao Date: Sun, 8 Jun 2014 23:48:54 -0700 Subject: net: fix UDP tunnel GSO of frag_list GRO packets This patch fixes a kernel BUG_ON in skb_segment. It is hit when testing two VMs on openvswitch with one VM acting as VXLAN gateway. During VXLAN packet GSO, skb_segment is called with skb->data pointing to inner TCP payload. skb_segment calls skb_network_protocol to retrieve the inner protocol. skb_network_protocol actually expects skb->data to point to MAC and it calls pskb_may_pull with ETH_HLEN. This ends up pulling in ETH_HLEN data from header tail. As a result, pskb_trim logic is skipped and BUG_ON is hit later. Move skb_push in front of skb_network_protocol so that skb->data lines up properly. kernel BUG at net/core/skbuff.c:2999! Call Trace: [] tcp_gso_segment+0x122/0x410 [] inet_gso_segment+0x13c/0x390 [] skb_mac_gso_segment+0x9b/0x170 [] skb_udp_tunnel_segment+0xd8/0x390 [] udp4_ufo_fragment+0x120/0x140 [] inet_gso_segment+0x13c/0x390 [] ? default_wake_function+0x12/0x20 [] skb_mac_gso_segment+0x9b/0x170 [] __skb_gso_segment+0x60/0xc0 [] dev_hard_start_xmit+0x183/0x550 [] sch_direct_xmit+0xfe/0x1d0 [] __dev_queue_xmit+0x214/0x4f0 [] dev_queue_xmit+0x10/0x20 [] ip_finish_output+0x66b/0x890 [] ip_output+0x58/0x90 [] ? fib_table_lookup+0x29f/0x350 [] ip_local_out_sk+0x39/0x50 [] iptunnel_xmit+0x10d/0x130 [] vxlan_xmit_skb+0x1d0/0x330 [vxlan] [] vxlan_tnl_send+0x129/0x1a0 [openvswitch] [] ovs_vport_send+0x26/0xa0 [openvswitch] [] do_output+0x2e/0x50 [openvswitch] Signed-off-by: Wei-Chun Chao Signed-off-by: David S. Miller --- net/core/skbuff.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net/core') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 8383b2bddeb9..9433047b2453 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2881,12 +2881,13 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, int pos; int dummy; + __skb_push(head_skb, doffset); proto = skb_network_protocol(head_skb, &dummy); if (unlikely(!proto)) return ERR_PTR(-EINVAL); csum = !!can_checksum_protocol(features, proto); - __skb_push(head_skb, doffset); + headroom = skb_headroom(head_skb); pos = skb_headlen(head_skb); -- cgit v1.2.3-59-g8ed1b From 61f83d0d57f1ec42a61b47b0ace97bbf0a8523a3 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 11 Jun 2014 13:16:44 -0700 Subject: net: filter: fix warning on 32-bit arch fix compiler warning on 32-bit architectures: net/core/filter.c: In function '__sk_run_filter': net/core/filter.c:540:22: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast] net/core/filter.c:550:22: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast] net/core/filter.c:560:22: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast] Reported-by: Fengguang Wu Signed-off-by: Alexei Starovoitov Signed-off-by: David S. 
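The ordering requirement behind the one-line move in skb_segment() above can be kept as a rule of thumb: skb_network_protocol() may pskb_may_pull() the Ethernet header, so it must only be called with skb->data sitting on the MAC header. A hedged kernel-side sketch (not part of the patch) of a helper honouring that rule:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

static __be16 example_inner_proto(struct sk_buff *skb, int doffset)
{
    __be16 proto;
    int depth;

    __skb_push(skb, doffset);           /* skb->data -> MAC header */
    proto = skb_network_protocol(skb, &depth);
    __skb_pull(skb, doffset);           /* restore the payload view */
    return proto;
}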
--- net/core/filter.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'net/core') diff --git a/net/core/filter.c b/net/core/filter.c index b3f21751b238..735fad897496 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -537,7 +537,7 @@ load_word: * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness */ - ptr = load_pointer((struct sk_buff *) CTX, off, 4, &tmp); + ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp); if (likely(ptr != NULL)) { BPF_R0 = get_unaligned_be32(ptr); CONT; @@ -547,7 +547,7 @@ load_word: LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */ off = IMM; load_half: - ptr = load_pointer((struct sk_buff *) CTX, off, 2, &tmp); + ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp); if (likely(ptr != NULL)) { BPF_R0 = get_unaligned_be16(ptr); CONT; @@ -557,7 +557,7 @@ load_half: LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */ off = IMM; load_byte: - ptr = load_pointer((struct sk_buff *) CTX, off, 1, &tmp); + ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp); if (likely(ptr != NULL)) { BPF_R0 = *(u8 *)ptr; CONT; -- cgit v1.2.3-59-g8ed1b From bad93e9d4eeb0d2d6b79204d6cedc7f2e7b256f1 Mon Sep 17 00:00:00 2001 From: Octavian Purdila Date: Thu, 12 Jun 2014 01:36:26 +0300 Subject: net: add __pskb_copy_fclone and pskb_copy_for_clone There are several instances where a pskb_copy or __pskb_copy is immediately followed by an skb_clone. Add a couple of new functions to allow the copy skb to be allocated from the fclone cache and thus speed up subsequent skb_clone calls. Cc: Alexander Smirnov Cc: Dmitry Eremin-Solenikov Cc: Marek Lindner Cc: Simon Wunderlich Cc: Antonio Quartulli Cc: Marcel Holtmann Cc: Gustavo Padovan Cc: Johan Hedberg Cc: Arvid Brodin Cc: Patrick McHardy Cc: Pablo Neira Ayuso Cc: Jozsef Kadlecsik Cc: Lauro Ramos Venancio Cc: Aloisio Almeida Jr Cc: Samuel Ortiz Cc: Jon Maloy Cc: Allan Stephens Cc: Andrew Hendry Cc: Eric Dumazet Reviewed-by: Christoph Paasch Signed-off-by: Octavian Purdila Signed-off-by: David S. Miller
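As a usage sketch for the two new helpers (hypothetical caller, not taken from the patch), this is the copy-then-clone pattern the diff below converts callers to; allocating the private copy from the fclone cache up front makes the subsequent skb_clone cheaper:

	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/skbuff.h>

	/* Hypothetical caller: the private copy is known to be cloned right
	 * away, so ask for it to come from the fclone cache.
	 */
	static int duplicate_and_fan_out(struct sk_buff *skb)
	{
		struct sk_buff *copy, *clone;

		copy = pskb_copy_for_clone(skb, GFP_ATOMIC);
		if (!copy)
			return -ENOMEM;

		clone = skb_clone(copy, GFP_ATOMIC);	/* benefits from the fclone companion */
		if (!clone) {
			kfree_skb(copy);
			return -ENOMEM;
		}

		/* A real caller would now hand copy and clone to two
		 * independent consumers; free them here only to keep the
		 * sketch self-contained.
		 */
		kfree_skb(copy);
		kfree_skb(clone);
		return 0;
	}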
--- include/linux/skbuff.h | 16 +++++++++++++++- net/batman-adv/distributed-arp-table.c | 2 +- net/batman-adv/network-coding.c | 2 +- net/bluetooth/hci_sock.c | 6 +++--- net/core/skbuff.c | 14 +++++++++----- net/nfc/llcp_core.c | 4 ++-- net/nfc/rawsock.c | 4 ++-- net/tipc/bcast.c | 2 +- 8 files changed, 34 insertions(+), 16 deletions(-) (limited to 'net/core') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index c705808bef9c..1f50bfe2243d 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -744,7 +744,13 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority); -struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask); +struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, + gfp_t gfp_mask, bool fclone); +static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, + gfp_t gfp_mask) +{ + return __pskb_copy_fclone(skb, headroom, gfp_mask, false); +} int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, @@ -2238,6 +2244,14 @@ static inline struct sk_buff *pskb_copy(struct sk_buff *skb, return __pskb_copy(skb, skb_headroom(skb), gfp_mask); } + +static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb, + gfp_t gfp_mask) +{ + return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true); +} + + /** * skb_clone_writable - is the header of a clone writable * @skb: buffer to check diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index dcd99b2bea3c..f2c066b21716 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -594,7 +594,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv, if (!neigh_node) goto free_orig; - tmp_skb = pskb_copy(skb, GFP_ATOMIC); + tmp_skb = pskb_copy_for_clone(skb, GFP_ATOMIC); if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, tmp_skb, cand[i].orig_node, packet_subtype)) { diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 40a2fc4bcf4c..8d04d174669e 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -1344,7 +1344,7 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv, struct ethhdr *ethhdr; /* Copy skb header to change the mac header */ - skb = pskb_copy(skb, GFP_ATOMIC); + skb = pskb_copy_for_clone(skb, GFP_ATOMIC); if (!skb) return; diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index f608bffdb8b9..80d25c150a65 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -143,7 +143,7 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) if (!skb_copy) { /* Create a private copy with headroom */ - skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC); + skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true); if (!skb_copy) continue; @@ -247,8 +247,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) struct hci_mon_hdr *hdr; /* Create a private copy with headroom */ - skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, - GFP_ATOMIC); + skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, + GFP_ATOMIC, true); if (!skb_copy) continue; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 05f4bef2ce12..b9e85e6cb26a 100644 ---
a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -951,10 +951,13 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) EXPORT_SYMBOL(skb_copy); /** - * __pskb_copy - create copy of an sk_buff with private head. + * __pskb_copy_fclone - create copy of an sk_buff with private head. * @skb: buffer to copy * @headroom: headroom of new skb * @gfp_mask: allocation priority + * @fclone: if true allocate the copy of the skb from the fclone + * cache instead of the head cache; it is recommended to set this + * to true for the cases where the copy will likely be cloned * * Make a copy of both an &sk_buff and part of its data, located * in header. Fragmented data remain shared. This is used when @@ -964,11 +967,12 @@ EXPORT_SYMBOL(skb_copy); * The returned buffer has a reference count of 1. */ -struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask) +struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, + gfp_t gfp_mask, bool fclone) { unsigned int size = skb_headlen(skb) + headroom; - struct sk_buff *n = __alloc_skb(size, gfp_mask, - skb_alloc_rx_flag(skb), NUMA_NO_NODE); + int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); + struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); if (!n) goto out; @@ -1008,7 +1012,7 @@ struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask) out: return n; } -EXPORT_SYMBOL(__pskb_copy); +EXPORT_SYMBOL(__pskb_copy_fclone); /** * pskb_expand_head - reallocate header of &sk_buff diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c index f6278da68763..51e788797317 100644 --- a/net/nfc/llcp_core.c +++ b/net/nfc/llcp_core.c @@ -680,8 +680,8 @@ void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local, continue; if (skb_copy == NULL) { - skb_copy = __pskb_copy(skb, NFC_RAW_HEADER_SIZE, - GFP_ATOMIC); + skb_copy = __pskb_copy_fclone(skb, NFC_RAW_HEADER_SIZE, + GFP_ATOMIC, true); if (skb_copy == NULL) continue; diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index 55eefee311eb..11c3544ea546 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -378,8 +378,8 @@ void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb, sk_for_each(sk, &raw_sk_list.head) { if (!skb_copy) { - skb_copy = __pskb_copy(skb, NFC_RAW_HEADER_SIZE, - GFP_ATOMIC); + skb_copy = __pskb_copy_fclone(skb, NFC_RAW_HEADER_SIZE, + GFP_ATOMIC, true); if (!skb_copy) continue; diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 671f9817b4f4..26631679a1fa 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -653,7 +653,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1, tipc_bearer_send(b->identity, buf, &b->bcast_addr); } else { /* Avoid concurrent buffer access */ - tbuf = pskb_copy(buf, GFP_ATOMIC); + tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC); if (!tbuf) break; tipc_bearer_send(b->identity, tbuf, &b->bcast_addr); -- cgit v1.2.3-59-g8ed1b From 7e3cead5172927732f51fde77fef6f521e22f209 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Tue, 10 Jun 2014 18:54:19 -0700 Subject: net: Save software checksum complete In skb_checksum complete, if we need to compute the checksum for the packet (via skb_checksum) save the result as CHECKSUM_COMPLETE. Subsequent checksum verification can use this. Also, added csum_complete_sw flag to distinguish between software and hardware generated checksum complete, we should always be able to trust the software computation. Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 3 ++- net/core/datagram.c | 14 +++++++++----- net/ipv4/gre_offload.c | 6 ++++-- net/sunrpc/socklib.c | 3 ++- 4 files changed, 17 insertions(+), 9 deletions(-) (limited to 'net/core') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 72a53805858a..5b5cd3189c98 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -573,7 +573,8 @@ struct sk_buff { __u8 encapsulation:1; __u8 encap_hdr_csum:1; __u8 csum_valid:1; - /* 4/6 bit hole (depending on ndisc_nodetype presence) */ + __u8 csum_complete_sw:1; + /* 3/5 bit hole (depending on ndisc_nodetype presence) */ kmemcheck_bitfield_end(flags2); #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL diff --git a/net/core/datagram.c b/net/core/datagram.c index a16ed7bbe376..6b1c04ca1d50 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -739,11 +739,15 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) __sum16 sum; sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); - if (likely(!sum)) { - if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) - netdev_rx_csum_fault(skb->dev); - skb->ip_summed = CHECKSUM_UNNECESSARY; - } + if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && !sum && + !skb->csum_complete_sw) + netdev_rx_csum_fault(skb->dev); + + /* Save checksum complete for later use */ + skb->csum = sum; + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum_complete_sw = 1; + return sum; } EXPORT_SYMBOL(__skb_checksum_complete_head); diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 24deb3928b9e..eb92deb12666 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -131,10 +131,12 @@ static __sum16 gro_skb_checksum(struct sk_buff *skb) csum_partial(skb->data, skb_gro_offset(skb), 0)); sum = csum_fold(NAPI_GRO_CB(skb)->csum); if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) { - if (unlikely(!sum)) + if (unlikely(!sum) && !skb->csum_complete_sw) netdev_rx_csum_fault(skb->dev); - } else + } else { skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum_complete_sw = 1; + } return sum; } diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c index 0a648c502fc3..2df87f78e518 100644 --- a/net/sunrpc/socklib.c +++ b/net/sunrpc/socklib.c @@ -173,7 +173,8 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) return -1; if (csum_fold(desc.csum)) return -1; - if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) + if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && + !skb->csum_complete_sw) netdev_rx_csum_fault(skb->dev); return 0; no_checksum: -- cgit v1.2.3-59-g8ed1b From c5b46160877a9bb23ab6f96fb666e7766f407704 Mon Sep 17 00:00:00 2001 From: Doug Ledford Date: Wed, 11 Jun 2014 10:38:03 -0400 Subject: net/core: Add VF link state control policy Commit 1d8faf48c7 (net/core: Add VF link state control) added VF link state control to the netlink VF nested structure, but failed to add a proper entry for the new structure into the VF policy table. Add the missing entry so the table and the actual data copied into the netlink nested struct are in sync. Signed-off-by: Doug Ledford Signed-off-by: David S. 
Miller --- net/core/rtnetlink.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net/core') diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index fbdb1556b0db..d57d7bc22182 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1208,6 +1208,8 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { .len = sizeof(struct ifla_vf_tx_rate) }, [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY, .len = sizeof(struct ifla_vf_spoofchk) }, + [IFLA_VF_LINK_STATE] = { .type = NLA_BINARY, + .len = sizeof(struct ifla_vf_link_state) }, }; static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { -- cgit v1.2.3-59-g8ed1b From e5eca6d41f53db48edd8cf88a3f59d2c30227f8e Mon Sep 17 00:00:00 2001 From: Michal Schmidt Date: Wed, 28 May 2014 14:15:19 +0200 Subject: rtnetlink: fix userspace API breakage for iproute2 < v3.9.0 When running RHEL6 userspace on a current upstream kernel, "ip link" fails to show VF information. The reason is a kernel<->userspace API change introduced by commit 88c5b5ce5cb57 ("rtnetlink: Call nlmsg_parse() with correct header length"), after which the kernel does not see iproute2's IFLA_EXT_MASK attribute in the netlink request. iproute2 adjusted for the API change in its commit 63338dca4513 ("libnetlink: Use ifinfomsg instead of rtgenmsg in rtnl_wilddump_req_filter"). The problem has been noticed before: http://marc.info/?l=linux-netdev&m=136692296022182&w=2 (Subject: Re: getting VF link info seems to be broken in 3.9-rc8) We can do better than tell those with old userspace to upgrade. We can recognize the old iproute2 in the kernel by checking the netlink message length. Even when including the IFLA_EXT_MASK attribute, its netlink message is shorter than struct ifinfomsg. With this patch "ip link" shows VF information in both old and new iproute2 versions. Signed-off-by: Michal Schmidt Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) (limited to 'net/core') diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 233b5ae87583..1063996f8317 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1244,6 +1244,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) struct nlattr *tb[IFLA_MAX+1]; u32 ext_filter_mask = 0; int err; + int hdrlen; s_h = cb->args[0]; s_idx = cb->args[1]; @@ -1251,8 +1252,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); cb->seq = net->dev_base_seq; - if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, - ifla_policy) >= 0) { + /* A hack to preserve kernel<->userspace interface. + * The correct header is ifinfomsg. It is consistent with rtnl_getlink. + * However, before Linux v3.9 the code here assumed rtgenmsg and that's + * what iproute2 < v3.9.0 used. + * We can detect the old iproute2. Even including the IFLA_EXT_MASK + * attribute, its netlink message is shorter than struct ifinfomsg. + */ + hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ? + sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); + + if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) { if (tb[IFLA_EXT_MASK]) ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); @@ -2126,9 +2136,13 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) struct nlattr *tb[IFLA_MAX+1]; u32 ext_filter_mask = 0; u16 min_ifinfo_dump_size = 0; + int hdrlen; + + /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. 
*/ + hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? + sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); - if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, - ifla_policy) >= 0) { + if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) { if (tb[IFLA_EXT_MASK]) ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); } -- cgit v1.2.3-59-g8ed1b
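For reference, the length heuristic used by both call sites in this patch can be read as a small helper (the helper name is hypothetical; the kernel open-codes the test): a request from iproute2 older than v3.9.0 carries only a struct rtgenmsg ahead of its attributes, so even with IFLA_EXT_MASK attached its payload is still shorter than struct ifinfomsg, while newer tools send a full ifinfomsg.

	#include <linux/rtnetlink.h>
	#include <net/netlink.h>

	/* Hypothetical helper mirroring the open-coded test in
	 * rtnl_dump_ifinfo() and rtnl_calcit(): choose the header length to
	 * parse with based on how much the sender actually put on the wire.
	 */
	static int rtnl_ifinfo_hdrlen(const struct nlmsghdr *nlh)
	{
		return nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
		       sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
	}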