author    Linus Torvalds <torvalds@linux-foundation.org>  2018-04-03 14:04:18 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-04-03 14:04:18 -0700
commit    5bb053bef82523a8fd78d650bca81c9f114fa276 (patch)
tree      58c2fe47f60bb69230bb05d57a6c9e3f47f7b1fe /include/linux
parent    Merge tag 'docs-4.17' of git://git.lwn.net/linux (diff)
parent    Merge branch 'net-mvneta-improve-suspend-resume' (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Support offloading wireless authentication to userspace via NL80211_CMD_EXTERNAL_AUTH, from Srinivas Dasari.

 2) A lot of work on network namespace setup/teardown from Kirill Tkhai. Setup and cleanup of namespaces now all run asynchronously and thus performance is significantly increased.

 3) Add rx/tx timestamping support to mv88e6xxx driver, from Brandon Streiff.

 4) Support zerocopy on RDS sockets, from Sowmini Varadhan.

 5) Use denser instruction encoding in x86 eBPF JIT, from Daniel Borkmann.

 6) Support hw offload of vlan filtering in mvpp2 driver, from Maxime Chevallier.

 7) Support grafting of child qdiscs in mlxsw driver, from Nogah Frankel.

 8) Add packet forwarding tests to selftests, from Ido Schimmel.

 9) Deal with sub-optimal GSO packets better in BBR congestion control, from Eric Dumazet.

10) Support 5-tuple hashing in ipv6 multipath routing, from David Ahern.

11) Add path MTU tests to selftests, from Stefano Brivio.

12) Various bits of IPSEC offloading support for mlx5, from Aviad Yehezkel, Yossi Kuperman, and Saeed Mahameed.

13) Support RSS spreading on ntuple filters in SFC driver, from Edward Cree.

14) Lots of sockmap work from John Fastabend. Applications can use eBPF to filter sendmsg and sendpage operations.

15) In-kernel receive TLS support, from Dave Watson.

16) Add XDP support to ixgbevf, this is significant because it should allow optimized XDP usage in various cloud environments. From Tony Nguyen.

17) Add new Intel E800 series "ice" ethernet driver, from Anirudh Venkataramanan et al.

18) IP fragmentation match offload support in nfp driver, from Pieter Jansen van Vuuren.

19) Support XDP redirect in i40e driver, from Björn Töpel.

20) Add BPF_RAW_TRACEPOINT program type for accessing the arguments of tracepoints in their raw form, from Alexei Starovoitov.

21) Lots of striding RQ improvements to mlx5 driver with many performance improvements, from Tariq Toukan.

22) Use rhashtable for inet frag reassembly, from Eric Dumazet.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1678 commits)
  net: mvneta: improve suspend/resume
  net: mvneta: split rxq/txq init and txq deinit into SW and HW parts
  ipv6: frags: fix /proc/sys/net/ipv6/ip6frag_low_thresh
  net: bgmac: Fix endian access in bgmac_dma_tx_ring_free()
  net: bgmac: Correctly annotate register space
  route: check sysctl_fib_multipath_use_neigh earlier than hash
  fix typo in command value in drivers/net/phy/mdio-bitbang.
  sky2: Increase D3 delay to sky2 stops working after suspend
  net/mlx5e: Set EQE based as default TX interrupt moderation mode
  ibmvnic: Disable irqs before exiting reset from closed state
  net: sched: do not emit messages while holding spinlock
  vlan: also check phy_driver ts_info for vlan's real device
  Bluetooth: Mark expected switch fall-throughs
  Bluetooth: Set HCI_QUIRK_SIMULTANEOUS_DISCOVERY for BTUSB_QCA_ROME
  Bluetooth: btrsi: remove unused including <linux/version.h>
  Bluetooth: hci_bcm: Remove DMI quirk for the MINIX Z83-4
  sh_eth: kill useless check in __sh_eth_get_regs()
  sh_eth: add sh_eth_cpu_data::no_xdfar flag
  ipv6: factorize sk_wmem_alloc updates done by __ip6_append_data()
  ipv4: factorize sk_wmem_alloc updates done by __ip_append_data()
  ...
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/atalk.h | 2
-rw-r--r--  include/linux/avf/virtchnl.h | 107
-rw-r--r--  include/linux/bpf-cgroup.h | 68
-rw-r--r--  include/linux/bpf.h | 6
-rw-r--r--  include/linux/bpf_types.h | 3
-rw-r--r--  include/linux/bpf_verifier.h | 13
-rw-r--r--  include/linux/ethtool.h | 5
-rw-r--r--  include/linux/filter.h | 34
-rw-r--r--  include/linux/ieee80211.h | 14
-rw-r--r--  include/linux/if_vlan.h | 24
-rw-r--r--  include/linux/kernel.h | 7
-rw-r--r--  include/linux/mlx5/accel.h | 144
-rw-r--r--  include/linux/mlx5/cq.h | 20
-rw-r--r--  include/linux/mlx5/device.h | 7
-rw-r--r--  include/linux/mlx5/driver.h | 94
-rw-r--r--  include/linux/mlx5/eswitch.h | 58
-rw-r--r--  include/linux/mlx5/fs.h | 12
-rw-r--r--  include/linux/mlx5/fs_helpers.h | 134
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 132
-rw-r--r--  include/linux/mlx5/mlx5_ifc_fpga.h | 92
-rw-r--r--  include/linux/mlx5/port.h | 6
-rw-r--r--  include/linux/mlx5/transobj.h | 2
-rw-r--r--  include/linux/mlx5/vport.h | 3
-rw-r--r--  include/linux/mroute.h | 117
-rw-r--r--  include/linux/mroute6.h | 70
-rw-r--r--  include/linux/mroute_base.h | 474
-rw-r--r--  include/linux/net.h | 8
-rw-r--r--  include/linux/netdev_features.h | 2
-rw-r--r--  include/linux/netdevice.h | 125
-rw-r--r--  include/linux/netfilter/nfnetlink_acct.h | 3
-rw-r--r--  include/linux/netfilter/x_tables.h | 5
-rw-r--r--  include/linux/of_net.h | 6
-rw-r--r--  include/linux/phy.h | 8
-rw-r--r--  include/linux/phylink.h | 17
-rw-r--r--  include/linux/ptp_classify.h | 4
-rw-r--r--  include/linux/ptr_ring.h | 7
-rw-r--r--  include/linux/qed/common_hsi.h | 2
-rw-r--r--  include/linux/qed/eth_common.h | 2
-rw-r--r--  include/linux/qed/iscsi_common.h | 4
-rw-r--r--  include/linux/qed/qed_if.h | 19
-rw-r--r--  include/linux/qed/rdma_common.h | 2
-rw-r--r--  include/linux/qed/roce_common.h | 3
-rw-r--r--  include/linux/rhashtable.h | 8
-rw-r--r--  include/linux/rtnetlink.h | 4
-rw-r--r--  include/linux/scatterlist.h | 18
-rw-r--r--  include/linux/sfp.h | 18
-rw-r--r--  include/linux/skbuff.h | 4
-rw-r--r--  include/linux/socket.h | 3
-rw-r--r--  include/linux/trace_events.h | 42
-rw-r--r--  include/linux/tracepoint-defs.h | 6
50 files changed, 1639 insertions, 329 deletions
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index 4d356e168692..40373920ea58 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -113,10 +113,12 @@ extern void aarp_proto_init(void);
/* Inter module exports */
/* Give a device find its atif control structure */
+#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
static inline struct atalk_iface *atalk_find_dev(struct net_device *dev)
{
return dev->atalk_ptr;
}
+#endif
extern struct atalk_addr *atalk_find_dev_addr(struct net_device *dev);
extern struct net_device *atrtr_get_dev(struct atalk_addr *sa);
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index 3ce61342fa31..b0a7f315bfbe 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -136,15 +136,21 @@ enum virtchnl_ops {
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
VIRTCHNL_OP_REQUEST_QUEUES = 29,
+ VIRTCHNL_OP_ENABLE_CHANNELS = 30,
+ VIRTCHNL_OP_DISABLE_CHANNELS = 31,
+ VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
+ VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
};
-/* This macro is used to generate a compilation error if a structure
+/* These macros are used to generate compilation errors if a structure/union
* is not exactly the correct length. It gives a divide by zero error if the
- * structure is not of the correct size, otherwise it creates an enum that is
- * never used.
+ * structure/union is not of the correct size, otherwise it creates an enum
+ * that is never used.
*/
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \
+ { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
/* Virtual channel message descriptor. This overlays the admin queue
* descriptor. All other data is passed in external buffers.
@@ -244,6 +250,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
+#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
@@ -496,6 +503,81 @@ struct virtchnl_rss_hena {
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+/* VIRTCHNL_OP_ENABLE_CHANNELS
+ * VIRTCHNL_OP_DISABLE_CHANNELS
+ * VF sends these messages to enable or disable channels based on
+ * the user specified queue count and queue offset for each traffic class.
+ * This struct encompasses all the information that the PF needs from
+ * VF to create a channel.
+ */
+struct virtchnl_channel_info {
+ u16 count; /* number of queues in a channel */
+ u16 offset; /* queues in a channel start from 'offset' */
+ u32 pad;
+ u64 max_tx_rate;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
+
+struct virtchnl_tc_info {
+ u32 num_tc;
+ u32 pad;
+ struct virtchnl_channel_info list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
+
+/* VIRTCHNL_ADD_CLOUD_FILTER
+ * VIRTCHNL_DEL_CLOUD_FILTER
+ * VF sends these messages to add or delete a cloud filter based on the
+ * user specified match and action filters. These structures encompass
+ * all the information that the PF needs from the VF to add/delete a
+ * cloud filter.
+ */
+
+struct virtchnl_l4_spec {
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+ __be16 vlan_id;
+ __be16 pad; /* reserved for future use */
+ __be32 src_ip[4];
+ __be32 dst_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
+
+union virtchnl_flow_spec {
+ struct virtchnl_l4_spec tcp_spec;
+ u8 buffer[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
+
+enum virtchnl_action {
+ /* action types */
+ VIRTCHNL_ACTION_DROP = 0,
+ VIRTCHNL_ACTION_TC_REDIRECT,
+};
+
+enum virtchnl_flow_type {
+ /* flow types */
+ VIRTCHNL_TCP_V4_FLOW = 0,
+ VIRTCHNL_TCP_V6_FLOW,
+};
+
+struct virtchnl_filter {
+ union virtchnl_flow_spec data;
+ union virtchnl_flow_spec mask;
+ enum virtchnl_flow_type flow_type;
+ enum virtchnl_action action;
+ u32 action_meta;
+ __u8 field_flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
+
/* VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
@@ -711,6 +793,25 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_REQUEST_QUEUES:
valid_len = sizeof(struct virtchnl_vf_res_request);
break;
+ case VIRTCHNL_OP_ENABLE_CHANNELS:
+ valid_len = sizeof(struct virtchnl_tc_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_tc_info *vti =
+ (struct virtchnl_tc_info *)msg;
+ valid_len += vti->num_tc *
+ sizeof(struct virtchnl_channel_info);
+ if (vti->num_tc == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_DISABLE_CHANNELS:
+ break;
+ case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+ valid_len = sizeof(struct virtchnl_filter);
+ break;
+ case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+ valid_len = sizeof(struct virtchnl_filter);
+ break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
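
The VIRTCHNL_CHECK_STRUCT_LEN()/VIRTCHNL_CHECK_UNION_LEN() macros added above turn a size mismatch into a divide-by-zero inside a constant expression, so the build breaks as soon as a wire structure's layout drifts. A minimal standalone sketch of the same trick (the names below are illustrative and not part of virtchnl.h):

#define CHECK_STRUCT_LEN(n, X) enum static_assert_enum_##X \
	{ static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }

/* Mirrors the layout of virtchnl_channel_info: 2 + 2 + 4 + 8 = 16 bytes. */
struct demo_channel_info {
	unsigned short count;
	unsigned short offset;
	unsigned int pad;
	unsigned long long max_tx_rate;
};

CHECK_STRUCT_LEN(16, demo_channel_info);	/* compiles: size really is 16 */
/* CHECK_STRUCT_LEN(20, demo_channel_info); would abort the build with a
 * division-by-zero error, which is exactly the point of the macro.
 */
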
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index a7f16e0f8d68..30d15e64b993 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -6,6 +6,7 @@
#include <uapi/linux/bpf.h>
struct sock;
+struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_sock_ops_kern;
@@ -63,6 +64,10 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
int __cgroup_bpf_run_filter_sk(struct sock *sk,
enum bpf_attach_type type);
+int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
+ struct sockaddr *uaddr,
+ enum bpf_attach_type type);
+
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
struct bpf_sock_ops_kern *sock_ops,
enum bpf_attach_type type);
@@ -93,16 +98,64 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
__ret; \
})
+#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled) { \
+ __ret = __cgroup_bpf_run_filter_sk(sk, type); \
+ } \
+ __ret; \
+})
+
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)
+
+#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)
+
+#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
+
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled) \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type); \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && sk) { \
- __ret = __cgroup_bpf_run_filter_sk(sk, \
- BPF_CGROUP_INET_SOCK_CREATE); \
+ if (cgroup_bpf_enabled) { \
+ lock_sock(sk); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type); \
+ release_sock(sk); \
} \
__ret; \
})
+#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)
+
+#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)
+
+#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
+ sk->sk_prot->pre_connect)
+
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
+
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
+
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
+
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
+
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
int __ret = 0; \
@@ -132,9 +185,18 @@ struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
+#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
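
The new BPF_CGROUP_RUN_SA_PROG()/BPF_CGROUP_RUN_SA_PROG_LOCK() wrappers above return the verdict of any cgroup sock_addr program attached for the given attach type; the _LOCK variants additionally take and release the socket lock around the run. A hedged sketch of the calling pattern (example_connect() is hypothetical, not a real kernel call site):

static int example_connect(struct sock *sk, struct sockaddr *uaddr)
{
	int err;

	/* Run any BPF_CGROUP_INET4_CONNECT programs attached to the
	 * socket's cgroup; a non-zero verdict aborts the connect.
	 * The _LOCK variant expects 'sk' to be unlocked on entry.
	 */
	err = BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
	if (err)
		return err;

	/* ... continue with the protocol's normal connect path ... */
	return 0;
}
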
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 66df387106de..95a7abd0ee92 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -21,6 +21,7 @@ struct bpf_verifier_env;
struct perf_event;
struct bpf_prog;
struct bpf_map;
+struct sock;
/* map is generic key/value storage optionally accesible by eBPF programs */
struct bpf_map_ops {
@@ -207,12 +208,15 @@ struct bpf_prog_ops {
struct bpf_verifier_ops {
/* return eBPF function prototype for verification */
- const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
+ const struct bpf_func_proto *
+ (*get_func_proto)(enum bpf_func_id func_id,
+ const struct bpf_prog *prog);
/* return true if 'size' wide access at offset 'off' within bpf_context
* with 'type' (read or write) is allowed
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
+ const struct bpf_prog *prog,
struct bpf_insn_access_aux *info);
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
const struct bpf_prog *prog);
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 19b8349a3809..2b28fcf6f6ae 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -8,16 +8,19 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act)
BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit)
BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg)
#endif
#ifdef CONFIG_BPF_EVENTS
BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe)
BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint)
BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event)
+BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint)
#endif
#ifdef CONFIG_CGROUP_BPF
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 6b66cd1aa0b9..7e61c395fddf 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -153,7 +153,7 @@ struct bpf_insn_aux_data {
#define BPF_VERIFIER_TMP_LOG_SIZE 1024
-struct bpf_verifer_log {
+struct bpf_verifier_log {
u32 level;
char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
char __user *ubuf;
@@ -161,11 +161,16 @@ struct bpf_verifer_log {
u32 len_total;
};
-static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log)
+static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
{
return log->len_used >= log->len_total - 1;
}
+static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
+{
+ return log->level && log->ubuf && !bpf_verifier_log_full(log);
+}
+
#define BPF_MAX_SUBPROGS 256
/* single container for all structs
@@ -185,13 +190,15 @@ struct bpf_verifier_env {
bool allow_ptr_leaks;
bool seen_direct_write;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
- struct bpf_verifer_log log;
+ struct bpf_verifier_log log;
u32 subprog_starts[BPF_MAX_SUBPROGS];
/* computes the stack depth of each bpf function */
u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1];
u32 subprog_cnt;
};
+void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
+ va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
const char *fmt, ...);
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 2ec41a7eb54f..ebe41811ed34 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -371,6 +371,11 @@ struct ethtool_ops {
u8 *hfunc);
int (*set_rxfh)(struct net_device *, const u32 *indir,
const u8 *key, const u8 hfunc);
+ int (*get_rxfh_context)(struct net_device *, u32 *indir, u8 *key,
+ u8 *hfunc, u32 rss_context);
+ int (*set_rxfh_context)(struct net_device *, const u32 *indir,
+ const u8 *key, const u8 hfunc,
+ u32 *rss_context, bool delete);
void (*get_channels)(struct net_device *, struct ethtool_channels *);
int (*set_channels)(struct net_device *, struct ethtool_channels *);
int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 276932d75975..fc4e8f91b03d 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -20,7 +20,6 @@
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
-#include <net/xdp.h>
#include <net/sch_generic.h>
#include <uapi/linux/filter.h>
@@ -30,6 +29,7 @@ struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;
+struct xdp_rxq_info;
/* ArgX, context and stack frame pointer register positions. Note,
* Arg1, Arg2, Arg3, etc are used as argument mappings of function
@@ -372,7 +372,7 @@ struct bpf_prog_aux;
#define BPF_LDST_BYTES(insn) \
({ \
- const int __size = bpf_size_to_bytes(BPF_SIZE(insn->code)); \
+ const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \
WARN_ON(__size < 0); \
__size; \
})
@@ -469,6 +469,7 @@ struct bpf_prog {
is_func:1, /* program is a bpf function */
kprobe_override:1; /* Do we override a kprobe? */
enum bpf_prog_type type; /* Type of BPF program */
+ enum bpf_attach_type expected_attach_type; /* For some prog types */
u32 len; /* Number of filter blocks */
u32 jited_len; /* Size of jited insns in bytes */
u8 tag[BPF_TAG_SIZE];
@@ -507,6 +508,24 @@ struct xdp_buff {
struct xdp_rxq_info *rxq;
};
+struct sk_msg_buff {
+ void *data;
+ void *data_end;
+ __u32 apply_bytes;
+ __u32 cork_bytes;
+ int sg_copybreak;
+ int sg_start;
+ int sg_curr;
+ int sg_end;
+ struct scatterlist sg_data[MAX_SKB_FRAGS];
+ bool sg_copy[MAX_SKB_FRAGS];
+ __u32 key;
+ __u32 flags;
+ struct bpf_map *map;
+ struct sk_buff *skb;
+ struct list_head list;
+};
+
/* Compute the linear packet data range [data, data_end) which
* will be accessed by various program types (cls_bpf, act_bpf,
* lwt, ...). Subsystems allowing direct data access must (!)
@@ -771,6 +790,7 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp)
void bpf_warn_invalid_xdp_action(u32 act);
struct sock *do_sk_redirect_map(struct sk_buff *skb);
+struct sock *do_msg_redirect_map(struct sk_msg_buff *md);
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
@@ -1001,6 +1021,16 @@ static inline int bpf_tell_extensions(void)
return SKF_AD_MAX;
}
+struct bpf_sock_addr_kern {
+ struct sock *sk;
+ struct sockaddr *uaddr;
+ /* Temporary "register" to make indirect stores to nested structures
+ * defined above. We need three registers to make such a store, but
+ * only two (src and dst) are available at convert_ctx_access time
+ */
+ u64 tmp_reg;
+};
+
struct bpf_sock_ops_kern {
struct sock *sk;
u32 op;
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index ee6657a0ed69..8fe7e4306816 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -8,6 +8,7 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -2111,7 +2112,7 @@ enum ieee80211_key_len {
#define FILS_ERP_MAX_REALM_LEN 253
#define FILS_ERP_MAX_RRK_LEN 64
-#define PMK_MAX_LEN 48
+#define PMK_MAX_LEN 64
/* Public action codes (IEEE Std 802.11-2016, 9.6.8.1, Table 9-307) */
enum ieee80211_pub_actioncode {
@@ -2502,6 +2503,17 @@ static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr)
}
/**
+ * ieee80211_get_tid - get qos TID
+ * @hdr: the frame
+ */
+static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr)
+{
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+ return qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+/**
* ieee80211_get_SA - get pointer to SA
* @hdr: the frame
*
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 7d30892da064..d11f41d5269f 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -83,6 +83,30 @@ static inline bool is_vlan_dev(const struct net_device *dev)
#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
#define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK)
+static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev));
+}
+
+static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev);
+}
+
+static inline int vlan_get_rx_stag_filter_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev));
+}
+
+static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev);
+}
+
/**
* struct vlan_pcpu_stats - VLAN percpu rx/tx stats
* @rx_packets: number of received packets
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3fd291503576..293fa0677fba 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -919,6 +919,13 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+/* This counts to 12. Any more, it will return 13th argument. */
+#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
+#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+
+#define __CONCAT(a, b) a ## b
+#define CONCATENATE(a, b) __CONCAT(a, b)
+
/**
* container_of - cast a member of a structure out to the containing structure
* @ptr: the pointer to the member.
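
COUNT_ARGS() added to kernel.h above works by placing its arguments in front of a descending number list and letting __COUNT_ARGS() return whatever lands in the 14th parameter slot; the ', ##X' form makes the empty case collapse to 0. A standalone sketch of the expansion (illustrative only):

#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)

/* COUNT_ARGS(a, b, c) expands to
 * __COUNT_ARGS(, a, b, c, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
 * and the 14th slot (_n) holds 3.
 */
int three = COUNT_ARGS(a, b, c);	/* 3 */
int zero  = COUNT_ARGS();		/* 0: ', ##X' drops the leading comma */
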
diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h
new file mode 100644
index 000000000000..70e7e5673ce9
--- /dev/null
+++ b/include/linux/mlx5/accel.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5_ACCEL_H__
+#define __MLX5_ACCEL_H__
+
+#include <linux/mlx5/driver.h>
+
+enum mlx5_accel_esp_aes_gcm_keymat_iv_algo {
+ MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ,
+};
+
+enum mlx5_accel_esp_flags {
+ MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */
+ MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0,
+ MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1,
+ MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2,
+};
+
+enum mlx5_accel_esp_action {
+ MLX5_ACCEL_ESP_ACTION_DECRYPT,
+ MLX5_ACCEL_ESP_ACTION_ENCRYPT,
+};
+
+enum mlx5_accel_esp_keymats {
+ MLX5_ACCEL_ESP_KEYMAT_AES_NONE,
+ MLX5_ACCEL_ESP_KEYMAT_AES_GCM,
+};
+
+enum mlx5_accel_esp_replay {
+ MLX5_ACCEL_ESP_REPLAY_NONE,
+ MLX5_ACCEL_ESP_REPLAY_BMP,
+};
+
+struct aes_gcm_keymat {
+ u64 seq_iv;
+ enum mlx5_accel_esp_aes_gcm_keymat_iv_algo iv_algo;
+
+ u32 salt;
+ u32 icv_len;
+
+ u32 key_len;
+ u32 aes_key[256 / 32];
+};
+
+struct mlx5_accel_esp_xfrm_attrs {
+ enum mlx5_accel_esp_action action;
+ u32 esn;
+ u32 spi;
+ u32 seq;
+ u32 tfc_pad;
+ u32 flags;
+ u32 sa_handle;
+ enum mlx5_accel_esp_replay replay_type;
+ union {
+ struct {
+ u32 size;
+
+ } bmp;
+ } replay;
+ enum mlx5_accel_esp_keymats keymat_type;
+ union {
+ struct aes_gcm_keymat aes_gcm;
+ } keymat;
+};
+
+struct mlx5_accel_esp_xfrm {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_accel_esp_xfrm_attrs attrs;
+};
+
+enum {
+ MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA = 1UL << 0,
+};
+
+enum mlx5_accel_ipsec_cap {
+ MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0,
+ MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1,
+ MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2,
+ MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3,
+ MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4,
+ MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5,
+ MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 6,
+ MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7,
+};
+
+#ifdef CONFIG_MLX5_ACCEL
+
+u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
+
+struct mlx5_accel_esp_xfrm *
+mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs,
+ u32 flags);
+void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm);
+int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs);
+
+#else
+
+static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
+
+static inline struct mlx5_accel_esp_xfrm *
+mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs,
+ u32 flags) { return ERR_PTR(-EOPNOTSUPP); }
+static inline void
+mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {}
+static inline int
+mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; }
+
+#endif
+#endif
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 48c181a2acc9..0ef6138eca49 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -60,6 +60,7 @@ struct mlx5_core_cq {
} tasklet_ctx;
int reset_notify_added;
struct list_head reset_notify;
+ struct mlx5_eq *eq;
};
@@ -171,8 +172,17 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL);
}
-int mlx5_init_cq_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
+static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
+{
+ refcount_inc(&cq->refcount);
+}
+
+static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
+{
+ if (refcount_dec_and_test(&cq->refcount))
+ complete(&cq->free);
+}
+
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
@@ -183,6 +193,12 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
struct mlx5_core_cq *cq, u16 cq_period,
u16 cq_max_count);
+static inline void mlx5_dump_err_cqe(struct mlx5_core_dev *dev,
+ struct mlx5_err_cqe *err_cqe)
+{
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe,
+ sizeof(*err_cqe), false);
+}
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index e5258ee4e38b..12758595459b 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -782,6 +782,9 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
return (u64)lo | ((u64)hi << 32);
}
+#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE (9)
+#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE (6)
+
struct mpwrq_cqe_bc {
__be16 filler_consumed_strides;
__be16 byte_cnt;
@@ -1013,6 +1016,7 @@ enum mlx5_cap_type {
MLX5_CAP_RESERVED,
MLX5_CAP_VECTOR_CALC,
MLX5_CAP_QOS,
+ MLX5_CAP_DEBUG,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1140,6 +1144,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_QOS(mdev, cap)\
MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
+#define MLX5_CAP_DEBUG(mdev, cap)\
+ MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap)
+
#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 9d3a03364e6e..cded85ab6fe4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -345,13 +345,6 @@ struct mlx5_buf_list {
dma_addr_t map;
};
-struct mlx5_buf {
- struct mlx5_buf_list direct;
- int npages;
- int size;
- u8 page_shift;
-};
-
struct mlx5_frag_buf {
struct mlx5_buf_list *frags;
int npages;
@@ -359,6 +352,15 @@ struct mlx5_frag_buf {
u8 page_shift;
};
+struct mlx5_frag_buf_ctrl {
+ struct mlx5_frag_buf frag_buf;
+ u32 sz_m1;
+ u32 frag_sz_m1;
+ u8 log_sz;
+ u8 log_stride;
+ u8 log_frag_strides;
+};
+
struct mlx5_eq_tasklet {
struct list_head list;
struct list_head process_list;
@@ -375,11 +377,18 @@ struct mlx5_eq_pagefault {
mempool_t *pool;
};
+struct mlx5_cq_table {
+ /* protect radix tree */
+ spinlock_t lock;
+ struct radix_tree_root tree;
+};
+
struct mlx5_eq {
struct mlx5_core_dev *dev;
+ struct mlx5_cq_table cq_table;
__be32 __iomem *doorbell;
u32 cons_index;
- struct mlx5_buf buf;
+ struct mlx5_frag_buf buf;
int size;
unsigned int irqn;
u8 eqn;
@@ -526,13 +535,6 @@ struct mlx5_core_health {
struct delayed_work recover_work;
};
-struct mlx5_cq_table {
- /* protect radix tree
- */
- spinlock_t lock;
- struct radix_tree_root tree;
-};
-
struct mlx5_qp_table {
/* protect radix tree
*/
@@ -654,10 +656,6 @@ struct mlx5_priv {
struct dentry *cmdif_debugfs;
/* end: qp staff */
- /* start: cq staff */
- struct mlx5_cq_table cq_table;
- /* end: cq staff */
-
/* start: mkey staff */
struct mlx5_mkey_table mkey_table;
/* end: mkey staff */
@@ -936,9 +934,9 @@ struct mlx5_hca_vport_context {
bool grh_required;
};
-static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
+static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
{
- return buf->direct.buf + offset;
+ return buf->frags->buf + offset;
}
#define STRUCT_FIELD(header, field) \
@@ -977,6 +975,25 @@ static inline u32 mlx5_base_mkey(const u32 key)
return key & 0xffffff00u;
}
+static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
+ void *cqc)
+{
+ fbc->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
+ fbc->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
+ fbc->sz_m1 = (1 << fbc->log_sz) - 1;
+ fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
+ fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
+}
+
+static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
+ u32 ix)
+{
+ unsigned int frag = (ix >> fbc->log_frag_strides);
+
+ return fbc->frag_buf.frags[frag].buf +
+ ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
+}
+
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
@@ -1002,9 +1019,10 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
- struct mlx5_buf *buf, int node);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
+ struct mlx5_frag_buf *buf, int node);
+int mlx5_buf_alloc(struct mlx5_core_dev *dev,
+ int size, struct mlx5_frag_buf *buf);
+void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
@@ -1049,22 +1067,12 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
-int mlx5_eq_init(struct mlx5_core_dev *dev);
-void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
-void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
+
+void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
-void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
-int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
- int nent, u64 mask, const char *name,
- enum mlx5_eq_type type);
-int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_start_eqs(struct mlx5_core_dev *dev);
-void mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -1076,14 +1084,6 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
-int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- u32 *out, int outlen);
-int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
-void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
-int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
-void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
int node);
@@ -1224,6 +1224,12 @@ static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}
+#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev))
+#define MLX5_VPORT_MANAGER(mdev) \
+ (MLX5_CAP_GEN(mdev, vport_group_manager) && \
+ (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
+ mlx5_core_is_pf(mdev))
+
static inline int mlx5_get_gid_table_len(u16 param)
{
if (param > 4) {
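
mlx5_core_init_cq_frag_buf() and mlx5_frag_buf_get_wqe() added to driver.h above split a linear entry index into a fragment number and a byte offset using the precomputed log sizes. A small user-space sketch of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12) and 64-byte CQEs (cqe_sz == 0):

#include <stdio.h>

int main(void)
{
	unsigned int log_stride = 6 + 0;		 /* 64-byte entries (cqe_sz == 0) */
	unsigned int log_frag_strides = 12 - log_stride; /* 64 entries per 4 KiB fragment */
	unsigned int frag_sz_m1 = (1u << log_frag_strides) - 1;
	unsigned int ix = 130;

	/* Same split as mlx5_frag_buf_get_wqe(): fragment index from the
	 * high bits, byte offset within the fragment from the low bits.
	 */
	printf("entry %u -> fragment %u, byte offset %u\n",
	       ix, ix >> log_frag_strides, (ix & frag_sz_m1) << log_stride);
	/* prints: entry 130 -> fragment 2, byte offset 128 */
	return 0;
}
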
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
new file mode 100644
index 000000000000..d3c9db492b30
--- /dev/null
+++ b/include/linux/mlx5/eswitch.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef _MLX5_ESWITCH_
+#define _MLX5_ESWITCH_
+
+#include <linux/mlx5/driver.h>
+
+enum {
+ SRIOV_NONE,
+ SRIOV_LEGACY,
+ SRIOV_OFFLOADS
+};
+
+enum {
+ REP_ETH,
+ REP_IB,
+ NUM_REP_TYPES,
+};
+
+struct mlx5_eswitch_rep;
+struct mlx5_eswitch_rep_if {
+ int (*load)(struct mlx5_core_dev *dev,
+ struct mlx5_eswitch_rep *rep);
+ void (*unload)(struct mlx5_eswitch_rep *rep);
+ void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
+ void *priv;
+ bool valid;
+};
+
+struct mlx5_eswitch_rep {
+ struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
+ u16 vport;
+ u8 hw_id[ETH_ALEN];
+ u16 vlan;
+ u32 vlan_refcount;
+};
+
+void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+ int vport_index,
+ struct mlx5_eswitch_rep_if *rep_if,
+ u8 rep_type);
+void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
+ int vport_index,
+ u8 rep_type);
+void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
+ int vport,
+ u8 rep_type);
+struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
+ int vport);
+void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type);
+u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw,
+ int vport, u32 sqn);
+#endif
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index a0b48afcb422..47aecc4fa8c2 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -40,6 +40,8 @@
enum {
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
+ MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17,
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 18,
};
enum {
@@ -69,6 +71,7 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
MLX5_FLOW_NAMESPACE_SNIFFER_RX,
MLX5_FLOW_NAMESPACE_SNIFFER_TX,
+ MLX5_FLOW_NAMESPACE_EGRESS,
};
struct mlx5_flow_table;
@@ -139,11 +142,20 @@ struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+struct mlx5_fs_vlan {
+ u16 ethtype;
+ u16 vid;
+ u8 prio;
+};
+
struct mlx5_flow_act {
u32 action;
+ bool has_flow_tag;
u32 flow_tag;
u32 encap_id;
u32 modify_id;
+ uintptr_t esp_id;
+ struct mlx5_fs_vlan vlan;
};
#define MLX5_DECLARE_FLOW_ACT(name) \
diff --git a/include/linux/mlx5/fs_helpers.h b/include/linux/mlx5/fs_helpers.h
new file mode 100644
index 000000000000..7b476bbae731
--- /dev/null
+++ b/include/linux/mlx5/fs_helpers.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MLX5_FS_HELPERS_
+#define _MLX5_FS_HELPERS_
+
+#include <linux/mlx5/mlx5_ifc.h>
+
+#define MLX5_FS_IPV4_VERSION 4
+#define MLX5_FS_IPV6_VERSION 6
+
+static inline bool _mlx5_fs_is_outer_ipproto_flow(const u32 *match_c,
+ const u32 *match_v, u8 match)
+{
+ const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+ const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+
+ return MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_protocol) == 0xff &&
+ MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol) == match;
+}
+
+static inline bool mlx5_fs_is_outer_tcp_flow(const u32 *match_c,
+ const u32 *match_v)
+{
+ return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_TCP);
+}
+
+static inline bool mlx5_fs_is_outer_udp_flow(const u32 *match_c,
+ const u32 *match_v)
+{
+ return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_UDP);
+}
+
+static inline bool mlx5_fs_is_vxlan_flow(const u32 *match_c)
+{
+ void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ misc_parameters);
+
+ return MLX5_GET(fte_match_set_misc, misc_params_c, vxlan_vni);
+}
+
+static inline bool _mlx5_fs_is_outer_ipv_flow(struct mlx5_core_dev *mdev,
+ const u32 *match_c,
+ const u32 *match_v, int version)
+{
+ int match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_ip_version);
+ const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+ const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+
+ if (!match_ipv) {
+ u16 ethertype;
+
+ switch (version) {
+ case MLX5_FS_IPV4_VERSION:
+ ethertype = ETH_P_IP;
+ break;
+ case MLX5_FS_IPV6_VERSION:
+ ethertype = ETH_P_IPV6;
+ break;
+ default:
+ return false;
+ }
+
+ return MLX5_GET(fte_match_set_lyr_2_4, headers_c,
+ ethertype) == 0xffff &&
+ MLX5_GET(fte_match_set_lyr_2_4, headers_v,
+ ethertype) == ethertype;
+ }
+
+ return MLX5_GET(fte_match_set_lyr_2_4, headers_c,
+ ip_version) == 0xf &&
+ MLX5_GET(fte_match_set_lyr_2_4, headers_v,
+ ip_version) == version;
+}
+
+static inline bool
+mlx5_fs_is_outer_ipv4_flow(struct mlx5_core_dev *mdev, const u32 *match_c,
+ const u32 *match_v)
+{
+ return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v,
+ MLX5_FS_IPV4_VERSION);
+}
+
+static inline bool
+mlx5_fs_is_outer_ipv6_flow(struct mlx5_core_dev *mdev, const u32 *match_c,
+ const u32 *match_v)
+{
+ return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v,
+ MLX5_FS_IPV6_VERSION);
+}
+
+static inline bool mlx5_fs_is_outer_ipsec_flow(const u32 *match_c)
+{
+ void *misc_params_c =
+ MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
+
+ return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
+}
+
+#endif
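
The helpers in the new fs_helpers.h above give steering code cheap predicates over fte_match_param criteria/value buffers. A hedged usage sketch (example_is_ipv4_esp_rule() is hypothetical, not from the patch):

static bool example_is_ipv4_esp_rule(struct mlx5_core_dev *mdev,
				     const u32 *match_c, const u32 *match_v)
{
	/* The rule must match on an ESP SPI in the misc parameters... */
	if (!mlx5_fs_is_outer_ipsec_flow(match_c))
		return false;

	/* ...and its outer headers must identify IPv4, either via
	 * ip_version or, on older devices, via the ethertype.
	 */
	return mlx5_fs_is_outer_ipv4_flow(mdev, match_c, match_v);
}
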
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index f4e417686f62..d25011f84815 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -143,6 +143,7 @@ enum {
MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763,
MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764,
MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765,
+ MLX5_CMD_OP_QUERY_VNIC_ENV = 0x76f,
MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
@@ -295,7 +296,9 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 inner_tcp_dport[0x1];
u8 inner_tcp_flags[0x1];
u8 reserved_at_37[0x9];
- u8 reserved_at_40[0x1a];
+ u8 reserved_at_40[0x17];
+ u8 outer_esp_spi[0x1];
+ u8 reserved_at_58[0x2];
u8 bth_dst_qp[0x1];
u8 reserved_at_5b[0x25];
@@ -311,7 +314,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 flow_table_modify[0x1];
u8 encap[0x1];
u8 decap[0x1];
- u8 reserved_at_9[0x17];
+ u8 reserved_at_9[0x1];
+ u8 pop_vlan[0x1];
+ u8 push_vlan[0x1];
+ u8 reserved_at_c[0x14];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
@@ -437,7 +443,9 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_120[0x28];
u8 bth_dst_qp[0x18];
- u8 reserved_at_160[0xa0];
+ u8 reserved_at_160[0x20];
+ u8 outer_esp_spi[0x20];
+ u8 reserved_at_1a0[0x60];
};
struct mlx5_ifc_cmd_pas_bits {
@@ -589,6 +597,16 @@ struct mlx5_ifc_qos_cap_bits {
u8 reserved_at_100[0x700];
};
+struct mlx5_ifc_debug_cap_bits {
+ u8 reserved_at_0[0x20];
+
+ u8 reserved_at_20[0x2];
+ u8 stall_detect[0x1];
+ u8 reserved_at_23[0x1d];
+
+ u8 reserved_at_40[0x7c0];
+};
+
struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 csum_cap[0x1];
u8 vlan_cap[0x1];
@@ -851,7 +869,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 out_of_seq_cnt[0x1];
u8 vport_counters[0x1];
u8 retransmission_q_counters[0x1];
- u8 reserved_at_183[0x1];
+ u8 debug[0x1];
u8 modify_rq_counter_set_id[0x1];
u8 rq_delay_drop[0x1];
u8 max_qp_cnt[0xa];
@@ -861,7 +879,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 vhca_group_manager[0x1];
u8 ib_virt[0x1];
u8 eth_virt[0x1];
- u8 reserved_at_1a4[0x1];
+ u8 vnic_env_queue_counters[0x1];
u8 ets[0x1];
u8 nic_flow_table[0x1];
u8 eswitch_flow_table[0x1];
@@ -993,7 +1011,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_330[0xb];
u8 log_max_xrcd[0x5];
- u8 reserved_at_340[0x8];
+ u8 nic_receive_steering_discard[0x1];
+ u8 receive_discard_vport_down[0x1];
+ u8 transmit_discard_vport_down[0x1];
+ u8 reserved_at_343[0x5];
u8 log_max_flow_counter_bulk[0x8];
u8 max_flow_counter_15_0[0x10];
@@ -1017,7 +1038,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_398[0x3];
u8 log_max_tis_per_sq[0x5];
- u8 reserved_at_3a0[0x3];
+ u8 ext_stride_num_range[0x1];
+ u8 reserved_at_3a1[0x2];
u8 log_max_stride_sz_rq[0x5];
u8 reserved_at_3a8[0x3];
u8 log_min_stride_sz_rq[0x5];
@@ -1091,6 +1113,7 @@ enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
+ MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99,
MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
};
@@ -1183,9 +1206,9 @@ struct mlx5_ifc_wq_bits {
u8 log_hairpin_num_packets[0x5];
u8 reserved_at_128[0x3];
u8 log_hairpin_data_sz[0x5];
- u8 reserved_at_130[0x5];
- u8 log_wqe_num_of_strides[0x3];
+ u8 reserved_at_130[0x4];
+ u8 log_wqe_num_of_strides[0x4];
u8 two_byte_shift_en[0x1];
u8 reserved_at_139[0x4];
u8 log_wqe_stride_size[0x3];
@@ -1567,7 +1590,17 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
u8 rx_pause_transition_low[0x20];
- u8 reserved_at_3c0[0x400];
+ u8 reserved_at_3c0[0x40];
+
+ u8 device_stall_minor_watermark_cnt_high[0x20];
+
+ u8 device_stall_minor_watermark_cnt_low[0x20];
+
+ u8 device_stall_critical_watermark_cnt_high[0x20];
+
+ u8 device_stall_critical_watermark_cnt_low[0x20];
+
+ u8 reserved_at_480[0x340];
};
struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
@@ -2282,10 +2315,19 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
+};
+
+struct mlx5_ifc_vlan_bits {
+ u8 ethtype[0x10];
+ u8 prio[0x3];
+ u8 cfi[0x1];
+ u8 vid[0xc];
};
struct mlx5_ifc_flow_context_bits {
- u8 reserved_at_0[0x20];
+ struct mlx5_ifc_vlan_bits push_vlan;
u8 group_id[0x20];
@@ -2361,6 +2403,24 @@ struct mlx5_ifc_xrc_srqc_bits {
u8 reserved_at_180[0x80];
};
+struct mlx5_ifc_vnic_diagnostic_statistics_bits {
+ u8 counter_error_queues[0x20];
+
+ u8 total_error_queues[0x20];
+
+ u8 send_queue_priority_update_flow[0x20];
+
+ u8 reserved_at_60[0x20];
+
+ u8 nic_receive_steering_discard[0x40];
+
+ u8 receive_discard_vport_down[0x40];
+
+ u8 transmit_discard_vport_down[0x40];
+
+ u8 reserved_at_140[0xec0];
+};
+
struct mlx5_ifc_traffic_counter_bits {
u8 packets[0x40];
@@ -3636,6 +3696,35 @@ struct mlx5_ifc_query_vport_state_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_query_vnic_env_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_vnic_diagnostic_statistics_bits vport_env;
+};
+
+enum {
+ MLX5_QUERY_VNIC_ENV_IN_OP_MOD_VPORT_DIAG_STATISTICS = 0x0,
+};
+
+struct mlx5_ifc_query_vnic_env_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_query_vport_counter_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -7808,7 +7897,11 @@ struct mlx5_ifc_pifr_reg_bits {
struct mlx5_ifc_pfcc_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_at_10[0x10];
+ u8 reserved_at_10[0xb];
+ u8 ppan_mask_n[0x1];
+ u8 minor_stall_mask[0x1];
+ u8 critical_stall_mask[0x1];
+ u8 reserved_at_1e[0x2];
u8 ppan[0x4];
u8 reserved_at_24[0x4];
@@ -7818,17 +7911,22 @@ struct mlx5_ifc_pfcc_reg_bits {
u8 pptx[0x1];
u8 aptx[0x1];
- u8 reserved_at_42[0x6];
+ u8 pptx_mask_n[0x1];
+ u8 reserved_at_43[0x5];
u8 pfctx[0x8];
u8 reserved_at_50[0x10];
u8 pprx[0x1];
u8 aprx[0x1];
- u8 reserved_at_62[0x6];
+ u8 pprx_mask_n[0x1];
+ u8 reserved_at_63[0x5];
u8 pfcrx[0x8];
u8 reserved_at_70[0x10];
- u8 reserved_at_80[0x80];
+ u8 device_stall_minor_watermark[0x10];
+ u8 device_stall_critical_watermark[0x10];
+
+ u8 reserved_at_a0[0x60];
};
struct mlx5_ifc_pelc_reg_bits {
@@ -7869,8 +7967,10 @@ struct mlx5_ifc_peir_reg_bits {
};
struct mlx5_ifc_pcam_enhanced_features_bits {
- u8 reserved_at_0[0x7b];
+ u8 reserved_at_0[0x76];
+ u8 pfcc_mask[0x1];
+ u8 reserved_at_77[0x4];
u8 rx_buffer_fullness_counters[0x1];
u8 ptys_connector_type[0x1];
u8 reserved_at_7d[0x1];
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index 255a88d08078..ec052491ba3d 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -373,7 +373,10 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits {
struct mlx5_ifc_ipsec_extended_cap_bits {
u8 encapsulation[0x20];
- u8 reserved_0[0x15];
+ u8 reserved_0[0x12];
+ u8 v2_command[0x1];
+ u8 udp_encap[0x1];
+ u8 rx_no_trailer[0x1];
u8 ipv4_fragment[0x1];
u8 ipv6[0x1];
u8 esn[0x1];
@@ -429,4 +432,91 @@ struct mlx5_ifc_ipsec_counters_bits {
u8 dropped_cmd[0x40];
};
+enum mlx5_ifc_fpga_ipsec_response_syndrome {
+ MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0,
+ MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1,
+ MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE = 2,
+ MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3,
+};
+
+struct mlx5_ifc_fpga_ipsec_cmd_resp {
+ __be32 syndrome;
+ union {
+ __be32 sw_sa_handle;
+ __be32 flags;
+ };
+ u8 reserved[24];
+} __packed;
+
+enum mlx5_ifc_fpga_ipsec_cmd_opcode {
+ MLX5_FPGA_IPSEC_CMD_OP_ADD_SA = 0,
+ MLX5_FPGA_IPSEC_CMD_OP_DEL_SA = 1,
+ MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 = 2,
+ MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 = 3,
+ MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 = 4,
+ MLX5_FPGA_IPSEC_CMD_OP_SET_CAP = 5,
+};
+
+enum mlx5_ifc_fpga_ipsec_cap {
+ MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0),
+};
+
+struct mlx5_ifc_fpga_ipsec_cmd_cap {
+ __be32 cmd;
+ __be32 flags;
+ u8 reserved[24];
+} __packed;
+
+enum mlx5_ifc_fpga_ipsec_sa_flags {
+ MLX5_FPGA_IPSEC_SA_ESN_EN = BIT(0),
+ MLX5_FPGA_IPSEC_SA_ESN_OVERLAP = BIT(1),
+ MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2),
+ MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3),
+ MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4),
+ MLX5_FPGA_IPSEC_SA_SA_VALID = BIT(5),
+ MLX5_FPGA_IPSEC_SA_IP_ESP = BIT(6),
+ MLX5_FPGA_IPSEC_SA_IP_AH = BIT(7),
+};
+
+enum mlx5_ifc_fpga_ipsec_sa_enc_mode {
+ MLX5_FPGA_IPSEC_SA_ENC_MODE_NONE = 0,
+ MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128 = 1,
+ MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128 = 3,
+};
+
+struct mlx5_ifc_fpga_ipsec_sa_v1 {
+ __be32 cmd;
+ u8 key_enc[32];
+ u8 key_auth[32];
+ __be32 sip[4];
+ __be32 dip[4];
+ union {
+ struct {
+ __be32 reserved;
+ u8 salt_iv[8];
+ __be32 salt;
+ } __packed gcm;
+ struct {
+ u8 salt[16];
+ } __packed cbc;
+ };
+ __be32 spi;
+ __be32 sw_sa_handle;
+ __be16 tfclen;
+ u8 enc_mode;
+ u8 reserved1[2];
+ u8 flags;
+ u8 reserved2[2];
+};
+
+struct mlx5_ifc_fpga_ipsec_sa {
+ struct mlx5_ifc_fpga_ipsec_sa_v1 ipsec_sa_v1;
+ __be16 udp_sp;
+ __be16 udp_dp;
+ u8 reserved1[4];
+ __be32 esn;
+ __be16 vid; /* only 12 bits, rest is reserved */
+ __be16 reserved2;
+} __packed;
+
#endif /* MLX5_IFC_FPGA_H */
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 035f0d4dc9fe..34aed6032f86 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -151,6 +151,12 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
u8 *pfc_en_rx);
+int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 stall_critical_watermark,
+ u16 stall_minor_watermark);
+int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 *stall_critical_watermark, u16 *stall_minor_watermark);
+
int mlx5_max_tc(struct mlx5_core_dev *mdev);
int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 7e8f281f8c00..83a33a1873a6 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -47,6 +47,7 @@ int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
+int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn);
int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
@@ -66,7 +67,6 @@ int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rmpn);
int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
-int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 64e193e87394..9208cb8809ac 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -107,6 +107,9 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
+int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
+ u64 *rx_discard_vport_down,
+ u64 *tx_discard_vport_down);
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
int vf, u8 port_num, void *out,
size_t out_sz);
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 5396521a776a..9a36fad9e068 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -4,11 +4,10 @@
#include <linux/in.h>
#include <linux/pim.h>
-#include <linux/rhashtable.h>
-#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/fib_notifier.h>
#include <uapi/linux/mroute.h>
+#include <linux/mroute_base.h>
#ifdef CONFIG_IP_MROUTE
static inline int ip_mroute_opt(int opt)
@@ -56,56 +55,8 @@ static inline bool ipmr_rule_default(const struct fib_rule *rule)
}
#endif
-struct vif_device {
- struct net_device *dev; /* Device we are using */
- struct netdev_phys_item_id dev_parent_id; /* Device parent ID */
- unsigned long bytes_in,bytes_out;
- unsigned long pkt_in,pkt_out; /* Statistics */
- unsigned long rate_limit; /* Traffic shaping (NI) */
- unsigned char threshold; /* TTL threshold */
- unsigned short flags; /* Control flags */
- __be32 local,remote; /* Addresses(remote for tunnels)*/
- int link; /* Physical interface index */
-};
-
-struct vif_entry_notifier_info {
- struct fib_notifier_info info;
- struct net_device *dev;
- vifi_t vif_index;
- unsigned short vif_flags;
- u32 tb_id;
-};
-
#define VIFF_STATIC 0x8000
-#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
-
-struct mr_table {
- struct list_head list;
- possible_net_t net;
- u32 id;
- struct sock __rcu *mroute_sk;
- struct timer_list ipmr_expire_timer;
- struct list_head mfc_unres_queue;
- struct vif_device vif_table[MAXVIFS];
- struct rhltable mfc_hash;
- struct list_head mfc_cache_list;
- int maxvif;
- atomic_t cache_resolve_queue_len;
- bool mroute_do_assert;
- bool mroute_do_pim;
- int mroute_reg_vif_num;
-};
-
-/* mfc_flags:
- * MFC_STATIC - the entry was added statically (not by a routing daemon)
- * MFC_OFFLOAD - the entry was offloaded to the hardware
- */
-enum {
- MFC_STATIC = BIT(0),
- MFC_OFFLOAD = BIT(1),
-};
-
struct mfc_cache_cmp_arg {
__be32 mfc_mcastgrp;
__be32 mfc_origin;
@@ -113,28 +64,13 @@ struct mfc_cache_cmp_arg {
/**
* struct mfc_cache - multicast routing entries
- * @mnode: rhashtable list
+ * @_c: Common multicast routing information; has to be first [for casting]
* @mfc_mcastgrp: destination multicast group address
* @mfc_origin: source address
* @cmparg: used for rhashtable comparisons
- * @mfc_parent: source interface (iif)
- * @mfc_flags: entry flags
- * @expires: unresolved entry expire time
- * @unresolved: unresolved cached skbs
- * @last_assert: time of last assert
- * @minvif: minimum VIF id
- * @maxvif: maximum VIF id
- * @bytes: bytes that have passed for this entry
- * @pkt: packets that have passed for this entry
- * @wrong_if: number of wrong source interface hits
- * @lastuse: time of last use of the group (traffic or update)
- * @ttls: OIF TTL threshold array
- * @refcount: reference count for this entry
- * @list: global entry list
- * @rcu: used for entry destruction
*/
struct mfc_cache {
- struct rhlist_head mnode;
+ struct mr_mfc _c;
union {
struct {
__be32 mfc_mcastgrp;
@@ -142,57 +78,10 @@ struct mfc_cache {
};
struct mfc_cache_cmp_arg cmparg;
};
- vifi_t mfc_parent;
- int mfc_flags;
-
- union {
- struct {
- unsigned long expires;
- struct sk_buff_head unresolved;
- } unres;
- struct {
- unsigned long last_assert;
- int minvif;
- int maxvif;
- unsigned long bytes;
- unsigned long pkt;
- unsigned long wrong_if;
- unsigned long lastuse;
- unsigned char ttls[MAXVIFS];
- refcount_t refcount;
- } res;
- } mfc_un;
- struct list_head list;
- struct rcu_head rcu;
-};
-
-struct mfc_entry_notifier_info {
- struct fib_notifier_info info;
- struct mfc_cache *mfc;
- u32 tb_id;
};
struct rtmsg;
int ipmr_get_route(struct net *net, struct sk_buff *skb,
__be32 saddr, __be32 daddr,
struct rtmsg *rtm, u32 portid);
-
-#ifdef CONFIG_IP_MROUTE
-void ipmr_cache_free(struct mfc_cache *mfc_cache);
-#else
-static inline void ipmr_cache_free(struct mfc_cache *mfc_cache)
-{
-}
-#endif
-
-static inline void ipmr_cache_put(struct mfc_cache *c)
-{
- if (refcount_dec_and_test(&c->mfc_un.res.refcount))
- ipmr_cache_free(c);
-}
-static inline void ipmr_cache_hold(struct mfc_cache *c)
-{
- refcount_inc(&c->mfc_un.res.refcount);
-}
-
#endif
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 3014c52bfd86..c4a45859f586 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -7,6 +7,8 @@
#include <linux/skbuff.h> /* for struct sk_buff_head */
#include <net/net_namespace.h>
#include <uapi/linux/mroute6.h>
+#include <linux/mroute_base.h>
+#include <net/fib_rules.h>
#ifdef CONFIG_IPV6_MROUTE
static inline int ip6_mroute_opt(int opt)
@@ -62,57 +64,33 @@ static inline void ip6_mr_cleanup(void)
}
#endif
-struct mif_device {
- struct net_device *dev; /* Device we are using */
- unsigned long bytes_in,bytes_out;
- unsigned long pkt_in,pkt_out; /* Statistics */
- unsigned long rate_limit; /* Traffic shaping (NI) */
- unsigned char threshold; /* TTL threshold */
- unsigned short flags; /* Control flags */
- int link; /* Physical interface index */
-};
+#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
+bool ip6mr_rule_default(const struct fib_rule *rule);
+#else
+static inline bool ip6mr_rule_default(const struct fib_rule *rule)
+{
+ return true;
+}
+#endif
#define VIFF_STATIC 0x8000
-struct mfc6_cache {
- struct list_head list;
- struct in6_addr mf6c_mcastgrp; /* Group the entry belongs to */
- struct in6_addr mf6c_origin; /* Source of packet */
- mifi_t mf6c_parent; /* Source interface */
- int mfc_flags; /* Flags on line */
+struct mfc6_cache_cmp_arg {
+ struct in6_addr mf6c_mcastgrp;
+ struct in6_addr mf6c_origin;
+};
+struct mfc6_cache {
+ struct mr_mfc _c;
union {
struct {
- unsigned long expires;
- struct sk_buff_head unresolved; /* Unresolved buffers */
- } unres;
- struct {
- unsigned long last_assert;
- int minvif;
- int maxvif;
- unsigned long bytes;
- unsigned long pkt;
- unsigned long wrong_if;
- unsigned long lastuse;
- unsigned char ttls[MAXMIFS]; /* TTL thresholds */
- } res;
- } mfc_un;
+ struct in6_addr mf6c_mcastgrp;
+ struct in6_addr mf6c_origin;
+ };
+ struct mfc6_cache_cmp_arg cmparg;
+ };
};
-#define MFC_STATIC 1
-#define MFC_NOTIFY 2
-
-#define MFC6_LINES 64
-
-#define MFC6_HASH(a, g) (((__force u32)(a)->s6_addr32[0] ^ \
- (__force u32)(a)->s6_addr32[1] ^ \
- (__force u32)(a)->s6_addr32[2] ^ \
- (__force u32)(a)->s6_addr32[3] ^ \
- (__force u32)(g)->s6_addr32[0] ^ \
- (__force u32)(g)->s6_addr32[1] ^ \
- (__force u32)(g)->s6_addr32[2] ^ \
- (__force u32)(g)->s6_addr32[3]) % MFC6_LINES)
-
#define MFC_ASSERT_THRESH (3*HZ) /* Maximal freq. of asserts */
struct rtmsg;
@@ -120,12 +98,12 @@ extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
struct rtmsg *rtm, u32 portid);
#ifdef CONFIG_IPV6_MROUTE
-extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
+bool mroute6_is_socket(struct net *net, struct sk_buff *skb);
extern int ip6mr_sk_done(struct sock *sk);
#else
-static inline struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
+static inline bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
{
- return NULL;
+ return false;
}
static inline int ip6mr_sk_done(struct sock *sk)
{
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
new file mode 100644
index 000000000000..d617fe45543e
--- /dev/null
+++ b/include/linux/mroute_base.h
@@ -0,0 +1,474 @@
+#ifndef __LINUX_MROUTE_BASE_H
+#define __LINUX_MROUTE_BASE_H
+
+#include <linux/netdevice.h>
+#include <linux/rhashtable.h>
+#include <linux/spinlock.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <net/fib_notifier.h>
+
+/**
+ * struct vif_device - interface representor for multicast routing
+ * @dev: network device being used
+ * @bytes_in: statistic; bytes ingressing
+ * @bytes_out: statistic; bytes egressing
+ * @pkt_in: statistic; packets ingressing
+ * @pkt_out: statistic; packets egressing
+ * @rate_limit: Traffic shaping (NI)
+ * @threshold: TTL threshold
+ * @flags: Control flags
+ * @link: Physical interface index
+ * @dev_parent_id: device parent id
+ * @local: Local address
+ * @remote: Remote address for tunnels
+ */
+struct vif_device {
+ struct net_device *dev;
+ unsigned long bytes_in, bytes_out;
+ unsigned long pkt_in, pkt_out;
+ unsigned long rate_limit;
+ unsigned char threshold;
+ unsigned short flags;
+ int link;
+
+ /* Currently only used by ipmr */
+ struct netdev_phys_item_id dev_parent_id;
+ __be32 local, remote;
+};
+
+struct vif_entry_notifier_info {
+ struct fib_notifier_info info;
+ struct net_device *dev;
+ unsigned short vif_index;
+ unsigned short vif_flags;
+ u32 tb_id;
+};
+
+static inline int mr_call_vif_notifier(struct notifier_block *nb,
+ struct net *net,
+ unsigned short family,
+ enum fib_event_type event_type,
+ struct vif_device *vif,
+ unsigned short vif_index, u32 tb_id)
+{
+ struct vif_entry_notifier_info info = {
+ .info = {
+ .family = family,
+ .net = net,
+ },
+ .dev = vif->dev,
+ .vif_index = vif_index,
+ .vif_flags = vif->flags,
+ .tb_id = tb_id,
+ };
+
+ return call_fib_notifier(nb, net, event_type, &info.info);
+}
+
+static inline int mr_call_vif_notifiers(struct net *net,
+ unsigned short family,
+ enum fib_event_type event_type,
+ struct vif_device *vif,
+ unsigned short vif_index, u32 tb_id,
+ unsigned int *ipmr_seq)
+{
+ struct vif_entry_notifier_info info = {
+ .info = {
+ .family = family,
+ .net = net,
+ },
+ .dev = vif->dev,
+ .vif_index = vif_index,
+ .vif_flags = vif->flags,
+ .tb_id = tb_id,
+ };
+
+ ASSERT_RTNL();
+ (*ipmr_seq)++;
+ return call_fib_notifiers(net, event_type, &info.info);
+}
+
+#ifndef MAXVIFS
+/* This one is nasty; the value is defined in uapi using different symbols
+ * for mroute and mroute6, but both map to the same value of 32.
+ */
+#define MAXVIFS 32
+#endif
+
+#define VIF_EXISTS(_mrt, _idx) (!!((_mrt)->vif_table[_idx].dev))
+
+/* mfc_flags:
+ * MFC_STATIC - the entry was added statically (not by a routing daemon)
+ * MFC_OFFLOAD - the entry was offloaded to the hardware
+ */
+enum {
+ MFC_STATIC = BIT(0),
+ MFC_OFFLOAD = BIT(1),
+};
+
+/**
+ * struct mr_mfc - common multicast routing entries
+ * @mnode: rhashtable list
+ * @mfc_parent: source interface (iif)
+ * @mfc_flags: entry flags
+ * @expires: unresolved entry expire time
+ * @unresolved: unresolved cached skbs
+ * @last_assert: time of last assert
+ * @minvif: minimum VIF id
+ * @maxvif: maximum VIF id
+ * @bytes: bytes that have passed for this entry
+ * @pkt: packets that have passed for this entry
+ * @wrong_if: number of wrong source interface hits
+ * @lastuse: time of last use of the group (traffic or update)
+ * @ttls: OIF TTL threshold array
+ * @refcount: reference count for this entry
+ * @list: global entry list
+ * @rcu: used for entry destruction
+ * @free: Operation used for freeing an entry under RCU
+ */
+struct mr_mfc {
+ struct rhlist_head mnode;
+ unsigned short mfc_parent;
+ int mfc_flags;
+
+ union {
+ struct {
+ unsigned long expires;
+ struct sk_buff_head unresolved;
+ } unres;
+ struct {
+ unsigned long last_assert;
+ int minvif;
+ int maxvif;
+ unsigned long bytes;
+ unsigned long pkt;
+ unsigned long wrong_if;
+ unsigned long lastuse;
+ unsigned char ttls[MAXVIFS];
+ refcount_t refcount;
+ } res;
+ } mfc_un;
+ struct list_head list;
+ struct rcu_head rcu;
+ void (*free)(struct rcu_head *head);
+};
+
+static inline void mr_cache_put(struct mr_mfc *c)
+{
+ if (refcount_dec_and_test(&c->mfc_un.res.refcount))
+ call_rcu(&c->rcu, c->free);
+}
+
+static inline void mr_cache_hold(struct mr_mfc *c)
+{
+ refcount_inc(&c->mfc_un.res.refcount);
+}
+
+struct mfc_entry_notifier_info {
+ struct fib_notifier_info info;
+ struct mr_mfc *mfc;
+ u32 tb_id;
+};
+
+static inline int mr_call_mfc_notifier(struct notifier_block *nb,
+ struct net *net,
+ unsigned short family,
+ enum fib_event_type event_type,
+ struct mr_mfc *mfc, u32 tb_id)
+{
+ struct mfc_entry_notifier_info info = {
+ .info = {
+ .family = family,
+ .net = net,
+ },
+ .mfc = mfc,
+ .tb_id = tb_id
+ };
+
+ return call_fib_notifier(nb, net, event_type, &info.info);
+}
+
+static inline int mr_call_mfc_notifiers(struct net *net,
+ unsigned short family,
+ enum fib_event_type event_type,
+ struct mr_mfc *mfc, u32 tb_id,
+ unsigned int *ipmr_seq)
+{
+ struct mfc_entry_notifier_info info = {
+ .info = {
+ .family = family,
+ .net = net,
+ },
+ .mfc = mfc,
+ .tb_id = tb_id
+ };
+
+ ASSERT_RTNL();
+ (*ipmr_seq)++;
+ return call_fib_notifiers(net, event_type, &info.info);
+}
+
+struct mr_table;
+
+/**
+ * struct mr_table_ops - callbacks and info for protocol-specific ops
+ * @rht_params: parameters for accessing the MFC hash
+ * @cmparg_any: a hash key to be used for matching on (*,*) routes
+ */
+struct mr_table_ops {
+ const struct rhashtable_params *rht_params;
+ void *cmparg_any;
+};
+
+/**
+ * struct mr_table - a multicast routing table
+ * @list: entry within a list of multicast routing tables
+ * @net: net where this table belongs
+ * @ops: protocol specific operations
+ * @id: identifier of the table
+ * @mroute_sk: socket associated with the table
+ * @ipmr_expire_timer: timer for handling unresolved routes
+ * @mfc_unres_queue: list of unresolved MFC entries
+ * @vif_table: array containing all possible vifs
+ * @mfc_hash: Hash table of all resolved routes for easy lookup
+ * @mfc_cache_list: list of resolved routes for possible traversal
+ * @maxvif: Identifier of highest value vif currently in use
+ * @cache_resolve_queue_len: current size of unresolved queue
+ * @mroute_do_assert: Whether to inform userspace on wrong ingress
+ * @mroute_do_pim: Whether to receive IGMP PIMv1
+ * @mroute_reg_vif_num: PIM-device vif index
+ */
+struct mr_table {
+ struct list_head list;
+ possible_net_t net;
+ struct mr_table_ops ops;
+ u32 id;
+ struct sock __rcu *mroute_sk;
+ struct timer_list ipmr_expire_timer;
+ struct list_head mfc_unres_queue;
+ struct vif_device vif_table[MAXVIFS];
+ struct rhltable mfc_hash;
+ struct list_head mfc_cache_list;
+ int maxvif;
+ atomic_t cache_resolve_queue_len;
+ bool mroute_do_assert;
+ bool mroute_do_pim;
+ int mroute_reg_vif_num;
+};
+
+#ifdef CONFIG_IP_MROUTE_COMMON
+void vif_device_init(struct vif_device *v,
+ struct net_device *dev,
+ unsigned long rate_limit,
+ unsigned char threshold,
+ unsigned short flags,
+ unsigned short get_iflink_mask);
+
+struct mr_table *
+mr_table_alloc(struct net *net, u32 id,
+ struct mr_table_ops *ops,
+ void (*expire_func)(struct timer_list *t),
+ void (*table_set)(struct mr_table *mrt,
+ struct net *net));
+
+/* These actually return 'struct mr_mfc *', but to avoid the need for
+ * explicit casts they simply return void pointers.
+ */
+void *mr_mfc_find_parent(struct mr_table *mrt,
+ void *hasharg, int parent);
+void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi);
+void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg);
+
+int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ struct mr_mfc *c, struct rtmsg *rtm);
+int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
+ struct mr_table *(*iter)(struct net *net,
+ struct mr_table *mrt),
+ int (*fill)(struct mr_table *mrt,
+ struct sk_buff *skb,
+ u32 portid, u32 seq, struct mr_mfc *c,
+ int cmd, int flags),
+ spinlock_t *lock);
+
+int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
+ int (*rules_dump)(struct net *net,
+ struct notifier_block *nb),
+ struct mr_table *(*mr_iter)(struct net *net,
+ struct mr_table *mrt),
+ rwlock_t *mrt_lock);
+#else
+static inline void vif_device_init(struct vif_device *v,
+ struct net_device *dev,
+ unsigned long rate_limit,
+ unsigned char threshold,
+ unsigned short flags,
+ unsigned short get_iflink_mask)
+{
+}
+
+static inline void *
+mr_table_alloc(struct net *net, u32 id,
+ struct mr_table_ops *ops,
+ void (*expire_func)(struct timer_list *t),
+ void (*table_set)(struct mr_table *mrt,
+ struct net *net))
+{
+ return NULL;
+}
+
+static inline void *mr_mfc_find_parent(struct mr_table *mrt,
+ void *hasharg, int parent)
+{
+ return NULL;
+}
+
+static inline void *mr_mfc_find_any_parent(struct mr_table *mrt,
+ int vifi)
+{
+ return NULL;
+}
+
+static inline struct mr_mfc *mr_mfc_find_any(struct mr_table *mrt,
+ int vifi, void *hasharg)
+{
+ return NULL;
+}
+
+static inline int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ struct mr_mfc *c, struct rtmsg *rtm)
+{
+ return -EINVAL;
+}
+
+static inline int
+mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
+ struct mr_table *(*iter)(struct net *net,
+ struct mr_table *mrt),
+ int (*fill)(struct mr_table *mrt,
+ struct sk_buff *skb,
+ u32 portid, u32 seq, struct mr_mfc *c,
+ int cmd, int flags),
+ spinlock_t *lock)
+{
+ return -EINVAL;
+}
+
+static inline int mr_dump(struct net *net, struct notifier_block *nb,
+ unsigned short family,
+ int (*rules_dump)(struct net *net,
+ struct notifier_block *nb),
+ struct mr_table *(*mr_iter)(struct net *net,
+ struct mr_table *mrt),
+ rwlock_t *mrt_lock)
+{
+ return -EINVAL;
+}
+#endif
+
+static inline void *mr_mfc_find(struct mr_table *mrt, void *hasharg)
+{
+ return mr_mfc_find_parent(mrt, hasharg, -1);
+}
+
+#ifdef CONFIG_PROC_FS
+struct mr_vif_iter {
+ struct seq_net_private p;
+ struct mr_table *mrt;
+ int ct;
+};
+
+struct mr_mfc_iter {
+ struct seq_net_private p;
+ struct mr_table *mrt;
+ struct list_head *cache;
+
+ /* Lock protecting the mr_table's unresolved queue */
+ spinlock_t *lock;
+};
+
+#ifdef CONFIG_IP_MROUTE_COMMON
+void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos);
+void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos);
+
+static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos ? mr_vif_seq_idx(seq_file_net(seq),
+ seq->private, *pos - 1)
+ : SEQ_START_TOKEN;
+}
+
+/* These actually return 'struct mr_mfc *', but to avoid the need for
+ * explicit casts they simply return void pointers.
+ */
+void *mr_mfc_seq_idx(struct net *net,
+ struct mr_mfc_iter *it, loff_t pos);
+void *mr_mfc_seq_next(struct seq_file *seq, void *v,
+ loff_t *pos);
+
+static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos,
+ struct mr_table *mrt, spinlock_t *lock)
+{
+ struct mr_mfc_iter *it = seq->private;
+
+ it->mrt = mrt;
+ it->cache = NULL;
+ it->lock = lock;
+
+ return *pos ? mr_mfc_seq_idx(seq_file_net(seq),
+ seq->private, *pos - 1)
+ : SEQ_START_TOKEN;
+}
+
+static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v)
+{
+ struct mr_mfc_iter *it = seq->private;
+ struct mr_table *mrt = it->mrt;
+
+ if (it->cache == &mrt->mfc_unres_queue)
+ spin_unlock_bh(it->lock);
+ else if (it->cache == &mrt->mfc_cache_list)
+ rcu_read_unlock();
+}
+#else
+static inline void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter,
+ loff_t pos)
+{
+ return NULL;
+}
+
+static inline void *mr_vif_seq_next(struct seq_file *seq,
+ void *v, loff_t *pos)
+{
+ return NULL;
+}
+
+static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return NULL;
+}
+
+static inline void *mr_mfc_seq_idx(struct net *net,
+ struct mr_mfc_iter *it, loff_t pos)
+{
+ return NULL;
+}
+
+static inline void *mr_mfc_seq_next(struct seq_file *seq, void *v,
+ loff_t *pos)
+{
+ return NULL;
+}
+
+static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos,
+ struct mr_table *mrt, spinlock_t *lock)
+{
+ return NULL;
+}
+
+static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v)
+{
+}
+#endif
+#endif
+#endif
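struct mr_mfc is deliberately the first member of both mfc_cache and mfc6_cache, so the common code in mroute_base.h can operate on struct mr_mfc pointers while the per-protocol code converts back. A hedged sketch of that pattern for IPv4; demo_drop_route() is illustrative:

/* Sketch only: container_of() recovers the IPv4 entry from the
 * common part; mr_cache_put() frees it through c->free once the
 * refcount drops to zero.
 */
static void demo_drop_route(struct mr_mfc *c)
{
	struct mfc_cache *cache = container_of(c, struct mfc_cache, _c);

	pr_debug("dropping %pI4 -> %pI4\n",
		 &cache->mfc_origin, &cache->mfc_mcastgrp);
	mr_cache_put(c);
}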
diff --git a/include/linux/net.h b/include/linux/net.h
index 2a0391eea05c..2248a052061d 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -146,7 +146,7 @@ struct proto_ops {
struct socket *newsock, int flags, bool kern);
int (*getname) (struct socket *sock,
struct sockaddr *addr,
- int *sockaddr_len, int peer);
+ int peer);
__poll_t (*poll) (struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int (*ioctl) (struct socket *sock, unsigned int cmd,
@@ -295,10 +295,8 @@ int kernel_listen(struct socket *sock, int backlog);
int kernel_accept(struct socket *sock, struct socket **newsock, int flags);
int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
int flags);
-int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
- int *addrlen);
-int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
- int *addrlen);
+int kernel_getsockname(struct socket *sock, struct sockaddr *addr);
+int kernel_getpeername(struct socket *sock, struct sockaddr *addr);
int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval,
int *optlen);
int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval,
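kernel_getsockname() and kernel_getpeername() lose their length output parameter in this series; the address length is instead returned as a positive value, with a negative errno on failure. A sketch of a caller updated to that convention; demo_local_port() is illustrative:

/* Sketch, assuming the new return convention described above. */
static int demo_local_port(struct socket *sock, u16 *port)
{
	struct sockaddr_storage ss;
	int len;

	len = kernel_getsockname(sock, (struct sockaddr *)&ss);
	if (len < 0)
		return len;
	if (ss.ss_family == AF_INET)
		*port = ntohs(((struct sockaddr_in *)&ss)->sin_port);
	else
		*port = ntohs(((struct sockaddr_in6 *)&ss)->sin6_port);
	return 0;
}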
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index db84c516bcfb..35b79f47a13d 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -79,6 +79,7 @@ enum {
NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */
+ NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
/*
* Add your fresh new feature above and remember to update
@@ -145,6 +146,7 @@ enum {
#define NETIF_F_HW_ESP __NETIF_F(HW_ESP)
#define NETIF_F_HW_ESP_TX_CSUM __NETIF_F(HW_ESP_TX_CSUM)
#define NETIF_F_RX_UDP_TUNNEL_PORT __NETIF_F(RX_UDP_TUNNEL_PORT)
+#define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD)
#define for_each_netdev_feature(mask_addr, bit) \
for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5eef6c8e2741..cf44503ea81a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -58,6 +58,7 @@ struct device;
struct phy_device;
struct dsa_port;
+struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
@@ -585,6 +586,15 @@ struct netdev_queue {
#endif
} ____cacheline_aligned_in_smp;
+extern int sysctl_fb_tunnels_only_for_init_net;
+
+static inline bool net_has_fallback_tunnels(const struct net *net)
+{
+ return net == &init_net ||
+ !IS_ENABLED(CONFIG_SYSCTL) ||
+ !sysctl_fb_tunnels_only_for_init_net;
+}
+
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
@@ -1381,8 +1391,6 @@ struct net_device_ops {
* @IFF_MACVLAN: Macvlan device
* @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
* underlying stacked devices
- * @IFF_IPVLAN_MASTER: IPvlan master device
- * @IFF_IPVLAN_SLAVE: IPvlan slave device
* @IFF_L3MDEV_MASTER: device is an L3 master device
* @IFF_NO_QUEUE: device can run without qdisc attached
* @IFF_OPENVSWITCH: device is a Open vSwitch master
@@ -1392,6 +1400,7 @@ struct net_device_ops {
* @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
* entity (i.e. the master device for bridged veth)
* @IFF_MACSEC: device is a MACsec device
+ * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1412,16 +1421,15 @@ enum netdev_priv_flags {
IFF_LIVE_ADDR_CHANGE = 1<<15,
IFF_MACVLAN = 1<<16,
IFF_XMIT_DST_RELEASE_PERM = 1<<17,
- IFF_IPVLAN_MASTER = 1<<18,
- IFF_IPVLAN_SLAVE = 1<<19,
- IFF_L3MDEV_MASTER = 1<<20,
- IFF_NO_QUEUE = 1<<21,
- IFF_OPENVSWITCH = 1<<22,
- IFF_L3MDEV_SLAVE = 1<<23,
- IFF_TEAM = 1<<24,
- IFF_RXFH_CONFIGURED = 1<<25,
- IFF_PHONY_HEADROOM = 1<<26,
- IFF_MACSEC = 1<<27,
+ IFF_L3MDEV_MASTER = 1<<18,
+ IFF_NO_QUEUE = 1<<19,
+ IFF_OPENVSWITCH = 1<<20,
+ IFF_L3MDEV_SLAVE = 1<<21,
+ IFF_TEAM = 1<<22,
+ IFF_RXFH_CONFIGURED = 1<<23,
+ IFF_PHONY_HEADROOM = 1<<24,
+ IFF_MACSEC = 1<<25,
+ IFF_NO_RX_HANDLER = 1<<26,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1442,8 +1450,6 @@ enum netdev_priv_flags {
#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
-#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
-#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
#define IFF_NO_QUEUE IFF_NO_QUEUE
#define IFF_OPENVSWITCH IFF_OPENVSWITCH
@@ -1451,6 +1457,7 @@ enum netdev_priv_flags {
#define IFF_TEAM IFF_TEAM
#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
#define IFF_MACSEC IFF_MACSEC
+#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
/**
* struct net_device - The DEVICE structure.
@@ -1656,6 +1663,7 @@ enum netdev_priv_flags {
* @priomap: XXX: need comments on this one
* @phydev: Physical device may attach itself
* for hardware timestamping
+ * @sfp_bus: attached &struct sfp_bus structure.
*
* @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
* @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
@@ -1798,11 +1806,17 @@ struct net_device {
#if IS_ENABLED(CONFIG_TIPC)
struct tipc_bearer __rcu *tipc_ptr;
#endif
+#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
void *atalk_ptr;
+#endif
struct in_device __rcu *ip_ptr;
+#if IS_ENABLED(CONFIG_DECNET)
struct dn_dev __rcu *dn_ptr;
+#endif
struct inet6_dev __rcu *ip6_ptr;
+#if IS_ENABLED(CONFIG_AX25)
void *ax25_ptr;
+#endif
struct wireless_dev *ieee80211_ptr;
struct wpan_dev *ieee802154_ptr;
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
@@ -1933,6 +1947,7 @@ struct net_device {
struct netprio_map __rcu *priomap;
#endif
struct phy_device *phydev;
+ struct sfp_bus *sfp_bus;
struct lock_class_key *qdisc_tx_busylock;
struct lock_class_key *qdisc_running_key;
bool proto_down;
@@ -2300,43 +2315,49 @@ struct netdev_lag_lower_state_info {
#include <linux/notifier.h>
-/* netdevice notifier chain. Please remember to update the rtnetlink
- * notification exclusion list in rtnetlink_event() when adding new
- * types.
+/* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
+ * and the rtnetlink notification exclusion list in rtnetlink_event() when
+ * adding new types.
*/
-#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
-#define NETDEV_DOWN 0x0002
-#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
+enum netdev_cmd {
+ NETDEV_UP = 1, /* For now you can't veto a device up/down */
+ NETDEV_DOWN,
+ NETDEV_REBOOT, /* Tell a protocol stack a network interface
detected a hardware crash and restarted
- we can use this eg to kick tcp sessions
once done */
-#define NETDEV_CHANGE 0x0004 /* Notify device state change */
-#define NETDEV_REGISTER 0x0005
-#define NETDEV_UNREGISTER 0x0006
-#define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */
-#define NETDEV_CHANGEADDR 0x0008
-#define NETDEV_GOING_DOWN 0x0009
-#define NETDEV_CHANGENAME 0x000A
-#define NETDEV_FEAT_CHANGE 0x000B
-#define NETDEV_BONDING_FAILOVER 0x000C
-#define NETDEV_PRE_UP 0x000D
-#define NETDEV_PRE_TYPE_CHANGE 0x000E
-#define NETDEV_POST_TYPE_CHANGE 0x000F
-#define NETDEV_POST_INIT 0x0010
-#define NETDEV_UNREGISTER_FINAL 0x0011
-#define NETDEV_RELEASE 0x0012
-#define NETDEV_NOTIFY_PEERS 0x0013
-#define NETDEV_JOIN 0x0014
-#define NETDEV_CHANGEUPPER 0x0015
-#define NETDEV_RESEND_IGMP 0x0016
-#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
-#define NETDEV_CHANGEINFODATA 0x0018
-#define NETDEV_BONDING_INFO 0x0019
-#define NETDEV_PRECHANGEUPPER 0x001A
-#define NETDEV_CHANGELOWERSTATE 0x001B
-#define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C
-#define NETDEV_UDP_TUNNEL_DROP_INFO 0x001D
-#define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E
+ NETDEV_CHANGE, /* Notify device state change */
+ NETDEV_REGISTER,
+ NETDEV_UNREGISTER,
+ NETDEV_CHANGEMTU, /* notify after mtu change happened */
+ NETDEV_CHANGEADDR,
+ NETDEV_GOING_DOWN,
+ NETDEV_CHANGENAME,
+ NETDEV_FEAT_CHANGE,
+ NETDEV_BONDING_FAILOVER,
+ NETDEV_PRE_UP,
+ NETDEV_PRE_TYPE_CHANGE,
+ NETDEV_POST_TYPE_CHANGE,
+ NETDEV_POST_INIT,
+ NETDEV_RELEASE,
+ NETDEV_NOTIFY_PEERS,
+ NETDEV_JOIN,
+ NETDEV_CHANGEUPPER,
+ NETDEV_RESEND_IGMP,
+ NETDEV_PRECHANGEMTU, /* notify before mtu change happened */
+ NETDEV_CHANGEINFODATA,
+ NETDEV_BONDING_INFO,
+ NETDEV_PRECHANGEUPPER,
+ NETDEV_CHANGELOWERSTATE,
+ NETDEV_UDP_TUNNEL_PUSH_INFO,
+ NETDEV_UDP_TUNNEL_DROP_INFO,
+ NETDEV_CHANGE_TX_QUEUE_LEN,
+ NETDEV_CVLAN_FILTER_PUSH_INFO,
+ NETDEV_CVLAN_FILTER_DROP_INFO,
+ NETDEV_SVLAN_FILTER_PUSH_INFO,
+ NETDEV_SVLAN_FILTER_DROP_INFO,
+};
+const char *netdev_cmd_to_name(enum netdev_cmd cmd);
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -4217,16 +4238,6 @@ static inline bool netif_is_macvlan_port(const struct net_device *dev)
return dev->priv_flags & IFF_MACVLAN_PORT;
}
-static inline bool netif_is_ipvlan(const struct net_device *dev)
-{
- return dev->priv_flags & IFF_IPVLAN_SLAVE;
-}
-
-static inline bool netif_is_ipvlan_port(const struct net_device *dev)
-{
- return dev->priv_flags & IFF_IPVLAN_MASTER;
-}
-
static inline bool netif_is_bond_master(const struct net_device *dev)
{
return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
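With the notifier event codes converted to enum netdev_cmd, netdev_cmd_to_name() gives a printable name for each event. A hedged sketch of a notifier that logs events by name; the demo_* identifiers are illustrative:

/* Sketch only: logs every netdevice event by name. */
static int demo_netdev_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	pr_debug("%s: %s\n", netdev_name(dev), netdev_cmd_to_name(event));
	return NOTIFY_DONE;
}

static struct notifier_block demo_notifier = {
	.notifier_call = demo_netdev_event,
};

Registration is unchanged: register_netdevice_notifier(&demo_notifier).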
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index b4d741195c28..beee8bffe49e 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -16,6 +16,5 @@ struct nf_acct;
struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
void nfnl_acct_put(struct nf_acct *acct);
void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
-int nfnl_acct_overquota(struct net *net, const struct sk_buff *skb,
- struct nf_acct *nfacct);
+int nfnl_acct_overquota(struct net *net, struct nf_acct *nfacct);
#endif /* _NFNL_ACCT_H */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 14529511c4b8..9077b3ebea08 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -281,6 +281,8 @@ int xt_check_entry_offsets(const void *base, const char *elems,
unsigned int target_offset,
unsigned int next_offset);
+int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks);
+
unsigned int *xt_alloc_entry_offsets(unsigned int size);
bool xt_find_jump_offset(const unsigned int *offsets,
unsigned int target, unsigned int size);
@@ -301,6 +303,7 @@ int xt_data_to_user(void __user *dst, const void *src,
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
struct xt_counters_info *info, bool compat);
+struct xt_counters *xt_counters_alloc(unsigned int counters);
struct xt_table *xt_register_table(struct net *net,
const struct xt_table *table,
@@ -509,7 +512,7 @@ void xt_compat_unlock(u_int8_t af);
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
void xt_compat_flush_offsets(u_int8_t af);
-void xt_compat_init_offsets(u_int8_t af, unsigned int number);
+int xt_compat_init_offsets(u8 af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
int xt_compat_match_offset(const struct xt_match *match);
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 9cd72aab76fe..90d81ee9e6a0 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -13,6 +13,7 @@
struct net_device;
extern int of_get_phy_mode(struct device_node *np);
extern const void *of_get_mac_address(struct device_node *np);
+extern int of_get_nvmem_mac_address(struct device_node *np, void *addr);
extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
static inline int of_get_phy_mode(struct device_node *np)
@@ -25,6 +26,11 @@ static inline const void *of_get_mac_address(struct device_node *np)
return NULL;
}
+static inline int of_get_nvmem_mac_address(struct device_node *np, void *addr)
+{
+ return -ENODEV;
+}
+
static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
{
return NULL;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 7c4c2379e010..f0b5870a6d40 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -999,6 +999,14 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev);
int genphy_c45_an_disable_aneg(struct phy_device *phydev);
int genphy_c45_read_mdix(struct phy_device *phydev);
+/* The gen10g_* functions are the old Clause 45 stub */
+int gen10g_config_aneg(struct phy_device *phydev);
+int gen10g_read_status(struct phy_device *phydev);
+int gen10g_no_soft_reset(struct phy_device *phydev);
+int gen10g_config_init(struct phy_device *phydev);
+int gen10g_suspend(struct phy_device *phydev);
+int gen10g_resume(struct phy_device *phydev);
+
static inline int phy_read_status(struct phy_device *phydev)
{
if (!phydev->drv)
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index bd137c273d38..50eeae025f1e 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -73,8 +73,10 @@ struct phylink_mac_ops {
void (*mac_config)(struct net_device *ndev, unsigned int mode,
const struct phylink_link_state *state);
void (*mac_an_restart)(struct net_device *ndev);
- void (*mac_link_down)(struct net_device *ndev, unsigned int mode);
+ void (*mac_link_down)(struct net_device *ndev, unsigned int mode,
+ phy_interface_t interface);
void (*mac_link_up)(struct net_device *ndev, unsigned int mode,
+ phy_interface_t interface,
struct phy_device *phy);
};
@@ -161,25 +163,31 @@ void mac_an_restart(struct net_device *ndev);
* mac_link_down() - take the link down
* @ndev: a pointer to a &struct net_device for the MAC.
* @mode: link autonegotiation mode
+ * @interface: link &typedef phy_interface_t mode
*
* If @mode is not an in-band negotiation mode (as defined by
* phylink_autoneg_inband()), force the link down and disable any
- * Energy Efficient Ethernet MAC configuration.
+ * Energy Efficient Ethernet MAC configuration. Interface type
+ * selection must be done in mac_config().
*/
-void mac_link_down(struct net_device *ndev, unsigned int mode);
+void mac_link_down(struct net_device *ndev, unsigned int mode,
+ phy_interface_t interface);
/**
* mac_link_up() - allow the link to come up
* @ndev: a pointer to a &struct net_device for the MAC.
* @mode: link autonegotiation mode
+ * @interface: link &typedef phy_interface_t mode
* @phy: any attached phy
*
* If @mode is not an in-band negotiation mode (as defined by
* phylink_autoneg_inband()), allow the link to come up. If @phy
* is non-%NULL, configure Energy Efficient Ethernet by calling
* phy_init_eee() and perform appropriate MAC configuration for EEE.
+ * Interface type selection must be done in mac_config().
*/
void mac_link_up(struct net_device *ndev, unsigned int mode,
+ phy_interface_t interface,
struct phy_device *phy);
#endif
@@ -211,9 +219,6 @@ void phylink_ethtool_get_pauseparam(struct phylink *,
struct ethtool_pauseparam *);
int phylink_ethtool_set_pauseparam(struct phylink *,
struct ethtool_pauseparam *);
-int phylink_ethtool_get_module_info(struct phylink *, struct ethtool_modinfo *);
-int phylink_ethtool_get_module_eeprom(struct phylink *,
- struct ethtool_eeprom *, u8 *);
int phylink_get_eee_err(struct phylink *);
int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
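mac_link_down() and mac_link_up() in struct phylink_mac_ops now also receive the phy_interface_t that was selected earlier in mac_config(). A sketch of a MAC driver adapting its callbacks; the demo_* names are illustrative:

/* Sketch only: interface selection stays in mac_config(); these
 * callbacks just start/stop the MAC for that interface.
 */
static void demo_mac_link_down(struct net_device *ndev, unsigned int mode,
			       phy_interface_t interface)
{
	netdev_dbg(ndev, "link down (%s)\n", phy_modes(interface));
	/* stop the MAC, disable EEE if mode is not in-band */
}

static void demo_mac_link_up(struct net_device *ndev, unsigned int mode,
			     phy_interface_t interface,
			     struct phy_device *phy)
{
	netdev_dbg(ndev, "link up (%s)\n", phy_modes(interface));
	/* start the MAC; configure EEE via phy when it is non-NULL */
}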
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index a079656b614c..059242030631 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -75,5 +75,9 @@ void __init ptp_classifier_init(void);
static inline void ptp_classifier_init(void)
{
}
+static inline unsigned int ptp_classify_raw(struct sk_buff *skb)
+{
+ return PTP_CLASS_NONE;
+}
#endif
#endif /* _PTP_CLASSIFY_H_ */
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index e6335227b844..6894976b54e3 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -296,13 +296,14 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
{
void *ptr;
+ /* The READ_ONCE in __ptr_ring_peek guarantees that anyone
+ * accessing data through the pointer is up to date. Pairs
+ * with smp_wmb in __ptr_ring_produce.
+ */
ptr = __ptr_ring_peek(r);
if (ptr)
__ptr_ring_discard_one(r);
- /* Make sure anyone accessing data through the pointer is up to date. */
- /* Pairs with smp_wmb in __ptr_ring_produce. */
- smp_read_barrier_depends();
return ptr;
}
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 2b3b350e07b7..13c8ab171437 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -110,7 +110,7 @@
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 33
-#define FW_REVISION_VERSION 1
+#define FW_REVISION_VERSION 11
#define FW_ENGINEERING_VERSION 0
/***********************/
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 9db02856623b..d9416ad5ef59 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -105,7 +105,7 @@
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
/* GFS constants */
-#define ETH_GFT_TRASH_CAN_VPORT 0x1FF
+#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */
/* Destination port mode */
enum dest_port_mode {
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 4cc9b37b8d95..938df614cb6a 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -753,8 +753,8 @@ struct e4_ystorm_iscsi_task_ag_ctx {
#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7
u8 flags1;
#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 15e398c7230e..b5b2bc9eacca 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -483,6 +483,15 @@ struct qed_int_info {
u8 used_cnt;
};
+#define QED_NVM_SIGNATURE 0x12435687
+
+enum qed_nvm_flash_cmd {
+ QED_NVM_FLASH_CMD_FILE_DATA = 0x2,
+ QED_NVM_FLASH_CMD_FILE_START = 0x3,
+ QED_NVM_FLASH_CMD_NVM_CHANGE = 0x4,
+ QED_NVM_FLASH_CMD_NVM_MAX,
+};
+
struct qed_common_cb_ops {
void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
void (*link_update)(void *dev,
@@ -658,6 +667,16 @@ struct qed_common_ops {
struct qed_chain *p_chain);
/**
+ * @brief nvm_flash - Flash nvm data.
+ *
+ * @param cdev
+ * @param name - file containing the data
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*nvm_flash)(struct qed_dev *cdev, const char *name);
+
+/**
* @brief nvm_get_image - reads an entire image from nvram
*
* @param cdev
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index c1a446ebe362..480a57eb36cc 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -51,6 +51,8 @@
#define RDMA_MAX_CQS (64 * 1024)
#define RDMA_MAX_TIDS (128 * 1024 - 1)
#define RDMA_MAX_PDS (64 * 1024)
+#define RDMA_MAX_XRC_SRQS (1024)
+#define RDMA_MAX_SRQS (32 * 1024)
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index e15e0da71240..193bcef302e1 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -59,6 +59,9 @@ enum roce_async_events_type {
ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
ROCE_ASYNC_EVENT_SRQ_EMPTY,
ROCE_ASYNC_EVENT_DESTROY_QP_DONE,
+ ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR,
+ ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR,
+ ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR,
MAX_ROCE_ASYNC_EVENTS_TYPE
};
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 668a21f04b09..1f8ad121eb43 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -152,25 +152,25 @@ struct rhashtable_params {
/**
* struct rhashtable - Hash table handle
* @tbl: Bucket table
- * @nelems: Number of elements in table
* @key_len: Key length for hashfn
- * @p: Configuration parameters
* @max_elems: Maximum number of elements in table
+ * @p: Configuration parameters
* @rhlist: True if this is an rhltable
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
* @lock: Spin lock to protect walker list
+ * @nelems: Number of elements in table
*/
struct rhashtable {
struct bucket_table __rcu *tbl;
- atomic_t nelems;
unsigned int key_len;
- struct rhashtable_params p;
unsigned int max_elems;
+ struct rhashtable_params p;
bool rhlist;
struct work_struct run_work;
struct mutex mutex;
spinlock_t lock;
+ atomic_t nelems;
};
/**
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 1fdcde96eb65..5225832bd6ff 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -33,9 +33,11 @@ extern void rtnl_lock(void);
extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
+extern int rtnl_lock_killable(void);
extern wait_queue_head_t netdev_unregistering_wq;
-extern struct mutex net_mutex;
+extern struct rw_semaphore pernet_ops_rwsem;
+extern struct rw_semaphore net_rwsem;
#ifdef CONFIG_PROVE_LOCKING
extern bool lockdep_rtnl_is_held(void);
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 22b2131bcdcd..aa5d4eb725f5 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -248,6 +248,24 @@ static inline void *sg_virt(struct scatterlist *sg)
return page_address(sg_page(sg)) + sg->offset;
}
+/**
+ * sg_init_marker - Initialize markers in sg table
+ * @sgl: The SG table
+ * @nents: Number of entries in table
+ *
+ **/
+static inline void sg_init_marker(struct scatterlist *sgl,
+ unsigned int nents)
+{
+#ifdef CONFIG_DEBUG_SG
+ unsigned int i;
+
+ for (i = 0; i < nents; i++)
+ sgl[i].sg_magic = SG_MAGIC;
+#endif
+ sg_mark_end(&sgl[nents - 1]);
+}
+
int sg_nents(struct scatterlist *sg);
int sg_nents_for_len(struct scatterlist *sg, u64 len);
struct scatterlist *sg_next(struct scatterlist *);
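sg_init_marker() splits the end-marking (and, with CONFIG_DEBUG_SG, the magic initialization) out of sg_init_table(), so a scatterlist array that is already zeroed does not need to be cleared a second time. A short sketch; demo_alloc_sgl() is illustrative:

static struct scatterlist *demo_alloc_sgl(unsigned int nents)
{
	struct scatterlist *sgl;

	sgl = kcalloc(nents, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		return NULL;
	sg_init_marker(sgl, nents);	/* no redundant memset */
	return sgl;
}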
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index e724d5a3dd80..ebce9e24906a 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -422,10 +422,11 @@ struct sfp_upstream_ops {
#if IS_ENABLED(CONFIG_SFP)
int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support);
-phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id);
void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support);
+phy_interface_t sfp_select_interface(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id,
+ unsigned long *link_modes);
int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo);
int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
@@ -444,18 +445,19 @@ static inline int sfp_parse_port(struct sfp_bus *bus,
return PORT_OTHER;
}
-static inline phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id)
-{
- return PHY_INTERFACE_MODE_NA;
-}
-
static inline void sfp_parse_support(struct sfp_bus *bus,
const struct sfp_eeprom_id *id,
unsigned long *support)
{
}
+static inline phy_interface_t sfp_select_interface(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id,
+ unsigned long *link_modes)
+{
+ return PHY_INTERFACE_MODE_NA;
+}
+
static inline int sfp_get_module_info(struct sfp_bus *bus,
struct ethtool_modinfo *modinfo)
{
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 99df17109e1b..9065477ed255 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -466,6 +466,9 @@ struct ubuf_info {
#define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
+int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
+void mm_unaccount_pinned_pages(struct mmpin *mmp);
+
struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
struct ubuf_info *uarg);
@@ -669,6 +672,7 @@ struct sk_buff {
* UDP receive path is one user.
*/
unsigned long dev_scratch;
+ int ip_defrag_offset;
};
};
struct rb_node rbnode; /* used in netem & tcp stack */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index e2b6bd4fe977..ea50f4a65816 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -287,6 +287,7 @@ struct ucred {
#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
#define MSG_EOF MSG_FIN
+#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */
#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
@@ -381,5 +382,5 @@ extern int __sys_socketpair(int family, int type, int protocol,
int __user *usockvec);
extern int __sys_shutdown(int fd, int how);
-
+extern struct ns_common *get_net_ns(struct ns_common *ns);
#endif /* _LINUX_SOCKET_H */
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index c20acfcf647d..e0e98000b665 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -468,6 +468,9 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
+int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
+int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
+struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
@@ -487,6 +490,18 @@ perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
return -EOPNOTSUPP;
}
+static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p)
+{
+ return -EOPNOTSUPP;
+}
+static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p)
+{
+ return -EOPNOTSUPP;
+}
+static inline struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
+{
+ return NULL;
+}
#endif
enum {
@@ -554,6 +569,33 @@ extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
+void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
+void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
+void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3);
+void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4);
+void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4, u64 arg5);
+void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4, u64 arg5, u64 arg6);
+void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
+void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
+ u64 arg8);
+void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
+ u64 arg8, u64 arg9);
+void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
+ u64 arg8, u64 arg9, u64 arg10);
+void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
+ u64 arg8, u64 arg9, u64 arg10, u64 arg11);
+void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
+ u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
struct trace_event_call *call, u64 count,
struct pt_regs *regs, struct hlist_head *head,
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
index 64ed7064f1fa..22c5a46e9693 100644
--- a/include/linux/tracepoint-defs.h
+++ b/include/linux/tracepoint-defs.h
@@ -35,4 +35,10 @@ struct tracepoint {
struct tracepoint_func __rcu *funcs;
};
+struct bpf_raw_event_map {
+ struct tracepoint *tp;
+ void *bpf_func;
+ u32 num_args;
+} __aligned(32);
+
#endif
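struct bpf_raw_event_map is the glue used by BPF_RAW_TRACEPOINT attachment: bpf_find_raw_tracepoint() looks an entry up by name, bpf_probe_register() hooks the program into the tracepoint, and the registered probe hands the raw arguments to the bpf_trace_runN() variant matching num_args. An illustrative, hand-written entry follows; in practice these are generated by the tracepoint macros, and all demo_* and __tracepoint_demo_event names here are hypothetical:

/* Illustrative only: a two-argument tracepoint and its thunk. */
static void demo_bpf_trace_thunk(void *bpf_prog, u64 arg1, u64 arg2)
{
	bpf_trace_run2(bpf_prog, arg1, arg2);
}

static struct bpf_raw_event_map demo_btp __aligned(32) = {
	.tp	  = &__tracepoint_demo_event,	/* hypothetical tracepoint */
	.bpf_func = (void *)demo_bpf_trace_thunk,
	.num_args = 2,
};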