-rw-r--r--  Documentation/devicetree/bindings/net/dsa/mt7530.txt | 214
-rw-r--r--  Documentation/networking/device_drivers/index.rst | 1
-rw-r--r--  Documentation/networking/device_drivers/mellanox/mlx5.rst | 33
-rw-r--r--  Documentation/networking/device_drivers/pensando/ionic.rst | 43
-rw-r--r--  Documentation/networking/devlink-info-versions.rst | 16
-rw-r--r--  MAINTAINERS | 8
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_main.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/cmd.c | 130
-rw-r--r--  drivers/infiniband/hw/mlx5/cmd.h | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/flow.c | 21
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 109
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 7
-rw-r--r--  drivers/net/dsa/mt7530.c | 371
-rw-r--r--  drivers/net/dsa/mt7530.h | 61
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 31
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 37
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.h | 40
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 88
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 43
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 112
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 87
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 116
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 160
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c | 223
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 1588
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 480
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c | 98
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c | 395
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c | 93
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c | 570
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 770
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 1243
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 976
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 2308
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c | 294
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 1060
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 600
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h | 60
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h | 604
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 212
-rw-r--r--  drivers/net/ethernet/pensando/Kconfig | 32
-rw-r--r--  drivers/net/ethernet/pensando/Makefile | 6
-rw-r--r--  drivers/net/ethernet/pensando/ionic/Makefile | 8
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic.h | 73
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_bus.h | 16
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c | 292
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_debugfs.c | 248
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_debugfs.h | 34
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.c | 500
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.h | 299
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_devlink.c | 99
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_devlink.h | 14
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 779
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.h | 9
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_if.h | 2482
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 2274
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h | 277
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_main.c | 549
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_regs.h | 136
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c | 150
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h | 35
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_stats.c | 310
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_stats.h | 53
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 925
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.h | 15
-rw-r--r--  drivers/net/geneve.c | 2
-rw-r--r--  drivers/net/usb/r8152.c | 196
-rw-r--r--  include/linux/mlx5/device.h | 7
-rw-r--r--  include/linux/mlx5/driver.h | 14
-rw-r--r--  include/linux/mlx5/eswitch.h | 8
-rw-r--r--  include/linux/mlx5/fs.h | 33
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 235
-rw-r--r--  include/net/devlink.h | 7
-rw-r--r--  include/net/tls.h | 48
-rw-r--r--  include/net/vxlan.h | 4
-rw-r--r--  net/rds/af_rds.c | 4
-rw-r--r--  net/rds/bind.c | 4
-rw-r--r--  net/rds/send.c | 4
-rw-r--r--  net/sched/sch_cbs.c | 2
-rw-r--r--  net/tls/tls_device.c | 78
-rw-r--r--  net/tls/tls_main.c | 46
-rw-r--r--  net/tls/tls_sw.c | 6
-rw-r--r--  net/vmw_vsock/virtio_transport_common.c | 9
105 files changed, 23200 insertions, 710 deletions
diff --git a/Documentation/devicetree/bindings/net/dsa/mt7530.txt b/Documentation/devicetree/bindings/net/dsa/mt7530.txt
index 47aa205ee0bd..c5ed5d25f642 100644
--- a/Documentation/devicetree/bindings/net/dsa/mt7530.txt
+++ b/Documentation/devicetree/bindings/net/dsa/mt7530.txt
@@ -35,6 +35,42 @@ Required properties for the child nodes within ports container:
- phy-mode: String, must be either "trgmii" or "rgmii" for port labeled
"cpu".
+Port 5 of the switch is muxed between:
+1. GMAC5: GMAC5 can interface with another external MAC or PHY.
+2. PHY of port 0 or port 4: the PHY interfaces with an external MAC, such as
+   the 2nd GMAC of the SOC. Used in many setups where port 0/4 becomes the
+   WAN port.
+   Note: On a MT7621 SOC with an integrated switch, the 2nd GMAC can only be
+   connected to GMAC5 when the gpios for RGMII2 (GPIO 22-33) are not used and
+   not connected to an external component!
+
+Port 5 modes/configurations:
+1. Port 5 is disabled and isolated: An external phy can interface to the 2nd
+ GMAC of the SOC.
+   In the case of a built-in MT7530 switch, port 5 shares the RGMII bus with
+   the 2nd GMAC and an optional external phy. Mind the GPIO/pinctrl settings
+   of the SOC!
+2. Port 5 is muxed to the PHY of port 0/4: Port 0/4 interfaces with the 2nd
+   GMAC. It is a simple MAC-to-PHY interface; port 5 needs to be set up for
+   xMII mode and RGMII delay.
+3. Port 5 is muxed to GMAC5 and can interface to an external phy.
+   Port 5 becomes an extra switch port.
+   This only works on platforms where the external phy's TX<->RX lines are
+   swapped, such as on the Ubiquiti ER-X-SFP.
+4. Port 5 is muxed to GMAC5 and interfaces with the 2nd GMAC as a 2nd CPU
+   port. A 2nd CPU port is currently not supported by the DSA code.
+
+Depending on how the external PHY is wired:
+1. normal: The PHY can only connect to the 2nd GMAC, not to the switch.
+2. swapped: RGMII TX and RX are swapped; the external phy interfaces with the
+   switch as an ethernet port, but cannot interface with the 2nd GMAC.
+
+The port 5 mode is configured based on the DT.
+
+The driver tries to look up the phy-handle of the 2nd GMAC of the master
+device. When the phy-handle matches the PHY of port 0 or 4, port 5 is set up
+as mode 2.
+phy-mode must be set, see also example 2 below!
+ * mt7621: phy-mode = "rgmii-txid";
+ * mt7623: phy-mode = "rgmii";
+
See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
required, optional properties and how the integrated switch subnodes must
be specified.
@@ -94,3 +130,181 @@ Example:
};
};
};
+
+Example 2: MT7621: Port 4 is WAN port: 2nd GMAC -> Port 5 -> PHY port 4.
+
+&eth {
+ gmac0: mac@0 {
+ compatible = "mediatek,eth-mac";
+ reg = <0>;
+ phy-mode = "rgmii";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+
+ gmac1: mac@1 {
+ compatible = "mediatek,eth-mac";
+ reg = <1>;
+ phy-mode = "rgmii-txid";
+ phy-handle = <&phy4>;
+ };
+
+ mdio: mdio-bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Internal phy */
+ phy4: ethernet-phy@4 {
+ reg = <4>;
+ };
+
+ mt7530: switch@1f {
+ compatible = "mediatek,mt7621";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1f>;
+ pinctrl-names = "default";
+ mediatek,mcm;
+
+ resets = <&rstctrl 2>;
+ reset-names = "mcm";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan0";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ };
+
+/* Commented out. Port 4 is handled by 2nd GMAC.
+ port@4 {
+ reg = <4>;
+ label = "lan4";
+ };
+*/
+
+ cpu_port0: port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "rgmii";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+ };
+ };
+};
+
+Example 3: MT7621: Port 5 is connected to external PHY: Port 5 -> external PHY.
+
+&eth {
+ gmac0: mac@0 {
+ compatible = "mediatek,eth-mac";
+ reg = <0>;
+ phy-mode = "rgmii";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+
+ mdio: mdio-bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* External phy */
+ ephy5: ethernet-phy@7 {
+ reg = <7>;
+ };
+
+ mt7530: switch@1f {
+ compatible = "mediatek,mt7621";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1f>;
+ pinctrl-names = "default";
+ mediatek,mcm;
+
+ resets = <&rstctrl 2>;
+ reset-names = "mcm";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan0";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "lan4";
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "lan5";
+ phy-mode = "rgmii";
+ phy-handle = <&ephy5>;
+ };
+
+ cpu_port0: port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "rgmii";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/Documentation/networking/device_drivers/index.rst b/Documentation/networking/device_drivers/index.rst
index 2b7fefe72351..57fec66c5419 100644
--- a/Documentation/networking/device_drivers/index.rst
+++ b/Documentation/networking/device_drivers/index.rst
@@ -23,6 +23,7 @@ Contents:
intel/ice
google/gve
mellanox/mlx5
+ pensando/ionic
.. only:: subproject
diff --git a/Documentation/networking/device_drivers/mellanox/mlx5.rst b/Documentation/networking/device_drivers/mellanox/mlx5.rst
index b30a63dbf4b7..d071c6b49e1f 100644
--- a/Documentation/networking/device_drivers/mellanox/mlx5.rst
+++ b/Documentation/networking/device_drivers/mellanox/mlx5.rst
@@ -11,6 +11,7 @@ Contents
- `Enabling the driver and kconfig options`_
- `Devlink info`_
+- `Devlink parameters`_
- `Devlink health reporters`_
- `mlx5 tracepoints`_
@@ -122,6 +123,38 @@ User command example::
stored:
fw.version 16.26.0100
+Devlink parameters
+==================
+
+flow_steering_mode: Device flow steering mode
+---------------------------------------------
+The flow steering mode parameter controls the flow steering mode of the driver.
+Two modes are supported:
+1. 'dmfs' - Device managed flow steering.
+2. 'smfs' - Software/Driver managed flow steering.
+
+In DMFS mode, the HW steering entities are created and managed through the
+firmware.
+In SMFS mode, the HW steering entities are created and managed by the driver
+directly in the hardware, without firmware intervention.
+
+SMFS mode is faster and provides a better rule insertion rate than the
+default DMFS mode.
+
+User command examples:
+
+- Set SMFS flow steering mode::
+
+ $ devlink dev param set pci/0000:06:00.0 name flow_steering_mode value "smfs" cmode runtime
+
+- Read device flow steering mode::
+
+ $ devlink dev param show pci/0000:06:00.0 name flow_steering_mode
+ pci/0000:06:00.0:
+ name flow_steering_mode type driver-specific
+ values:
+ cmode runtime value smfs
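+
+- Switch back to the default DMFS flow steering mode (same device address
+  assumed as in the examples above)::
+
+ $ devlink dev param set pci/0000:06:00.0 name flow_steering_mode value "dmfs" cmode runtime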
+
+
Devlink health reporters
========================
diff --git a/Documentation/networking/device_drivers/pensando/ionic.rst b/Documentation/networking/device_drivers/pensando/ionic.rst
new file mode 100644
index 000000000000..67b6839d516b
--- /dev/null
+++ b/Documentation/networking/device_drivers/pensando/ionic.rst
@@ -0,0 +1,43 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+==========================================================
+Linux* Driver for the Pensando(R) Ethernet adapter family
+==========================================================
+
+Pensando Linux Ethernet driver.
+Copyright(c) 2019 Pensando Systems, Inc
+
+Contents
+========
+
+- Identifying the Adapter
+- Support
+
+Identifying the Adapter
+=======================
+
+To find if one or more Pensando PCI Ethernet devices are installed on the
+host, check for the PCI devices::
+
+ $ lspci -d 1dd8:
+ b5:00.0 Ethernet controller: Device 1dd8:1002
+ b6:00.0 Ethernet controller: Device 1dd8:1002
+
+If such devices are listed as above, then the ionic.ko driver should find
+and configure them for use. There should be log entries in the kernel
+messages such as these::
+
+ $ dmesg | grep ionic
+ ionic Pensando Ethernet NIC Driver, ver 0.15.0-k
+ ionic 0000:b5:00.0 enp181s0: renamed from eth0
+ ionic 0000:b6:00.0 enp182s0: renamed from eth0
+
+Support
+=======
+For general Linux networking support, please use the netdev mailing
+list, which is monitored by Pensando personnel::
+ netdev@vger.kernel.org
+
+For more specific support needs, please use the Pensando driver support
+email::
+ drivers@pensando.io
diff --git a/Documentation/networking/devlink-info-versions.rst b/Documentation/networking/devlink-info-versions.rst
index 4316342b7746..4914f581b1fd 100644
--- a/Documentation/networking/devlink-info-versions.rst
+++ b/Documentation/networking/devlink-info-versions.rst
@@ -14,11 +14,27 @@ board.rev
Board design revision.
+asic.id
+=======
+
+ASIC design identifier.
+
+asic.rev
+========
+
+ASIC design revision.
+
board.manufacture
=================
An identifier of the company or the facility which produced the part.
+fw
+==
+
+Overall firmware version, often representing the collection of
+fw.mgmt, fw.app, etc.
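+
+Like the other fields in this file, it can be read with the ``devlink dev
+info`` command; a minimal query (PCI device address assumed)::
+
+    $ devlink dev info pci/0000:01:00.0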
+
fw.mgmt
=======
diff --git a/MAINTAINERS b/MAINTAINERS
index a081c477d1d1..c1fb8fb3b2ee 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12608,6 +12608,14 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/peaq-wmi.c
+PENSANDO ETHERNET DRIVERS
+M: Shannon Nelson <snelson@pensando.io>
+M: Pensando Drivers <drivers@pensando.io>
+L: netdev@vger.kernel.org
+S: Supported
+F: Documentation/networking/device_drivers/pensando/ionic.rst
+F: drivers/net/ethernet/pensando/
+
PER-CPU MEMORY ALLOCATOR
M: Dennis Zhou <dennis@kernel.org>
M: Tejun Heo <tj@kernel.org>
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 635bb4b447fb..e6df5b95ed47 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -474,7 +474,8 @@ static int chtls_getsockopt(struct sock *sk, int level, int optname,
struct tls_context *ctx = tls_get_ctx(sk);
if (level != SOL_TLS)
- return ctx->getsockopt(sk, level, optname, optval, optlen);
+ return ctx->sk_proto->getsockopt(sk, level,
+ optname, optval, optlen);
return do_chtls_getsockopt(sk, optval, optlen);
}
@@ -541,7 +542,8 @@ static int chtls_setsockopt(struct sock *sk, int level, int optname,
struct tls_context *ctx = tls_get_ctx(sk);
if (level != SOL_TLS)
- return ctx->setsockopt(sk, level, optname, optval, optlen);
+ return ctx->sk_proto->setsockopt(sk, level,
+ optname, optval, optlen);
return do_chtls_setsockopt(sk, optname, optval, optlen);
}
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 6c8645033102..4937947400cd 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -186,136 +186,6 @@ int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
return err;
}
-int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
- u16 uid, phys_addr_t *addr, u32 *obj_id)
-{
- struct mlx5_core_dev *dev = dm->dev;
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
- u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {};
- unsigned long *block_map;
- u64 icm_start_addr;
- u32 log_icm_size;
- u32 num_blocks;
- u32 max_blocks;
- u64 block_idx;
- void *sw_icm;
- int ret;
-
- MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
- MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
- MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);
-
- switch (type) {
- case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
- icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
- steering_sw_icm_start_address);
- log_icm_size = MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size);
- block_map = dm->steering_sw_icm_alloc_blocks;
- break;
- case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
- icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
- header_modify_sw_icm_start_address);
- log_icm_size = MLX5_CAP_DEV_MEM(dev,
- log_header_modify_sw_icm_size);
- block_map = dm->header_modify_sw_icm_alloc_blocks;
- break;
- default:
- return -EINVAL;
- }
-
- num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
- MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
- max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
- spin_lock(&dm->lock);
- block_idx = bitmap_find_next_zero_area(block_map,
- max_blocks,
- 0,
- num_blocks, 0);
-
- if (block_idx < max_blocks)
- bitmap_set(block_map,
- block_idx, num_blocks);
-
- spin_unlock(&dm->lock);
-
- if (block_idx >= max_blocks)
- return -ENOMEM;
-
- sw_icm = MLX5_ADDR_OF(create_sw_icm_in, in, sw_icm);
- icm_start_addr += block_idx << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
- MLX5_SET64(sw_icm, sw_icm, sw_icm_start_addr,
- icm_start_addr);
- MLX5_SET(sw_icm, sw_icm, log_sw_icm_size, ilog2(length));
-
- ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (ret) {
- spin_lock(&dm->lock);
- bitmap_clear(block_map,
- block_idx, num_blocks);
- spin_unlock(&dm->lock);
-
- return ret;
- }
-
- *addr = icm_start_addr;
- *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-
- return 0;
-}
-
-int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
- u16 uid, phys_addr_t addr, u32 obj_id)
-{
- struct mlx5_core_dev *dev = dm->dev;
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
- u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
- unsigned long *block_map;
- u32 num_blocks;
- u64 start_idx;
- int err;
-
- num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
- MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
-
- switch (type) {
- case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
- start_idx =
- (addr - MLX5_CAP64_DEV_MEM(
- dev, steering_sw_icm_start_address)) >>
- MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
- block_map = dm->steering_sw_icm_alloc_blocks;
- break;
- case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
- start_idx =
- (addr -
- MLX5_CAP64_DEV_MEM(
- dev, header_modify_sw_icm_start_address)) >>
- MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
- block_map = dm->header_modify_sw_icm_alloc_blocks;
- break;
- default:
- return -EINVAL;
- }
-
- MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
- MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
- MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);
-
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (err)
- return err;
-
- spin_lock(&dm->lock);
- bitmap_clear(block_map,
- start_idx, num_blocks);
- spin_unlock(&dm->lock);
-
- return 0;
-}
-
int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
{
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 0572dcba6eae..169cab4915e3 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -65,8 +65,4 @@ int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
u16 uid);
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
-int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
- u16 uid, phys_addr_t *addr, u32 *obj_id);
-int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
- u16 uid, phys_addr_t addr, u32 obj_id);
#endif /* MLX5_IB_CMD_H */
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index b8841355fcd5..1c8f04abee0c 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -322,11 +322,11 @@ void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
switch (maction->flow_action_raw.sub_type) {
case MLX5_IB_FLOW_ACTION_MODIFY_HEADER:
mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev,
- maction->flow_action_raw.action_id);
+ maction->flow_action_raw.modify_hdr);
break;
case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT:
mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev,
- maction->flow_action_raw.action_id);
+ maction->flow_action_raw.pkt_reformat);
break;
case MLX5_IB_FLOW_ACTION_DECAP:
break;
@@ -352,10 +352,11 @@ mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
if (!maction)
return ERR_PTR(-ENOMEM);
- ret = mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in,
- &maction->flow_action_raw.action_id);
+ maction->flow_action_raw.modify_hdr =
+ mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in);
- if (ret) {
+ if (IS_ERR(maction->flow_action_raw.modify_hdr)) {
+ ret = PTR_ERR(maction->flow_action_raw.modify_hdr);
kfree(maction);
return ERR_PTR(ret);
}
@@ -479,11 +480,13 @@ static int mlx5_ib_flow_action_create_packet_reformat_ctx(
if (ret)
return ret;
- ret = mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
- in, namespace,
- &maction->flow_action_raw.action_id);
- if (ret)
+ maction->flow_action_raw.pkt_reformat =
+ mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
+ in, namespace);
+ if (IS_ERR(maction->flow_action_raw.pkt_reformat)) {
+ ret = PTR_ERR(maction->flow_action_raw.pkt_reformat);
return ret;
+ }
maction->flow_action_raw.sub_type =
MLX5_IB_FLOW_ACTION_PACKET_REFORMAT;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 0569bcab02d4..4e9f1507ffd9 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2280,6 +2280,7 @@ static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
return -EOPNOTSUPP;
break;
case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
if (!capable(CAP_SYS_RAWIO) ||
!capable(CAP_NET_RAW))
return -EPERM;
@@ -2344,20 +2345,20 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
struct uverbs_attr_bundle *attrs,
int type)
{
- struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
+ struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
u64 act_size;
int err;
/* Allocation size must a multiple of the basic block size
* and a power of 2.
*/
- act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
+ act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
act_size = roundup_pow_of_two(act_size);
dm->size = act_size;
- err = mlx5_cmd_alloc_sw_icm(dm_db, type, act_size,
- to_mucontext(ctx)->devx_uid, &dm->dev_addr,
- &dm->icm_dm.obj_id);
+ err = mlx5_dm_sw_icm_alloc(dev, type, act_size,
+ to_mucontext(ctx)->devx_uid, &dm->dev_addr,
+ &dm->icm_dm.obj_id);
if (err)
return err;
@@ -2365,9 +2366,9 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
&dm->dev_addr, sizeof(dm->dev_addr));
if (err)
- mlx5_cmd_dealloc_sw_icm(dm_db, type, dm->size,
- to_mucontext(ctx)->devx_uid,
- dm->dev_addr, dm->icm_dm.obj_id);
+ mlx5_dm_sw_icm_dealloc(dev, type, dm->size,
+ to_mucontext(ctx)->devx_uid, dm->dev_addr,
+ dm->icm_dm.obj_id);
return err;
}
@@ -2407,8 +2408,14 @@ struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
attrs);
break;
case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ err = handle_alloc_dm_sw_icm(context, dm,
+ attr, attrs,
+ MLX5_SW_ICM_TYPE_STEERING);
+ break;
case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
- err = handle_alloc_dm_sw_icm(context, dm, attr, attrs, type);
+ err = handle_alloc_dm_sw_icm(context, dm,
+ attr, attrs,
+ MLX5_SW_ICM_TYPE_HEADER_MODIFY);
break;
default:
err = -EOPNOTSUPP;
@@ -2428,6 +2435,7 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
{
struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm;
struct mlx5_ib_dm *dm = to_mdm(ibdm);
u32 page_idx;
@@ -2439,19 +2447,23 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
if (ret)
return ret;
- page_idx = (dm->dev_addr -
- pci_resource_start(dm_db->dev->pdev, 0) -
- MLX5_CAP64_DEV_MEM(dm_db->dev,
- memic_bar_start_addr)) >>
- PAGE_SHIFT;
+ page_idx = (dm->dev_addr - pci_resource_start(dev->pdev, 0) -
+ MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr)) >>
+ PAGE_SHIFT;
bitmap_clear(ctx->dm_pages, page_idx,
DIV_ROUND_UP(dm->size, PAGE_SIZE));
break;
case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
+ dm->size, ctx->devx_uid, dm->dev_addr,
+ dm->icm_dm.obj_id);
+ if (ret)
+ return ret;
+ break;
case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
- ret = mlx5_cmd_dealloc_sw_icm(dm_db, dm->type, dm->size,
- ctx->devx_uid, dm->dev_addr,
- dm->icm_dm.obj_id);
+ ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_HEADER_MODIFY,
+ dm->size, ctx->devx_uid, dm->dev_addr,
+ dm->icm_dm.obj_id);
if (ret)
return ret;
break;
@@ -2646,7 +2658,8 @@ int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
return -EINVAL;
action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- action->modify_id = maction->flow_action_raw.action_id;
+ action->modify_hdr =
+ maction->flow_action_raw.modify_hdr;
return 0;
}
if (maction->flow_action_raw.sub_type ==
@@ -2663,8 +2676,8 @@ int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
return -EINVAL;
action->action |=
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- action->reformat_id =
- maction->flow_action_raw.action_id;
+ action->pkt_reformat =
+ maction->flow_action_raw.pkt_reformat;
return 0;
}
/* fall through */
@@ -6096,8 +6109,6 @@ static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
- struct mlx5_core_dev *mdev = dev->mdev;
-
mlx5_ib_cleanup_multiport_master(dev);
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
srcu_barrier(&dev->mr_srcu);
@@ -6105,29 +6116,11 @@ static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
}
WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
-
- WARN_ON(dev->dm.steering_sw_icm_alloc_blocks &&
- !bitmap_empty(
- dev->dm.steering_sw_icm_alloc_blocks,
- BIT(MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size) -
- MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));
-
- kfree(dev->dm.steering_sw_icm_alloc_blocks);
-
- WARN_ON(dev->dm.header_modify_sw_icm_alloc_blocks &&
- !bitmap_empty(dev->dm.header_modify_sw_icm_alloc_blocks,
- BIT(MLX5_CAP_DEV_MEM(
- mdev, log_header_modify_sw_icm_size) -
- MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));
-
- kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
}
static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
- u64 header_modify_icm_blocks = 0;
- u64 steering_icm_blocks = 0;
int err;
int i;
@@ -6174,51 +6167,17 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
INIT_LIST_HEAD(&dev->qp_list);
spin_lock_init(&dev->reset_flow_resource_lock);
- if (MLX5_CAP_GEN_64(mdev, general_obj_types) &
- MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM) {
- if (MLX5_CAP64_DEV_MEM(mdev, steering_sw_icm_start_address)) {
- steering_icm_blocks =
- BIT(MLX5_CAP_DEV_MEM(mdev,
- log_steering_sw_icm_size) -
- MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));
-
- dev->dm.steering_sw_icm_alloc_blocks =
- kcalloc(BITS_TO_LONGS(steering_icm_blocks),
- sizeof(unsigned long), GFP_KERNEL);
- if (!dev->dm.steering_sw_icm_alloc_blocks)
- goto err_mp;
- }
-
- if (MLX5_CAP64_DEV_MEM(mdev,
- header_modify_sw_icm_start_address)) {
- header_modify_icm_blocks = BIT(
- MLX5_CAP_DEV_MEM(
- mdev, log_header_modify_sw_icm_size) -
- MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));
-
- dev->dm.header_modify_sw_icm_alloc_blocks =
- kcalloc(BITS_TO_LONGS(header_modify_icm_blocks),
- sizeof(unsigned long), GFP_KERNEL);
- if (!dev->dm.header_modify_sw_icm_alloc_blocks)
- goto err_dm;
- }
- }
-
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
err = init_srcu_struct(&dev->mr_srcu);
if (err)
- goto err_dm;
+ goto err_mp;
}
return 0;
-err_dm:
- kfree(dev->dm.steering_sw_icm_alloc_blocks);
- kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
-
err_mp:
mlx5_ib_cleanup_multiport_master(dev);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 9ae587b74b12..125a507c10ed 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -868,7 +868,10 @@ struct mlx5_ib_flow_action {
struct {
struct mlx5_ib_dev *dev;
u32 sub_type;
- u32 action_id;
+ union {
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ };
} flow_action_raw;
};
};
@@ -881,8 +884,6 @@ struct mlx5_dm {
*/
spinlock_t lock;
DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
- unsigned long *steering_sw_icm_alloc_blocks;
- unsigned long *header_modify_sw_icm_alloc_blocks;
};
struct mlx5_read_counters_attr {
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index c48e29486b10..1d8d36de4d20 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -13,7 +13,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -633,61 +633,75 @@ mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset)
return ARRAY_SIZE(mt7530_mib);
}
-static void mt7530_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
+ u8 tx_delay = 0;
+ int val;
- if (phy_is_pseudo_fixed_link(phydev)) {
- dev_dbg(priv->dev, "phy-mode for master device = %x\n",
- phydev->interface);
+ mutex_lock(&priv->reg_mutex);
- /* Setup TX circuit incluing relevant PAD and driving */
- mt7530_pad_clk_setup(ds, phydev->interface);
+ val = mt7530_read(priv, MT7530_MHWTRAP);
- if (priv->id == ID_MT7530) {
- /* Setup RX circuit, relevant PAD and driving on the
- * host which must be placed after the setup on the
- * device side is all finished.
- */
- mt7623_pad_clk_setup(ds);
- }
- } else {
- u16 lcl_adv = 0, rmt_adv = 0;
- u8 flowctrl;
- u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE;
+ val |= MHWTRAP_MANUAL | MHWTRAP_P5_MAC_SEL | MHWTRAP_P5_DIS;
+ val &= ~MHWTRAP_P5_RGMII_MODE & ~MHWTRAP_PHY0_SEL;
- switch (phydev->speed) {
- case SPEED_1000:
- mcr |= PMCR_FORCE_SPEED_1000;
- break;
- case SPEED_100:
- mcr |= PMCR_FORCE_SPEED_100;
- break;
- }
+ switch (priv->p5_intf_sel) {
+ case P5_INTF_SEL_PHY_P0:
+ /* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */
+ val |= MHWTRAP_PHY0_SEL;
+ /* fall through */
+ case P5_INTF_SEL_PHY_P4:
+ /* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */
+ val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS;
+
+ /* Setup the MAC by default for the cpu port */
+ mt7530_write(priv, MT7530_PMCR_P(5), 0x56300);
+ break;
+ case P5_INTF_SEL_GMAC5:
+ /* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */
+ val &= ~MHWTRAP_P5_DIS;
+ break;
+ case P5_DISABLED:
+ interface = PHY_INTERFACE_MODE_NA;
+ break;
+ default:
+ dev_err(ds->dev, "Unsupported p5_intf_sel %d\n",
+ priv->p5_intf_sel);
+ goto unlock_exit;
+ }
- if (phydev->link)
- mcr |= PMCR_FORCE_LNK;
+ /* Setup RGMII settings */
+ if (phy_interface_mode_is_rgmii(interface)) {
+ val |= MHWTRAP_P5_RGMII_MODE;
- if (phydev->duplex) {
- mcr |= PMCR_FORCE_FDX;
+ /* P5 RGMII RX Clock Control: delay setting for 1000M */
+ mt7530_write(priv, MT7530_P5RGMIIRXCR, CSR_RGMII_EDGE_ALIGN);
- if (phydev->pause)
- rmt_adv = LPA_PAUSE_CAP;
- if (phydev->asym_pause)
- rmt_adv |= LPA_PAUSE_ASYM;
+ /* Don't set delay in DSA mode */
+ if (!dsa_is_dsa_port(priv->ds, 5) &&
+ (interface == PHY_INTERFACE_MODE_RGMII_TXID ||
+ interface == PHY_INTERFACE_MODE_RGMII_ID))
+ tx_delay = 4; /* n * 0.5 ns */
- lcl_adv = linkmode_adv_to_lcl_adv_t(
- phydev->advertising);
- flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+ /* P5 RGMII TX Clock Control: delay x */
+ mt7530_write(priv, MT7530_P5RGMIITXCR,
+ CSR_RGMII_TXC_CFG(0x10 + tx_delay));
- if (flowctrl & FLOW_CTRL_TX)
- mcr |= PMCR_TX_FC_EN;
- if (flowctrl & FLOW_CTRL_RX)
- mcr |= PMCR_RX_FC_EN;
- }
- mt7530_write(priv, MT7530_PMCR_P(port), mcr);
+ /* reduce P5 RGMII Tx driving, 8mA */
+ mt7530_write(priv, MT7530_IO_DRV_CR,
+ P5_IO_CLK_DRV(1) | P5_IO_DATA_DRV(1));
}
+
+ mt7530_write(priv, MT7530_MHWTRAP, val);
+
+ dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n",
+ val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface));
+
+ priv->p5_interface = interface;
+
+unlock_exit:
+ mutex_unlock(&priv->reg_mutex);
}
static int
@@ -698,9 +712,6 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv,
mt7530_write(priv, MT7530_PVC_P(port),
PORT_SPEC_TAG);
- /* Setup the MAC by default for the cpu port */
- mt7530_write(priv, MT7530_PMCR_P(port), PMCR_CPUP_LINK);
-
/* Disable auto learning on the cpu port */
mt7530_set(priv, MT7530_PSC_P(port), SA_DIS);
@@ -731,9 +742,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
mutex_lock(&priv->reg_mutex);
- /* Setup the MAC for the user port */
- mt7530_write(priv, MT7530_PMCR_P(port), PMCR_USERP_LINK);
-
/* Allow the user port gets connected to the cpu port and also
* restore the port matrix if the port is the member of a certain
* bridge.
@@ -742,7 +750,7 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
priv->ports[port].enable = true;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
priv->ports[port].pm);
- mt7530_port_set_status(priv, port, 1);
+ mt7530_port_set_status(priv, port, 0);
mutex_unlock(&priv->reg_mutex);
@@ -1232,10 +1240,13 @@ static int
mt7530_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
- int ret, i;
- u32 id, val;
- struct device_node *dn;
+ struct device_node *phy_node;
+ struct device_node *mac_np;
struct mt7530_dummy_poll p;
+ phy_interface_t interface;
+ struct device_node *dn;
+ u32 id, val;
+ int ret, i;
/* The parent node of master netdev which holds the common system
* controller also is the container for two GMACs nodes representing
@@ -1305,6 +1316,8 @@ mt7530_setup(struct dsa_switch *ds)
val |= MHWTRAP_MANUAL;
mt7530_write(priv, MT7530_MHWTRAP, val);
+ priv->p6_interface = PHY_INTERFACE_MODE_NA;
+
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
@@ -1321,6 +1334,40 @@ mt7530_setup(struct dsa_switch *ds)
mt7530_port_disable(ds, i);
}
+ /* Setup port 5 */
+ priv->p5_intf_sel = P5_DISABLED;
+ interface = PHY_INTERFACE_MODE_NA;
+
+ if (!dsa_is_unused_port(ds, 5)) {
+ priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
+ interface = of_get_phy_mode(ds->ports[5].dn);
+ } else {
+ /* Scan the ethernet nodes. Look for GMAC1, look up the used phy */
+ for_each_child_of_node(dn, mac_np) {
+ if (!of_device_is_compatible(mac_np,
+ "mediatek,eth-mac"))
+ continue;
+
+ ret = of_property_read_u32(mac_np, "reg", &id);
+ if (ret < 0 || id != 1)
+ continue;
+
+ phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
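+ /* A phy on the switch's own MDIO bus is one of the switch's
+ * internal phys (port 0 or port 4)
+ */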
+ if (phy_node->parent == priv->dev->of_node->parent) {
+ interface = of_get_phy_mode(mac_np);
+ id = of_mdio_parse_addr(ds->dev, phy_node);
+ if (id == 0)
+ priv->p5_intf_sel = P5_INTF_SEL_PHY_P0;
+ if (id == 4)
+ priv->p5_intf_sel = P5_INTF_SEL_PHY_P4;
+ }
+ of_node_put(phy_node);
+ break;
+ }
+ }
+
+ mt7530_setup_port5(ds, interface);
+
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
@@ -1329,6 +1376,216 @@ mt7530_setup(struct dsa_switch *ds)
return 0;
}
+static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 mcr_cur, mcr_new;
+
+ switch (port) {
+ case 0: /* Internal phy */
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ if (state->interface != PHY_INTERFACE_MODE_GMII)
+ return;
+ break;
+ case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
+ if (priv->p5_interface == state->interface)
+ break;
+ if (!phy_interface_mode_is_rgmii(state->interface) &&
+ state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_GMII)
+ return;
+
+ mt7530_setup_port5(ds, state->interface);
+ break;
+ case 6: /* 1st cpu port */
+ if (priv->p6_interface == state->interface)
+ break;
+
+ if (state->interface != PHY_INTERFACE_MODE_RGMII &&
+ state->interface != PHY_INTERFACE_MODE_TRGMII)
+ return;
+
+ /* Setup TX circuit including relevant PAD and driving */
+ mt7530_pad_clk_setup(ds, state->interface);
+
+ if (priv->id == ID_MT7530) {
+ /* Setup RX circuit, relevant PAD and driving on the
+ * host which must be placed after the setup on the
+ * device side is all finished.
+ */
+ mt7623_pad_clk_setup(ds);
+ }
+
+ priv->p6_interface = state->interface;
+ break;
+ default:
+ dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
+ return;
+ }
+
+ if (phylink_autoneg_inband(mode)) {
+ dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
+ __func__);
+ return;
+ }
+
+ mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port));
+ mcr_new = mcr_cur;
+ mcr_new &= ~(PMCR_FORCE_SPEED_1000 | PMCR_FORCE_SPEED_100 |
+ PMCR_FORCE_FDX | PMCR_TX_FC_EN | PMCR_RX_FC_EN);
+ mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
+ PMCR_BACKPR_EN | PMCR_FORCE_MODE | PMCR_FORCE_LNK;
+
+ /* Are we connected to an external phy? */
+ if (port == 5 && dsa_is_user_port(ds, 5))
+ mcr_new |= PMCR_EXT_PHY;
+
+ switch (state->speed) {
+ case SPEED_1000:
+ mcr_new |= PMCR_FORCE_SPEED_1000;
+ break;
+ case SPEED_100:
+ mcr_new |= PMCR_FORCE_SPEED_100;
+ break;
+ }
+ if (state->duplex == DUPLEX_FULL) {
+ mcr_new |= PMCR_FORCE_FDX;
+ if (state->pause & MLO_PAUSE_TX)
+ mcr_new |= PMCR_TX_FC_EN;
+ if (state->pause & MLO_PAUSE_RX)
+ mcr_new |= PMCR_RX_FC_EN;
+ }
+
+ if (mcr_new != mcr_cur)
+ mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
+}
+
+static void mt7530_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7530_port_set_status(priv, port, 0);
+}
+
+static void mt7530_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7530_port_set_status(priv, port, 1);
+}
+
+static void mt7530_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ switch (port) {
+ case 0: /* Internal phy */
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_GMII)
+ goto unsupported;
+ break;
+ case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ !phy_interface_mode_is_rgmii(state->interface) &&
+ state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_GMII)
+ goto unsupported;
+ break;
+ case 6: /* 1st cpu port */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_RGMII &&
+ state->interface != PHY_INTERFACE_MODE_TRGMII)
+ goto unsupported;
+ break;
+ default:
+ dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
+unsupported:
+ linkmode_zero(supported);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+
+ if (state->interface == PHY_INTERFACE_MODE_TRGMII) {
+ phylink_set(mask, 1000baseT_Full);
+ } else {
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ if (state->interface != PHY_INTERFACE_MODE_MII) {
+ phylink_set(mask, 1000baseT_Half);
+ phylink_set(mask, 1000baseT_Full);
+ if (port == 5)
+ phylink_set(mask, 1000baseX_Full);
+ }
+ }
+
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
+}
+
+static int
+mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 pmsr;
+
+ if (port < 0 || port >= MT7530_NUM_PORTS)
+ return -EINVAL;
+
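+ /* The port MAC status register reflects the resolved link state */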
+ pmsr = mt7530_read(priv, MT7530_PMSR_P(port));
+
+ state->link = (pmsr & PMSR_LINK);
+ state->an_complete = state->link;
+ state->duplex = !!(pmsr & PMSR_DPX);
+
+ switch (pmsr & PMSR_SPEED_MASK) {
+ case PMSR_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ case PMSR_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case PMSR_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ state->pause &= ~(MLO_PAUSE_RX | MLO_PAUSE_TX);
+ if (pmsr & PMSR_RX_FC)
+ state->pause |= MLO_PAUSE_RX;
+ if (pmsr & PMSR_TX_FC)
+ state->pause |= MLO_PAUSE_TX;
+
+ return 1;
+}
+
static const struct dsa_switch_ops mt7530_switch_ops = {
.get_tag_protocol = mtk_get_tag_protocol,
.setup = mt7530_setup,
@@ -1337,7 +1594,6 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.phy_write = mt7530_phy_write,
.get_ethtool_stats = mt7530_get_ethtool_stats,
.get_sset_count = mt7530_get_sset_count,
- .adjust_link = mt7530_adjust_link,
.port_enable = mt7530_port_enable,
.port_disable = mt7530_port_disable,
.port_stp_state_set = mt7530_stp_state_set,
@@ -1350,6 +1606,11 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_vlan_prepare = mt7530_port_vlan_prepare,
.port_vlan_add = mt7530_port_vlan_add,
.port_vlan_del = mt7530_port_vlan_del,
+ .phylink_validate = mt7530_phylink_validate,
+ .phylink_mac_link_state = mt7530_phylink_mac_link_state,
+ .phylink_mac_config = mt7530_phylink_mac_config,
+ .phylink_mac_link_down = mt7530_phylink_mac_link_down,
+ .phylink_mac_link_up = mt7530_phylink_mac_link_up,
};
static const struct of_device_id mt7530_of_match[] = {
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index bfac90f48102..ccb9da8cad0d 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -186,6 +186,7 @@ enum mt7530_vlan_port_attr {
/* Register for port MAC control register */
#define MT7530_PMCR_P(x) (0x3000 + ((x) * 0x100))
#define PMCR_IFG_XMIT(x) (((x) & 0x3) << 18)
+#define PMCR_EXT_PHY BIT(17)
#define PMCR_MAC_MODE BIT(16)
#define PMCR_FORCE_MODE BIT(15)
#define PMCR_TX_EN BIT(14)
@@ -198,26 +199,20 @@ enum mt7530_vlan_port_attr {
#define PMCR_FORCE_SPEED_100 BIT(2)
#define PMCR_FORCE_FDX BIT(1)
#define PMCR_FORCE_LNK BIT(0)
-#define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
- PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \
- PMCR_TX_EN | PMCR_RX_EN | \
- PMCR_TX_FC_EN | PMCR_RX_FC_EN)
-#define PMCR_CPUP_LINK (PMCR_COMMON_LINK | PMCR_FORCE_MODE | \
- PMCR_FORCE_SPEED_1000 | \
- PMCR_FORCE_FDX | \
- PMCR_FORCE_LNK)
-#define PMCR_USERP_LINK PMCR_COMMON_LINK
-#define PMCR_FIXED_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
- PMCR_FORCE_MODE | PMCR_TX_EN | \
- PMCR_RX_EN | PMCR_BACKPR_EN | \
- PMCR_BACKOFF_EN | \
- PMCR_FORCE_SPEED_1000 | \
- PMCR_FORCE_FDX | \
- PMCR_FORCE_LNK)
-#define PMCR_FIXED_LINK_FC (PMCR_FIXED_LINK | \
- PMCR_TX_FC_EN | PMCR_RX_FC_EN)
+#define PMCR_SPEED_MASK (PMCR_FORCE_SPEED_100 | \
+ PMCR_FORCE_SPEED_1000)
#define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100)
+#define PMSR_EEE1G BIT(7)
+#define PMSR_EEE100M BIT(6)
+#define PMSR_RX_FC BIT(5)
+#define PMSR_TX_FC BIT(4)
+#define PMSR_SPEED_1000 BIT(3)
+#define PMSR_SPEED_100 BIT(2)
+#define PMSR_SPEED_10 0x00
+#define PMSR_SPEED_MASK (PMSR_SPEED_100 | PMSR_SPEED_1000)
+#define PMSR_DPX BIT(1)
+#define PMSR_LINK BIT(0)
/* Register for MIB */
#define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100)
@@ -251,6 +246,7 @@ enum mt7530_vlan_port_attr {
/* Register for hw trap modification */
#define MT7530_MHWTRAP 0x7804
+#define MHWTRAP_PHY0_SEL BIT(20)
#define MHWTRAP_MANUAL BIT(16)
#define MHWTRAP_P5_MAC_SEL BIT(13)
#define MHWTRAP_P6_DIS BIT(8)
@@ -408,6 +404,30 @@ struct mt7530_port {
u16 pvid;
};
+/* Port 5 interface select definitions */
+enum p5_interface_select {
+ P5_DISABLED = 0,
+ P5_INTF_SEL_PHY_P0,
+ P5_INTF_SEL_PHY_P4,
+ P5_INTF_SEL_GMAC5,
+};
+
+static const char *p5_intf_modes(unsigned int p5_interface)
+{
+ switch (p5_interface) {
+ case P5_DISABLED:
+ return "DISABLED";
+ case P5_INTF_SEL_PHY_P0:
+ return "PHY P0";
+ case P5_INTF_SEL_PHY_P4:
+ return "PHY P4";
+ case P5_INTF_SEL_GMAC5:
+ return "GMAC5";
+ default:
+ return "unknown";
+ }
+}
+
/* struct mt7530_priv - This is the main data structure for holding the state
* of the driver
* @dev: The device pointer
@@ -423,6 +443,8 @@ struct mt7530_port {
* @ports: Holding the state among ports
* @reg_mutex: The lock for protecting among process accessing
* registers
+ * @p6_interface: Holding the current port 6 interface
+ * @p5_interface: Holding the current port 5 interface
+ * @p5_intf_sel: Holding the current port 5 interface select
*/
struct mt7530_priv {
struct device *dev;
@@ -435,6 +457,9 @@ struct mt7530_priv {
struct gpio_desc *reset;
unsigned int id;
bool mcm;
+ phy_interface_t p6_interface;
+ phy_interface_t p5_interface;
+ unsigned int p5_intf_sel;
struct mt7530_port ports[MT7530_NUM_PORTS];
/* protect among processes for registers access*/
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 93a2d4deb27c..2830dc283ce6 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -168,6 +168,7 @@ config ETHOC
source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
+source "drivers/net/ethernet/pensando/Kconfig"
source "drivers/net/ethernet/qlogic/Kconfig"
source "drivers/net/ethernet/qualcomm/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index fb9155cffcff..061edd22f507 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -97,3 +97,4 @@ obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
+obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 5402867272be..162d7d8fb295 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1348,7 +1348,7 @@ static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
return total;
}
-static void wait_for_fq_empty(struct dpaa2_eth_priv *priv)
+static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
{
int retries = 10;
u32 pending;
@@ -1360,6 +1360,31 @@ static void wait_for_fq_empty(struct dpaa2_eth_priv *priv)
} while (pending && --retries);
}
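+
+/* The Tx pending frames counter (DPNI statistics page 6) is only
+ * available on firmware exposing DPNI API version 7.13 or newer
+ */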
+#define DPNI_TX_PENDING_VER_MAJOR 7
+#define DPNI_TX_PENDING_VER_MINOR 13
+static void wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
+{
+ union dpni_statistics stats;
+ int retries = 10;
+ int err;
+
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
+ DPNI_TX_PENDING_VER_MINOR) < 0)
+ goto out;
+
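+ /* Poll until no Tx frames are pending, or fall back to the fixed sleep */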
+ do {
+ err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
+ &stats);
+ if (err)
+ goto out;
+ if (stats.page_6.tx_pending_frames == 0)
+ return;
+ } while (--retries);
+
+out:
+ msleep(500);
+}
+
static int dpaa2_eth_stop(struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
@@ -1379,7 +1404,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
* on WRIOP. After it finishes, wait until all remaining frames on Rx
* and Tx conf queues are consumed on NAPI poll.
*/
- msleep(500);
+ wait_for_egress_fq_empty(priv);
do {
dpni_disable(priv->mc_io, 0, priv->mc_token);
@@ -1395,7 +1420,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
*/
}
- wait_for_fq_empty(priv);
+ wait_for_ingress_fq_empty(priv);
disable_ch_napi(priv);
/* Empty the buffer pool */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 93076fe01b45..0aa1c34019bb 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -28,6 +28,11 @@ static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
"[hw] rx nobuffer discards",
"[hw] tx discarded frames",
"[hw] tx confirmed frames",
+ "[hw] tx dequeued bytes",
+ "[hw] tx dequeued frames",
+ "[hw] tx rejected bytes",
+ "[hw] tx rejected frames",
+ "[hw] tx pending frames",
};
#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
@@ -188,27 +193,33 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_eth_drv_stats *extras;
struct dpaa2_eth_ch_stats *ch_stats;
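+ /* Size of each DPNI statistics page, used below to derive the
+ * number of 64-bit counters it holds
+ */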
+ int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
+ sizeof(dpni_stats.page_0),
+ sizeof(dpni_stats.page_1),
+ sizeof(dpni_stats.page_2),
+ sizeof(dpni_stats.page_3),
+ sizeof(dpni_stats.page_4),
+ sizeof(dpni_stats.page_5),
+ sizeof(dpni_stats.page_6),
+ };
memset(data, 0,
sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
/* Print standard counters, from DPNI statistics */
- for (j = 0; j <= 2; j++) {
+ for (j = 0; j <= 6; j++) {
+ /* We're not interested in pages 4 & 5 for now */
+ if (j == 4 || j == 5)
+ continue;
err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
j, &dpni_stats);
- if (err != 0)
+ if (err == -EINVAL)
+ /* Older firmware versions don't support all pages */
+ memset(&dpni_stats, 0, sizeof(dpni_stats));
+ else if (err)
netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
- switch (j) {
- case 0:
- num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64);
- break;
- case 1:
- num_cnt = sizeof(dpni_stats.page_1) / sizeof(u64);
- break;
- case 2:
- num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64);
- break;
- }
+
+ num_cnt = dpni_stats_page_size[j] / sizeof(u64);
for (k = 0; k < num_cnt; k++)
*(data + i++) = dpni_stats.raw.counter[k];
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index 05e30893dee6..dd54e6953aeb 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -1470,7 +1470,7 @@ int dpni_get_queue(struct fsl_mc_io *mc_io,
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @page: Selects the statistics page to retrieve, see
- * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
+ * DPNI_GET_STATISTICS output. Pages are numbered 0 to 6.
* @stat: Structure containing the statistics
*
* Return: '0' on Success; Error code otherwise.
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index 3e8fc6c7a8da..fd583911b6c0 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -416,6 +416,26 @@ int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
* lack of buffers
* @page_2.egress_discarded_frames: Egress discarded frame count
* @page_2.egress_confirmed_frames: Egress confirmed frame count
+ * @page_3: Page_3 statistics structure
+ * @page_3.egress_dequeue_bytes: Cumulative count of the number of bytes
+ * dequeued from egress FQs
+ * @page_3.egress_dequeue_frames: Cumulative count of the number of frames
+ * dequeued from egress FQs
+ * @page_3.egress_reject_bytes: Cumulative count of the number of bytes in
+ * egress frames whose enqueue was rejected
+ * @page_3.egress_reject_frames: Cumulative count of the number of egress
+ * frames whose enqueue was rejected
+ * @page_4: Page_4 statistics structure: congestion points
+ * @page_4.cgr_reject_frames: number of rejected frames due to congestion point
+ * @page_4.cgr_reject_bytes: number of rejected bytes due to congestion point
+ * @page_5: Page_5 statistics structure: policer
+ * @page_5.policer_cnt_red: Number of red colored frames
+ * @page_5.policer_cnt_yellow: number of yellow colored frames
+ * @page_5.policer_cnt_green: number of green colored frames
+ * @page_5.policer_cnt_re_red: number of recolored red frames
+ * @page_5.policer_cnt_re_yellow: number of recolored yellow frames
+ * @page_6: Page_6 statistics structure
+ * @page_6.tx_pending_frames: total number of frames pending in egress FQs
* @raw: raw statistics structure, used to index counters
*/
union dpni_statistics {
@@ -443,6 +463,26 @@ union dpni_statistics {
u64 egress_confirmed_frames;
} page_2;
struct {
+ u64 egress_dequeue_bytes;
+ u64 egress_dequeue_frames;
+ u64 egress_reject_bytes;
+ u64 egress_reject_frames;
+ } page_3;
+ struct {
+ u64 cgr_reject_frames;
+ u64 cgr_reject_bytes;
+ } page_4;
+ struct {
+ u64 policer_cnt_red;
+ u64 policer_cnt_yellow;
+ u64 policer_cnt_green;
+ u64 policer_cnt_re_red;
+ u64 policer_cnt_re_yellow;
+ } page_5;
+ struct {
+ u64 tx_pending_frames;
+ } page_6;
+ struct {
u64 counter[DPNI_STATISTICS_CNT];
} raw;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index bf9aa533a7c6..4da0cde9695b 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -35,9 +35,9 @@ struct ice_aqc_get_ver {
/* Queue Shutdown (direct 0x0003) */
struct ice_aqc_q_shutdown {
- __le32 driver_unloading;
+ u8 driver_unloading;
#define ICE_AQC_DRIVER_UNLOADING BIT(0)
- u8 reserved[12];
+ u8 reserved[15];
};
/* Request resource ownership (direct 0x0008)
@@ -91,6 +91,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_SRIOV 0x0012
#define ICE_AQC_CAPS_VF 0x0013
#define ICE_AQC_CAPS_VSI 0x0017
+#define ICE_AQC_CAPS_DCB 0x0018
#define ICE_AQC_CAPS_RSS 0x0040
#define ICE_AQC_CAPS_RXQS 0x0041
#define ICE_AQC_CAPS_TXQS 0x0042
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 302ad981129c..9492cd34b09d 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1275,7 +1275,7 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
if (unloading)
- cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);
+ cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
@@ -1594,6 +1594,18 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
prefix, func_p->guar_num_vsi);
}
break;
+ case ICE_AQC_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: DCB = %d\n", prefix, caps->dcb);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: active TC bitmap = %d\n", prefix,
+ caps->active_tc_bitmap);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: TC max = %d\n", prefix, caps->maxtc);
+ break;
case ICE_AQC_CAPS_RSS:
caps->rss_table_size = number;
caps->rss_table_entry_width = logical_id;
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index d60c942249e8..c5ee8d930611 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -444,9 +444,15 @@ ice_parse_cee_pgcfg_tlv(struct ice_cee_feat_tlv *tlv,
* |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
* ---------------------------------
*/
- ice_for_each_traffic_class(i)
+ ice_for_each_traffic_class(i) {
etscfg->tcbwtable[i] = buf[offset++];
+ if (etscfg->prio_table[i] == ICE_CEE_PGID_STRICT)
+ dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_STRICT;
+ else
+ dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS;
+ }
+
/* Number of TCs supported (1 octet) */
etscfg->maxtcs = buf[offset];
}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index d9578919aad8..e922adf1fa15 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -413,7 +413,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg));
dcbcfg->etscfg.willing = 1;
- dcbcfg->etscfg.maxtcs = 8;
+ dcbcfg->etscfg.maxtcs = hw->func_caps.common_cap.maxtc;
dcbcfg->etscfg.tcbwtable[0] = 100;
dcbcfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
@@ -422,7 +422,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
dcbcfg->etsrec.willing = 0;
dcbcfg->pfc.willing = 1;
- dcbcfg->pfc.pfccap = IEEE_8021QAZ_MAX_TCS;
+ dcbcfg->pfc.pfccap = hw->func_caps.common_cap.maxtc;
dcbcfg->numapps = 1;
dcbcfg->app[0].selector = ICE_APP_SEL_ETHTYPE;
@@ -452,11 +452,18 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
port_info = hw->port_info;
err = ice_init_dcb(hw);
+ if (err && !port_info->is_sw_lldp) {
+ dev_err(&pf->pdev->dev, "Error initializing DCB %d\n", err);
+ goto dcb_init_err;
+ }
+
+ dev_info(&pf->pdev->dev,
+ "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
+ pf->hw.func_caps.common_cap.maxtc);
if (err) {
/* FW LLDP is disabled, activate SW DCBX/LLDP mode */
dev_info(&pf->pdev->dev,
"FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
- port_info->is_sw_lldp = true;
clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
err = ice_dcb_sw_dflt_cfg(pf, locked);
if (err) {
@@ -468,11 +475,9 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
- set_bit(ICE_FLAG_DCB_ENA, pf->flags);
return 0;
}
- port_info->is_sw_lldp = false;
set_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
/* DCBX in FW and LLDP enabled in FW */
@@ -484,7 +489,6 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
if (err)
goto dcb_init_err;
- dev_info(&pf->pdev->dev, "DCBX offload supported\n");
return err;
dcb_init_err:
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index f7dd0bd03d39..edba5bd79097 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3253,25 +3253,25 @@ static int
ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
struct ice_ring_container *rc, struct ice_vsi *vsi)
{
+ const char *c_type_str = (c_type == ICE_RX_CONTAINER) ? "rx" : "tx";
+ u32 use_adaptive_coalesce, coalesce_usecs;
struct ice_pf *pf = vsi->back;
u16 itr_setting;
if (!rc->ring)
return -EINVAL;
- itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC;
-
switch (c_type) {
case ICE_RX_CONTAINER:
if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
(ec->rx_coalesce_usecs_high &&
ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
netdev_info(vsi->netdev,
- "Invalid value, rx-usecs-high valid values are 0 (disabled), %d-%d\n",
- pf->hw.intrl_gran, ICE_MAX_INTRL);
+ "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n",
+ c_type_str, pf->hw.intrl_gran,
+ ICE_MAX_INTRL);
return -EINVAL;
}
-
if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) {
rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high;
wr32(&pf->hw, GLINT_RATE(rc->ring->q_vector->reg_idx),
@@ -3279,60 +3279,60 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
pf->hw.intrl_gran));
}
- if (ec->rx_coalesce_usecs != itr_setting &&
- ec->use_adaptive_rx_coalesce) {
- netdev_info(vsi->netdev,
- "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
- return -EINVAL;
- }
+ use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
+ coalesce_usecs = ec->rx_coalesce_usecs;
- if (ec->rx_coalesce_usecs > ICE_ITR_MAX) {
- netdev_info(vsi->netdev,
- "Invalid value, rx-usecs range is 0-%d\n",
- ICE_ITR_MAX);
- return -EINVAL;
- }
-
- if (ec->use_adaptive_rx_coalesce) {
- rc->itr_setting |= ICE_ITR_DYNAMIC;
- } else {
- rc->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
- rc->target_itr = ITR_TO_REG(rc->itr_setting);
- }
break;
case ICE_TX_CONTAINER:
if (ec->tx_coalesce_usecs_high) {
netdev_info(vsi->netdev,
- "setting tx-usecs-high is not supported\n");
- return -EINVAL;
- }
-
- if (ec->tx_coalesce_usecs != itr_setting &&
- ec->use_adaptive_tx_coalesce) {
- netdev_info(vsi->netdev,
- "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
+ "setting %s-usecs-high is not supported\n",
+ c_type_str);
return -EINVAL;
}
- if (ec->tx_coalesce_usecs > ICE_ITR_MAX) {
- netdev_info(vsi->netdev,
- "Invalid value, tx-usecs range is 0-%d\n",
- ICE_ITR_MAX);
- return -EINVAL;
- }
+ use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
+ coalesce_usecs = ec->tx_coalesce_usecs;
- if (ec->use_adaptive_tx_coalesce) {
- rc->itr_setting |= ICE_ITR_DYNAMIC;
- } else {
- rc->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
- rc->target_itr = ITR_TO_REG(rc->itr_setting);
- }
break;
default:
dev_dbg(&pf->pdev->dev, "Invalid container type %d\n", c_type);
return -EINVAL;
}
+ itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC;
+ if (coalesce_usecs != itr_setting && use_adaptive_coalesce) {
+ netdev_info(vsi->netdev,
+ "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n",
+ c_type_str, c_type_str);
+ return -EINVAL;
+ }
+
+ if (coalesce_usecs > ICE_ITR_MAX) {
+ netdev_info(vsi->netdev,
+ "Invalid value, %s-usecs range is 0-%d\n",
+ c_type_str, ICE_ITR_MAX);
+ return -EINVAL;
+ }
+
+ /* hardware only supports an ITR granularity of 2us */
+ if (coalesce_usecs % 2 != 0) {
+ netdev_info(vsi->netdev,
+ "Invalid value, %s-usecs must be even\n",
+ c_type_str);
+ return -EINVAL;
+ }
+
+ if (use_adaptive_coalesce) {
+ rc->itr_setting |= ICE_ITR_DYNAMIC;
+ } else {
+ /* store the user-facing value exactly as it was set */
+ rc->itr_setting = coalesce_usecs;
+ /* set to static and convert to value HW understands */
+ rc->target_itr =
+ ITR_TO_REG(ITR_REG_ALIGN(rc->itr_setting));
+ }
+
return 0;
}
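An illustrative aside (assumption-laden sketch, not driver code): with the
2 us ITR granularity, silently aligning an odd request would program a value
the user never asked for, so the block above rejects odd values and keeps the
stored itr_setting identical to what ethtool later reads back.

	u16 requested = 51;		/* odd: rejected above with -EINVAL */
	u16 aligned = requested & ~1U;	/* 50 -- what silent 2 us alignment
					 * would have programmed; '& ~1U'
					 * stands in for the driver's
					 * ITR_REG_ALIGN() here
					 */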
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index f029aee32913..50a17a0337be 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -567,6 +567,8 @@ static void ice_reset_subtask(struct ice_pf *pf)
reset_type = ICE_RESET_CORER;
if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
reset_type = ICE_RESET_GLOBR;
+ if (test_and_clear_bit(__ICE_EMPR_RECV, pf->state))
+ reset_type = ICE_RESET_EMPR;
/* return if no valid reset type requested */
if (reset_type == ICE_RESET_INVAL)
return;
@@ -612,6 +614,22 @@ static void ice_reset_subtask(struct ice_pf *pf)
}
/**
+ * ice_print_topo_conflict - print topology conflict message
+ * @vsi: the VSI whose topology status is being checked
+ */
+static void ice_print_topo_conflict(struct ice_vsi *vsi)
+{
+ switch (vsi->port_info->phy.link_info.topo_media_conflict) {
+ case ICE_AQ_LINK_TOPO_CONFLICT:
+ case ICE_AQ_LINK_MEDIA_CONFLICT:
+ netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n");
+ break;
+ default:
+ break;
+ }
+}
+
+/**
* ice_print_link_msg - print link up or down message
* @vsi: the VSI whose link status is being queried
* @isup: boolean for if the link is now up or down
@@ -624,6 +642,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
const char *speed;
const char *fec;
const char *fc;
+ const char *an;
if (!vsi)
return;
@@ -707,6 +726,12 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
break;
}
+ /* check if autoneg completed; may be false if autoneg is not supported */
+ if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
+ an = "True";
+ else
+ an = "False";
+
/* Get FEC mode requested based on PHY caps last SW configuration */
caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
if (!caps) {
@@ -731,8 +756,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
devm_kfree(&vsi->back->pdev->dev, caps);
done:
- netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Flow Control: %s\n",
- speed, fec_req, fec, fc);
+ netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Autoneg: %s, Flow Control: %s\n",
+ speed, fec_req, fec, an, fc);
+ ice_print_topo_conflict(vsi);
}
/**
@@ -2636,6 +2662,11 @@ static void ice_remove(struct pci_dev *pdev)
ice_deinit_pf(pf);
ice_deinit_hw(&pf->hw);
ice_clear_interrupt_scheme(pf);
+ /* Issue a PFR as part of the prescribed driver unload flow. Do not
+ * do it via ice_schedule_reset() since there is no need to rebuild
+ * and the service task is already stopped.
+ */
+ ice_reset(&pf->hw, ICE_RESET_PFR);
pci_disable_pcie_error_reporting(pdev);
}
@@ -3446,12 +3477,16 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
vsi_stats = &vsi->net_stats;
- if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq)
+ if (!vsi->num_txq || !vsi->num_rxq)
return;
+
/* netdev packet/byte stats come from ring counter. These are obtained
* by summing up ring counters (done by ice_update_vsi_ring_stats).
+ * But only call the update routine and read the registers if the VSI is
+ * not down.
*/
- ice_update_vsi_ring_stats(vsi);
+ if (!test_bit(__ICE_DOWN, vsi->state))
+ ice_update_vsi_ring_stats(vsi);
stats->tx_packets = vsi_stats->tx_packets;
stats->tx_bytes = vsi_stats->tx_bytes;
stats->rx_packets = vsi_stats->rx_packets;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 99cf527d2b1a..1acdd43a2edd 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1623,12 +1623,13 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
status = ice_aq_sw_rules(hw, s_rule,
ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
ice_aqc_opc_remove_sw_rules, NULL);
- if (status)
- goto exit;
/* Remove the bookkeeping entry from the list */
devm_kfree(ice_hw_to_dev(hw), s_rule);
+ if (status)
+ goto exit;
+
list_del(&list_elem->list_entry);
devm_kfree(ice_hw_to_dev(hw), list_elem);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index b538d0b9eb80..4501d50a7dcc 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -4,15 +4,15 @@
#ifndef _ICE_TYPE_H_
#define _ICE_TYPE_H_
+#define ICE_BYTES_PER_WORD 2
+#define ICE_BYTES_PER_DWORD 4
+
#include "ice_status.h"
#include "ice_hw_autogen.h"
#include "ice_osdep.h"
#include "ice_controlq.h"
#include "ice_lan_tx_rx.h"
-#define ICE_BYTES_PER_WORD 2
-#define ICE_BYTES_PER_DWORD 4
-
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
return test_bit(tc, &bitmap);
@@ -139,6 +139,9 @@ struct ice_phy_info {
/* Common HW capabilities for SW use */
struct ice_hw_common_caps {
u32 valid_functions;
+ /* DCB capabilities */
+ u32 active_tc_bitmap;
+ u32 maxtc;
/* Tx/Rx queues */
u16 num_rxq; /* Number/Total Rx queues */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index b93324e9f4bc..c38939b1d496 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -1074,9 +1074,16 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
for (v = 0; v < pf->num_alloc_vfs; v++)
ice_trigger_vf_reset(&pf->vf[v], is_vflr);
- for (v = 0; v < pf->num_alloc_vfs; v++)
- if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[v].vf_states))
- ice_dis_vf_qs(&pf->vf[v]);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ struct ice_vsi *vsi;
+
+ vf = &pf->vf[v];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
+ ice_dis_vf_qs(vf);
+ ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
+ NULL, ICE_VF_RESET, vf->vf_id, NULL);
+ }
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -1171,12 +1178,12 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
ice_dis_vf_qs(vf);
- else
- /* Call Disable LAN Tx queue AQ call even when queues are not
- * enabled. This is needed for successful completion of VFR
- */
- ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
- NULL, ICE_VF_RESET, vf->vf_id, NULL);
+
+ /* Call Disable LAN Tx queue AQ whether or not queues are
+ * enabled. This is needed for successful completion of VFR.
+ */
+ ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
+ NULL, ICE_VF_RESET, vf->vf_id, NULL);
hw = &pf->hw;
/* poll VPGEN_VFRSTAT reg to make sure
@@ -2760,8 +2767,9 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
}
vf->num_vlan--;
- /* Disable VLAN pruning when removing VLAN */
- ice_cfg_vlan_pruning(vsi, false, false);
+ /* Disable VLAN pruning when the last VLAN is removed */
+ if (!vf->num_vlan)
+ ice_cfg_vlan_pruning(vsi, false, false);
/* Disable Unicast/Multicast VLAN promiscuous mode */
if (vlan_promisc) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 37fef8cd25e3..0d8dd885b7d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -154,3 +154,10 @@ config MLX5_EN_TLS
Build support for TLS cryptography-offload acceleration in the NIC.
Note: Support for hardware with this capability needs to be selected
for this option to become available.
+
+config MLX5_SW_STEERING
+ bool "Mellanox Technologies software-managed steering"
+ depends on MLX5_CORE_EN && MLX5_ESWITCH
+ default y
+ help
+ Build support for software-managed steering in the NIC.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index f4de9ccb5df1..5708fcc079ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -15,7 +15,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
- lib/devcom.o lib/pci_vsc.o diag/fs_tracepoint.o \
+ lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o
#
@@ -67,3 +67,10 @@ mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \
en_accel/ktls.o en_accel/ktls_tx.o
+
+mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
+ steering/dr_matcher.o steering/dr_rule.o \
+ steering/dr_icm_pool.o steering/dr_crc32.o \
+ steering/dr_ste.o steering/dr_send.o \
+ steering/dr_cmd.o steering/dr_fw.o \
+ steering/dr_action.o steering/fs_dr.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index a400f4430c28..7bf7b6fbc776 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -4,6 +4,7 @@
#include <devlink.h>
#include "mlx5_core.h"
+#include "fs_core.h"
#include "eswitch.h"
static int mlx5_devlink_flash_update(struct devlink *devlink,
@@ -107,12 +108,121 @@ void mlx5_devlink_free(struct devlink *devlink)
devlink_free(devlink);
}
+static int mlx5_devlink_fs_mode_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ char *value = val.vstr;
+ int err = 0;
+
+ if (!strcmp(value, "dmfs")) {
+ return 0;
+ } else if (!strcmp(value, "smfs")) {
+ u8 eswitch_mode;
+ bool smfs_cap;
+
+ eswitch_mode = mlx5_eswitch_mode(dev->priv.eswitch);
+ smfs_cap = mlx5_fs_dr_is_supported(dev);
+
+ if (!smfs_cap) {
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack,
+ "Software managed steering is not supported by current device");
+	} else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Software managed steering is not supported when eswitch offlaods enabled.");
+ err = -EOPNOTSUPP;
+ }
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int mlx5_devlink_fs_mode_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ enum mlx5_flow_steering_mode mode;
+
+ if (!strcmp(ctx->val.vstr, "smfs"))
+ mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else
+ mode = MLX5_FLOW_STEERING_MODE_DMFS;
+ dev->priv.steering->mode = mode;
+
+ return 0;
+}
+
+static int mlx5_devlink_fs_mode_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
+ strcpy(ctx->val.vstr, "smfs");
+ else
+ strcpy(ctx->val.vstr, "dmfs");
+ return 0;
+}
+
+enum mlx5_devlink_param_id {
+ MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
+};
+
+static const struct devlink_param mlx5_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
+ "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set,
+ mlx5_devlink_fs_mode_validate),
+};
+
+static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ union devlink_param_value value;
+
+ if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS)
+ strcpy(value.vstr, "dmfs");
+ else
+ strcpy(value.vstr, "smfs");
+ devlink_param_driverinit_value_set(devlink,
+ MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
+ value);
+}
+
int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
{
- return devlink_register(devlink, dev);
+ int err;
+
+ err = devlink_register(devlink, dev);
+ if (err)
+ return err;
+
+ err = devlink_params_register(devlink, mlx5_devlink_params,
+ ARRAY_SIZE(mlx5_devlink_params));
+ if (err)
+ goto params_reg_err;
+ mlx5_devlink_set_params_init_values(devlink);
+ devlink_params_publish(devlink);
+ return 0;
+
+params_reg_err:
+ devlink_unregister(devlink);
+ return err;
}
void mlx5_devlink_unregister(struct devlink *devlink)
{
+ devlink_params_unregister(devlink, mlx5_devlink_params,
+ ARRAY_SIZE(mlx5_devlink_params));
devlink_unregister(devlink);
}
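A hedged sketch of how driver code can consume the configured mode; the
helper name is hypothetical, and mlx5_fs_cmd_get_dr_cmds() comes from the
software-steering files added later in this diff.

	static const struct mlx5_flow_cmds *
	mlx5_fs_get_cmds(struct mlx5_core_dev *dev)
	{
		/* "smfs" selects the software-managed command set,
		 * "dmfs" keeps the firmware-managed one
		 */
		if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
			return mlx5_fs_cmd_get_dr_cmds();
		return mlx5_fs_cmd_get_fw_cmds();
	}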
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 4c4620db3d31..f8ee18b4da6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -291,14 +291,14 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
*/
goto out;
}
-
- err = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- ipv4_encap_size, encap_header,
- MLX5_FLOW_NAMESPACE_FDB,
- &e->encap_id);
- if (err)
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
+ ipv4_encap_size, encap_header,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(e->pkt_reformat)) {
+ err = PTR_ERR(e->pkt_reformat);
goto destroy_neigh_entry;
+ }
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
@@ -407,13 +407,14 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
goto out;
}
- err = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- ipv6_encap_size, encap_header,
- MLX5_FLOW_NAMESPACE_FDB,
- &e->encap_id);
- if (err)
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
+ ipv6_encap_size, encap_header,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(e->pkt_reformat)) {
+ err = PTR_ERR(e->pkt_reformat);
goto destroy_neigh_entry;
+ }
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index a0ae5069d8c3..8e512216deb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -161,7 +161,7 @@ struct mlx5e_encap_entry {
*/
struct hlist_node encap_hlist;
struct list_head flows;
- u32 encap_id;
+ struct mlx5_pkt_reformat *pkt_reformat;
const struct ip_tunnel_info *tun_info;
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 5581a8045ede..30d26eba75a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -61,7 +61,7 @@
struct mlx5_nic_flow_attr {
u32 action;
u32 flow_tag;
- u32 mod_hdr_id;
+ struct mlx5_modify_hdr *modify_hdr;
u32 hairpin_tirn;
u8 match_level;
struct mlx5_flow_table *hairpin_ft;
@@ -201,7 +201,7 @@ struct mlx5e_mod_hdr_entry {
struct mod_hdr_key key;
- u32 mod_hdr_id;
+ struct mlx5_modify_hdr *modify_hdr;
refcount_t refcnt;
struct completion res_ready;
@@ -334,7 +334,7 @@ static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
WARN_ON(!list_empty(&mh->flows));
if (mh->compl_result > 0)
- mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
+ mlx5_modify_header_dealloc(priv->mdev, mh->modify_hdr);
kfree(mh);
}
@@ -395,11 +395,11 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
mutex_unlock(&tbl->lock);
- err = mlx5_modify_header_alloc(priv->mdev, namespace,
- mh->key.num_actions,
- mh->key.actions,
- &mh->mod_hdr_id);
- if (err) {
+ mh->modify_hdr = mlx5_modify_header_alloc(priv->mdev, namespace,
+ mh->key.num_actions,
+ mh->key.actions);
+ if (IS_ERR(mh->modify_hdr)) {
+ err = PTR_ERR(mh->modify_hdr);
mh->compl_result = err;
goto alloc_header_err;
}
@@ -412,9 +412,9 @@ attach_flow:
list_add(&flow->mod_hdr, &mh->flows);
spin_unlock(&mh->flows_lock);
if (mlx5e_is_eswitch_flow(flow))
- flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
+ flow->esw_attr->modify_hdr = mh->modify_hdr;
else
- flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;
+ flow->nic_attr->modify_hdr = mh->modify_hdr;
return 0;
@@ -906,7 +906,6 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {
.action = attr->action,
- .reformat_id = 0,
.flags = FLOW_ACT_NO_APPEND,
};
struct mlx5_fc *counter = NULL;
@@ -947,7 +946,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
- flow_act.modify_id = attr->mod_hdr_id;
+ flow_act.modify_hdr = attr->modify_hdr;
kfree(parse_attr->mod_hdr_actions);
if (err)
return err;
@@ -1304,14 +1303,13 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
int err;
- err = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- e->encap_size, e->encap_header,
- MLX5_FLOW_NAMESPACE_FDB,
- &e->encap_id);
- if (err) {
- mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
- err);
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
+ e->encap_size, e->encap_header,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(e->pkt_reformat)) {
+ mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
+ PTR_ERR(e->pkt_reformat));
return;
}
e->flags |= MLX5_ENCAP_ENTRY_VALID;
@@ -1326,7 +1324,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
esw_attr = flow->esw_attr;
spec = &esw_attr->parse_attr->spec;
- esw_attr->dests[flow->tmp_efi_index].encap_id = e->encap_id;
+ esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
/* Flow can be associated with multiple encap entries.
* Before offloading the flow verify that all of them have
@@ -1395,7 +1393,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
/* we know that the encap is valid */
e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
- mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
+ mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
}
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
@@ -1561,7 +1559,7 @@ static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entr
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
if (e->flags & MLX5_ENCAP_ENTRY_VALID)
- mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
+ mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
}
kfree(e->encap_header);
@@ -1896,7 +1894,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*match_level = MLX5_MATCH_L2;
}
} else if (*match_level != MLX5_MATCH_NONE) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
+ /* cvlan_tag enabled in match criteria and
+ * disabled in match value means both S & C tags
+ * don't exist (untagged on both)
+ */
MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
*match_level = MLX5_MATCH_L2;
}
@@ -3045,7 +3046,7 @@ attach_flow:
flow->encaps[out_index].index = out_index;
*encap_dev = e->out_dev;
if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
- attr->dests[out_index].encap_id = e->encap_id;
+ attr->dests[out_index].pkt_reformat = e->pkt_reformat;
attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
*encap_valid = true;
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index aba9e7a6ad3c..6bd6f5895244 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -69,7 +69,7 @@ struct vport_ingress {
struct mlx5_flow_group *allow_spoofchk_only_grp;
struct mlx5_flow_group *allow_untagged_only_grp;
struct mlx5_flow_group *drop_grp;
- int modify_metadata_id;
+ struct mlx5_modify_hdr *modify_metadata;
struct mlx5_flow_handle *modify_metadata_rule;
struct mlx5_flow_handle *allow_rule;
struct mlx5_flow_handle *drop_rule;
@@ -153,6 +153,7 @@ struct mlx5_eswitch_fdb {
} legacy;
struct offloads_fdb {
+ struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *slow_fdb;
struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *peer_miss_grp;
@@ -385,11 +386,11 @@ struct mlx5_esw_flow_attr {
struct {
u32 flags;
struct mlx5_eswitch_rep *rep;
+ struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_core_dev *mdev;
- u32 encap_id;
struct mlx5_termtbl_handle *termtbl;
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
- u32 mod_hdr_id;
+ struct mlx5_modify_hdr *modify_hdr;
u8 inner_match_level;
u8 outer_match_level;
struct mlx5_fc *counter;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 7d3582ee66b7..afa623b15a38 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -190,10 +190,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
MLX5_FLOW_DEST_VPORT_VHCA_ID;
if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- flow_act.reformat_id = attr->dests[j].encap_id;
+ flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
- dest[i].vport.reformat_id =
- attr->dests[j].encap_id;
+ dest[i].vport.pkt_reformat =
+ attr->dests[j].pkt_reformat;
}
i++;
}
@@ -213,7 +213,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- flow_act.modify_id = attr->mod_hdr_id;
+ flow_act.modify_hdr = attr->modify_hdr;
fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
if (IS_ERR(fdb)) {
@@ -276,7 +276,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
- dest[i].vport.reformat_id = attr->dests[i].encap_id;
+ dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
}
}
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
@@ -1068,6 +1068,13 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
err = -EOPNOTSUPP;
goto ns_err;
}
+ esw->fdb_table.offloads.ns = root_ns;
+ err = mlx5_flow_namespace_set_mode(root_ns,
+ esw->dev->priv.steering->mode);
+ if (err) {
+ esw_warn(dev, "Failed to set FDB namespace steering mode\n");
+ goto ns_err;
+ }
max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
MLX5_CAP_GEN(dev, max_flow_counter_15_0);
@@ -1207,6 +1214,8 @@ send_vport_err:
esw_destroy_offloads_fast_fdb_tables(esw);
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
+ /* Holds true only as long as DMFS is the default */
+ mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
kvfree(flow_group_in);
return err;
@@ -1226,6 +1235,9 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
esw_destroy_offloads_fast_fdb_tables(esw);
+ /* Holds true only as long as DMFS is the default */
+ mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
+ MLX5_FLOW_STEERING_MODE_DMFS);
}
static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
@@ -1623,13 +1635,42 @@ static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
esw_del_fdb_peer_miss_rules(esw);
}
+static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch *peer_esw,
+ bool pair)
+{
+ struct mlx5_flow_root_namespace *peer_ns;
+ struct mlx5_flow_root_namespace *ns;
+ int err;
+
+ peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
+ ns = esw->dev->priv.steering->fdb_root_ns;
+
+ if (pair) {
+ err = mlx5_flow_namespace_set_peer(ns, peer_ns);
+ if (err)
+ return err;
+
+ err = mlx5_flow_namespace_set_peer(peer_ns, ns);
+ if (err) {
+ mlx5_flow_namespace_set_peer(ns, NULL);
+ return err;
+ }
+ } else {
+ mlx5_flow_namespace_set_peer(ns, NULL);
+ mlx5_flow_namespace_set_peer(peer_ns, NULL);
+ }
+
+ return 0;
+}
+
static int mlx5_esw_offloads_devcom_event(int event,
void *my_data,
void *event_data)
{
struct mlx5_eswitch *esw = my_data;
- struct mlx5_eswitch *peer_esw = event_data;
struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+ struct mlx5_eswitch *peer_esw = event_data;
int err;
switch (event) {
@@ -1638,9 +1679,12 @@ static int mlx5_esw_offloads_devcom_event(int event,
mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
break;
- err = mlx5_esw_offloads_pair(esw, peer_esw);
+ err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
if (err)
goto err_out;
+ err = mlx5_esw_offloads_pair(esw, peer_esw);
+ if (err)
+ goto err_peer;
err = mlx5_esw_offloads_pair(peer_esw, esw);
if (err)
@@ -1656,6 +1700,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
mlx5_esw_offloads_unpair(peer_esw);
mlx5_esw_offloads_unpair(esw);
+ mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
break;
}
@@ -1663,7 +1708,8 @@ static int mlx5_esw_offloads_devcom_event(int event,
err_pair:
mlx5_esw_offloads_unpair(esw);
-
+err_peer:
+ mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
event, err);
@@ -1734,7 +1780,7 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
if (vport->ingress.modify_metadata_rule) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- flow_act.modify_id = vport->ingress.modify_metadata_id;
+ flow_act.modify_hdr = vport->ingress.modify_metadata;
}
vport->ingress.allow_rule =
@@ -1770,9 +1816,11 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
MLX5_SET(set_action_in, action, data,
mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));
- err = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- 1, action, &vport->ingress.modify_metadata_id);
- if (err) {
+ vport->ingress.modify_metadata =
+ mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ 1, action);
+ if (IS_ERR(vport->ingress.modify_metadata)) {
+ err = PTR_ERR(vport->ingress.modify_metadata);
esw_warn(esw->dev,
"failed to alloc modify header for vport %d ingress acl (%d)\n",
vport->vport, err);
@@ -1780,7 +1828,7 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- flow_act.modify_id = vport->ingress.modify_metadata_id;
+ flow_act.modify_hdr = vport->ingress.modify_metadata;
vport->ingress.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl,
&spec, &flow_act, NULL, 0);
if (IS_ERR(vport->ingress.modify_metadata_rule)) {
@@ -1794,7 +1842,7 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
out:
if (err)
- mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata_id);
+ mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
return err;
}
@@ -1803,7 +1851,7 @@ void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
{
if (vport->ingress.modify_metadata_rule) {
mlx5_del_flow_rules(vport->ingress.modify_metadata_rule);
- mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata_id);
+ mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
vport->ingress.modify_metadata_rule = NULL;
}
@@ -2113,9 +2161,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+ mlx5_rdma_enable_roce(esw->dev);
err = esw_offloads_steering_init(esw);
if (err)
- return err;
+ goto err_steering_init;
err = esw_set_passing_vport_metadata(esw, true);
if (err)
@@ -2130,8 +2179,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
esw_offloads_devcom_init(esw);
mutex_init(&esw->offloads.termtbl_mutex);
- mlx5_rdma_enable_roce(esw->dev);
-
return 0;
err_reps:
@@ -2139,6 +2186,8 @@ err_reps:
esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
esw_offloads_steering_cleanup(esw);
+err_steering_init:
+ mlx5_rdma_disable_roce(esw->dev);
return err;
}
@@ -2163,12 +2212,12 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
- mlx5_rdma_disable_roce(esw->dev);
esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_all_reps(esw);
mlx5_eswitch_disable_pf_vf_vports(esw);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
+ mlx5_rdma_disable_roce(esw->dev);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 1e3381604b3d..579c306caa7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -107,6 +107,50 @@ static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
return 0;
}
+static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
+ int reformat_type,
+ size_t size,
+ void *reformat_data,
+ enum mlx5_flow_namespace_type namespace,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ return 0;
+}
+
+static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+}
+
+static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
+ u8 namespace, u8 num_actions,
+ void *modify_actions,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ return 0;
+}
+
+static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+}
+
+static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
+{
+ return 0;
+}
+
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 underlay_qpn,
bool disconnect)
@@ -412,11 +456,13 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
} else {
MLX5_SET(flow_context, in_flow_context, action,
fte->action.action);
- MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
- fte->action.reformat_id);
+ if (fte->action.pkt_reformat)
+ MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+ fte->action.pkt_reformat->id);
}
- MLX5_SET(flow_context, in_flow_context, modify_header_id,
- fte->action.modify_id);
+ if (fte->action.modify_hdr)
+ MLX5_SET(flow_context, in_flow_context, modify_header_id,
+ fte->action.modify_hdr->id);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
@@ -468,7 +514,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
MLX5_SET(extended_dest_format, in_dests,
packet_reformat_id,
- dst->dest_attr.vport.reformat_id);
+ dst->dest_attr.vport.pkt_reformat->id);
}
break;
default:
@@ -643,14 +689,15 @@ int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
-int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
- int reformat_type,
- size_t size,
- void *reformat_data,
- enum mlx5_flow_namespace_type namespace,
- u32 *packet_reformat_id)
+static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
+ int reformat_type,
+ size_t size,
+ void *reformat_data,
+ enum mlx5_flow_namespace_type namespace,
+ struct mlx5_pkt_reformat *pkt_reformat)
{
u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)];
+ struct mlx5_core_dev *dev = ns->dev;
void *packet_reformat_context_in;
int max_encap_size;
void *reformat;
@@ -693,35 +740,36 @@ int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
- *packet_reformat_id = MLX5_GET(alloc_packet_reformat_context_out,
- out, packet_reformat_id);
+ pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
+ out, packet_reformat_id);
kfree(in);
return err;
}
-EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
-void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
- u32 packet_reformat_id)
+static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat *pkt_reformat)
{
u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)];
u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)];
+ struct mlx5_core_dev *dev = ns->dev;
memset(in, 0, sizeof(in));
MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
- packet_reformat_id);
+ pkt_reformat->id);
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
-int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
- u8 namespace, u8 num_actions,
- void *modify_actions, u32 *modify_header_id)
+static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
+ u8 namespace, u8 num_actions,
+ void *modify_actions,
+ struct mlx5_modify_hdr *modify_hdr)
{
u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
int max_actions, actions_size, inlen, err;
+ struct mlx5_core_dev *dev = ns->dev;
void *actions_in;
u8 table_type;
u32 *in;
@@ -772,26 +820,26 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
- *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
+ modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
kfree(in);
return err;
}
-EXPORT_SYMBOL(mlx5_modify_header_alloc);
-void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
+static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_modify_hdr *modify_hdr)
{
u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
+ struct mlx5_core_dev *dev = ns->dev;
memset(in, 0, sizeof(in));
MLX5_SET(dealloc_modify_header_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
- modify_header_id);
+ modify_hdr->id);
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-EXPORT_SYMBOL(mlx5_modify_header_dealloc);
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.create_flow_table = mlx5_cmd_create_flow_table,
@@ -803,6 +851,13 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.update_fte = mlx5_cmd_update_fte,
.delete_fte = mlx5_cmd_delete_fte,
.update_root_ft = mlx5_cmd_update_root_ft,
+ .packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc,
+ .packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
+ .modify_header_alloc = mlx5_cmd_modify_header_alloc,
+ .modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
+ .set_peer = mlx5_cmd_stub_set_peer,
+ .create_ns = mlx5_cmd_stub_create_ns,
+ .destroy_ns = mlx5_cmd_stub_destroy_ns,
};
static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
@@ -815,9 +870,16 @@ static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
.update_fte = mlx5_cmd_stub_update_fte,
.delete_fte = mlx5_cmd_stub_delete_fte,
.update_root_ft = mlx5_cmd_stub_update_root_ft,
+ .packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc,
+ .packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
+ .modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
+ .modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
+ .set_peer = mlx5_cmd_stub_set_peer,
+ .create_ns = mlx5_cmd_stub_create_ns,
+ .destroy_ns = mlx5_cmd_stub_destroy_ns,
};
-static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
return &mlx5_flow_cmds;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index bc4606306009..d62de642eca9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -75,6 +75,30 @@ struct mlx5_flow_cmds {
struct mlx5_flow_table *ft,
u32 underlay_qpn,
bool disconnect);
+
+ int (*packet_reformat_alloc)(struct mlx5_flow_root_namespace *ns,
+ int reformat_type,
+ size_t size,
+ void *reformat_data,
+ enum mlx5_flow_namespace_type namespace,
+ struct mlx5_pkt_reformat *pkt_reformat);
+
+ void (*packet_reformat_dealloc)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat *pkt_reformat);
+
+ int (*modify_header_alloc)(struct mlx5_flow_root_namespace *ns,
+ u8 namespace, u8 num_actions,
+ void *modify_actions,
+ struct mlx5_modify_hdr *modify_hdr);
+
+ void (*modify_header_dealloc)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_modify_hdr *modify_hdr);
+
+ int (*set_peer)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns);
+
+ int (*create_ns)(struct mlx5_flow_root_namespace *ns);
+ int (*destroy_ns)(struct mlx5_flow_root_namespace *ns);
};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
@@ -90,5 +114,6 @@ int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
u32 *out);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 7bdec442f0ac..3bbb49354829 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1415,7 +1415,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
(d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
- (d1->vport.reformat_id == d2->vport.reformat_id) : true)) ||
+ (d1->vport.pkt_reformat->id ==
+ d2->vport.pkt_reformat->id) : true)) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
d1->ft == d2->ft) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
@@ -2888,3 +2889,160 @@ out:
return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
+
+static struct mlx5_flow_root_namespace
+*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
+{
+ struct mlx5_flow_namespace *ns;
+
+ if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
+ ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
+ ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
+ else
+ ns = mlx5_get_flow_namespace(dev, ns_type);
+ if (!ns)
+ return NULL;
+
+ return find_root(&ns->node);
+}
+
+struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+ u8 ns_type, u8 num_actions,
+ void *modify_actions)
+{
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_modify_hdr *modify_hdr;
+ int err;
+
+ root = get_root_namespace(dev, ns_type);
+ if (!root)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
+ if (!modify_hdr)
+ return ERR_PTR(-ENOMEM);
+
+ modify_hdr->ns_type = ns_type;
+ err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
+ modify_actions, modify_hdr);
+ if (err) {
+ kfree(modify_hdr);
+ return ERR_PTR(err);
+ }
+
+ return modify_hdr;
+}
+EXPORT_SYMBOL(mlx5_modify_header_alloc);
+
+void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ struct mlx5_flow_root_namespace *root;
+
+ root = get_root_namespace(dev, modify_hdr->ns_type);
+ if (WARN_ON(!root))
+ return;
+ root->cmds->modify_header_dealloc(root, modify_hdr);
+ kfree(modify_hdr);
+}
+EXPORT_SYMBOL(mlx5_modify_header_dealloc);
+
+struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
+ int reformat_type,
+ size_t size,
+ void *reformat_data,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct mlx5_pkt_reformat *pkt_reformat;
+ struct mlx5_flow_root_namespace *root;
+ int err;
+
+ root = get_root_namespace(dev, ns_type);
+ if (!root)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
+ if (!pkt_reformat)
+ return ERR_PTR(-ENOMEM);
+
+ pkt_reformat->ns_type = ns_type;
+ pkt_reformat->reformat_type = reformat_type;
+ err = root->cmds->packet_reformat_alloc(root, reformat_type, size,
+ reformat_data, ns_type,
+ pkt_reformat);
+ if (err) {
+ kfree(pkt_reformat);
+ return ERR_PTR(err);
+ }
+
+ return pkt_reformat;
+}
+EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
+
+void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_flow_root_namespace *root;
+
+ root = get_root_namespace(dev, pkt_reformat->ns_type);
+ if (WARN_ON(!root))
+ return;
+ root->cmds->packet_reformat_dealloc(root, pkt_reformat);
+ kfree(pkt_reformat);
+}
+EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
+
+int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns)
+{
+ if (peer_ns && ns->mode != peer_ns->mode) {
+ mlx5_core_err(ns->dev,
+ "Can't peer namespace of different steering mode\n");
+ return -EINVAL;
+ }
+
+ return ns->cmds->set_peer(ns, peer_ns);
+}
+
+/* This function should be called only at init stage of the namespace.
+ * It is not safe to call this function while steering operations
+ * are executed in the namespace.
+ */
+int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
+ enum mlx5_flow_steering_mode mode)
+{
+ struct mlx5_flow_root_namespace *root;
+ const struct mlx5_flow_cmds *cmds;
+ int err;
+
+ root = find_root(&ns->node);
+	/* Can't set cmds on a non-root namespace */
+	if (&root->ns != ns)
+		return -EINVAL;
+
+ if (root->table_type != FS_FT_FDB)
+ return -EOPNOTSUPP;
+
+ if (root->mode == mode)
+ return 0;
+
+ if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
+ cmds = mlx5_fs_cmd_get_dr_cmds();
+ else
+ cmds = mlx5_fs_cmd_get_fw_cmds();
+ if (!cmds)
+ return -EOPNOTSUPP;
+
+ err = cmds->create_ns(root);
+ if (err) {
+ mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
+ err);
+ return err;
+ }
+
+ root->cmds->destroy_ns(root);
+ root->cmds = cmds;
+ root->mode = mode;
+
+ return 0;
+}
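A hedged usage sketch for the new mode switch, mirroring the FDB call site in
eswitch_offloads.c above (error handling shortened):

	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!ns || mlx5_flow_namespace_set_mode(ns, MLX5_FLOW_STEERING_MODE_SMFS))
		mlx5_core_warn(dev, "could not switch the FDB to SW steering\n");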
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 0d16b4b5ab83..00717eba2256 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -37,6 +37,24 @@
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <linux/llist.h>
+#include <steering/fs_dr.h>
+
+struct mlx5_modify_hdr {
+ enum mlx5_flow_namespace_type ns_type;
+ union {
+ struct mlx5_fs_dr_action action;
+ u32 id;
+ };
+};
+
+struct mlx5_pkt_reformat {
+ enum mlx5_flow_namespace_type ns_type;
+ int reformat_type; /* from mlx5_ifc */
+ union {
+ struct mlx5_fs_dr_action action;
+ u32 id;
+ };
+};
/* FS_TYPE_PRIO_CHAINS is a PRIO that will have namespaces only,
* and those are in parallel to one another when going over them to connect
@@ -80,9 +98,15 @@ enum fs_fte_status {
FS_FTE_STATUS_EXISTING = 1UL << 0,
};
+enum mlx5_flow_steering_mode {
+ MLX5_FLOW_STEERING_MODE_DMFS,
+ MLX5_FLOW_STEERING_MODE_SMFS
+};
+
struct mlx5_flow_steering {
struct mlx5_core_dev *dev;
- struct kmem_cache *fgs_cache;
+ enum mlx5_flow_steering_mode mode;
+ struct kmem_cache *fgs_cache;
struct kmem_cache *ftes_cache;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
@@ -128,6 +152,7 @@ struct mlx5_flow_handle {
/* Type of children is mlx5_flow_group */
struct mlx5_flow_table {
struct fs_node node;
+ struct mlx5_fs_dr_table fs_dr_table;
u32 id;
u16 vport;
unsigned int max_fte;
@@ -168,6 +193,7 @@ struct mlx5_ft_underlay_qp {
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
+ struct mlx5_fs_dr_rule fs_dr_rule;
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
u32 dests_size;
u32 index;
@@ -203,6 +229,7 @@ struct mlx5_flow_group_mask {
/* Type of children is fs_fte */
struct mlx5_flow_group {
struct fs_node node;
+ struct mlx5_fs_dr_matcher fs_dr_matcher;
struct mlx5_flow_group_mask mask;
u32 start_index;
u32 max_ftes;
@@ -214,6 +241,8 @@ struct mlx5_flow_group {
struct mlx5_flow_root_namespace {
struct mlx5_flow_namespace ns;
+ enum mlx5_flow_steering_mode mode;
+ struct mlx5_fs_dr_domain fs_dr_domain;
enum fs_flow_table_type table_type;
struct mlx5_core_dev *dev;
struct mlx5_flow_table *root_ft;
@@ -231,6 +260,14 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
unsigned long interval);
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
+
+int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns);
+
+int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
+ enum mlx5_flow_steering_mode mode);
+
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
new file mode 100644
index 000000000000..e065c2f68f5a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2019 Mellanox Technologies
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
+
+#include "mlx5_core.h"
+#include "lib/mlx5.h"
+
+struct mlx5_dm {
+ /* protect access to icm bitmask */
+ spinlock_t lock;
+ unsigned long *steering_sw_icm_alloc_blocks;
+ unsigned long *header_modify_sw_icm_alloc_blocks;
+};
+
+struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
+{
+ u64 header_modify_icm_blocks = 0;
+ u64 steering_icm_blocks = 0;
+ struct mlx5_dm *dm;
+
+ if (!(MLX5_CAP_GEN_64(dev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM))
+		return NULL;
+
+ dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&dm->lock);
+
+ if (MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address)) {
+ steering_icm_blocks =
+ BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+
+ dm->steering_sw_icm_alloc_blocks =
+ kcalloc(BITS_TO_LONGS(steering_icm_blocks),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!dm->steering_sw_icm_alloc_blocks)
+ goto err_steering;
+ }
+
+ if (MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address)) {
+ header_modify_icm_blocks =
+ BIT(MLX5_CAP_DEV_MEM(dev, log_header_modify_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+
+ dm->header_modify_sw_icm_alloc_blocks =
+ kcalloc(BITS_TO_LONGS(header_modify_icm_blocks),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!dm->header_modify_sw_icm_alloc_blocks)
+ goto err_modify_hdr;
+ }
+
+ return dm;
+
+err_modify_hdr:
+ kfree(dm->steering_sw_icm_alloc_blocks);
+
+err_steering:
+ kfree(dm);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_dm *dm = dev->dm;
+
+ if (!dev->dm)
+ return;
+
+ if (dm->steering_sw_icm_alloc_blocks) {
+ WARN_ON(!bitmap_empty(dm->steering_sw_icm_alloc_blocks,
+ BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
+ kfree(dm->steering_sw_icm_alloc_blocks);
+ }
+
+ if (dm->header_modify_sw_icm_alloc_blocks) {
+ WARN_ON(!bitmap_empty(dm->header_modify_sw_icm_alloc_blocks,
+ BIT(MLX5_CAP_DEV_MEM(dev,
+ log_header_modify_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
+ kfree(dm->header_modify_sw_icm_alloc_blocks);
+ }
+
+ kfree(dm);
+}
+
+int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+ u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id)
+{
+ u32 num_blocks = DIV_ROUND_UP_ULL(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {};
+ struct mlx5_dm *dm = dev->dm;
+ unsigned long *block_map;
+ u64 icm_start_addr;
+ u32 log_icm_size;
+ u32 max_blocks;
+ u64 block_idx;
+ void *sw_icm;
+ int ret;
+
+ if (!dev->dm)
+ return -EOPNOTSUPP;
+
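+	/* length must be a nonzero power of two and a multiple of the block size */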
+ if (!length || (length & (length - 1)) ||
+ length & (MLX5_SW_ICM_BLOCK_SIZE(dev) - 1))
+ return -EINVAL;
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
+ MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);
+
+ switch (type) {
+ case MLX5_SW_ICM_TYPE_STEERING:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address);
+ log_icm_size = MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size);
+ block_map = dm->steering_sw_icm_alloc_blocks;
+ break;
+ case MLX5_SW_ICM_TYPE_HEADER_MODIFY:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address);
+ log_icm_size = MLX5_CAP_DEV_MEM(dev,
+ log_header_modify_sw_icm_size);
+ block_map = dm->header_modify_sw_icm_alloc_blocks;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!block_map)
+ return -EOPNOTSUPP;
+
+ max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+ spin_lock(&dm->lock);
+	block_idx = bitmap_find_next_zero_area(block_map, max_blocks,
+					       0, num_blocks, 0);
+
+	if (block_idx < max_blocks)
+		bitmap_set(block_map, block_idx, num_blocks);
+
+ spin_unlock(&dm->lock);
+
+ if (block_idx >= max_blocks)
+ return -ENOMEM;
+
+ sw_icm = MLX5_ADDR_OF(create_sw_icm_in, in, sw_icm);
+ icm_start_addr += block_idx << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
+	MLX5_SET64(sw_icm, sw_icm, sw_icm_start_addr, icm_start_addr);
+ MLX5_SET(sw_icm, sw_icm, log_sw_icm_size, ilog2(length));
+
+ ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ spin_lock(&dm->lock);
+		bitmap_clear(block_map, block_idx, num_blocks);
+ spin_unlock(&dm->lock);
+
+ return ret;
+ }
+
+ *addr = icm_start_addr;
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_dm_sw_icm_alloc);
+
+int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+ u64 length, u16 uid, phys_addr_t addr, u32 obj_id)
+{
+ u32 num_blocks = DIV_ROUND_UP_ULL(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ struct mlx5_dm *dm = dev->dm;
+ unsigned long *block_map;
+ u64 icm_start_addr;
+ u64 start_idx;
+ int err;
+
+ if (!dev->dm)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case MLX5_SW_ICM_TYPE_STEERING:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address);
+ block_map = dm->steering_sw_icm_alloc_blocks;
+ break;
+ case MLX5_SW_ICM_TYPE_HEADER_MODIFY:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address);
+ block_map = dm->header_modify_sw_icm_alloc_blocks;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ start_idx = (addr - icm_start_addr) >> MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
+ spin_lock(&dm->lock);
+ bitmap_clear(block_map,
+ start_idx, num_blocks);
+ spin_unlock(&dm->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_dm_sw_icm_dealloc);
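+
+/* Usage sketch (hypothetical caller; the API names above are real): carve
+ * out a single steering block for kernel use (uid 0) and release it again.
+ * length must satisfy the power-of-two/alignment rules checked by the
+ * allocator.
+ *
+ *	phys_addr_t addr;
+ *	u32 obj_id;
+ *
+ *	if (!mlx5_dm_sw_icm_alloc(dev, MLX5_SW_ICM_TYPE_STEERING,
+ *				  MLX5_SW_ICM_BLOCK_SIZE(dev), 0,
+ *				  &addr, &obj_id))
+ *		mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
+ *				       MLX5_SW_ICM_BLOCK_SIZE(dev), 0,
+ *				       addr, obj_id);
+ */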
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index dee1a8658c87..9648c2297803 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -876,6 +876,10 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_eswitch_cleanup;
}
+ dev->dm = mlx5_dm_create(dev);
+ if (IS_ERR(dev->dm))
+ mlx5_core_warn(dev, "Failed to init device memory, err %ld\n", PTR_ERR(dev->dm));
+
dev->tracer = mlx5_fw_tracer_create(dev);
dev->hv_vhca = mlx5_hv_vhca_create(dev);
@@ -910,6 +914,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer);
+ mlx5_dm_cleanup(dev);
mlx5_fpga_cleanup(dev);
mlx5_eswitch_cleanup(dev->priv.eswitch);
mlx5_sriov_cleanup(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 87b75b2207c4..b100489dc85c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -198,6 +198,9 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
+struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev);
+void mlx5_dm_cleanup(struct mlx5_core_dev *dev);
+
#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
MLX5_CAP_GEN((mdev), pps_modify) && \
MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 18af6981e0be..0fc7de4aa572 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -14,9 +14,6 @@ static void mlx5_rdma_disable_roce_steering(struct mlx5_core_dev *dev)
{
struct mlx5_core_roce *roce = &dev->priv.roce;
- if (!roce->ft)
- return;
-
mlx5_del_flow_rules(roce->allow_rule);
mlx5_destroy_flow_group(roce->fg);
mlx5_destroy_flow_table(roce->ft);
@@ -145,6 +142,11 @@ static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
{
+ struct mlx5_core_roce *roce = &dev->priv.roce;
+
+ if (!roce->ft)
+ return;
+
mlx5_rdma_disable_roce_steering(dev);
mlx5_rdma_del_roce_addr(dev);
mlx5_nic_vport_disable_roce(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/steering/Makefile
new file mode 100644
index 000000000000..c78512eed8d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+subdir-ccflags-y += -I$(src)/..
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
new file mode 100644
index 000000000000..a02f87f85c17
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -0,0 +1,1588 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+enum dr_action_domain {
+ DR_ACTION_DOMAIN_NIC_INGRESS,
+ DR_ACTION_DOMAIN_NIC_EGRESS,
+ DR_ACTION_DOMAIN_FDB_INGRESS,
+ DR_ACTION_DOMAIN_FDB_EGRESS,
+ DR_ACTION_DOMAIN_MAX,
+};
+
+enum dr_action_valid_state {
+ DR_ACTION_STATE_ERR,
+ DR_ACTION_STATE_NO_ACTION,
+ DR_ACTION_STATE_REFORMAT,
+ DR_ACTION_STATE_MODIFY_HDR,
+ DR_ACTION_STATE_MODIFY_VLAN,
+ DR_ACTION_STATE_NON_TERM,
+ DR_ACTION_STATE_TERM,
+ DR_ACTION_STATE_MAX,
+};
+
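+/* Action ordering state machine: next_action_state[domain][state][action]
+ * gives the state reached by appending 'action' while in 'state'. Entries
+ * omitted from the initializer default to DR_ACTION_STATE_ERR (zero), which
+ * rejects the sequence. For example, on NIC RX a modify header moves the
+ * walk to DR_ACTION_STATE_MODIFY_HDR, from which no reformat action is
+ * accepted.
+ */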
+static const enum dr_action_valid_state
+next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] = {
+ [DR_ACTION_DOMAIN_NIC_INGRESS] = {
+ [DR_ACTION_STATE_NO_ACTION] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ },
+ [DR_ACTION_STATE_REFORMAT] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ },
+ [DR_ACTION_STATE_MODIFY_HDR] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
+ },
+ [DR_ACTION_STATE_MODIFY_VLAN] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ },
+ [DR_ACTION_STATE_NON_TERM] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ },
+ [DR_ACTION_STATE_TERM] = {
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
+ },
+ },
+ [DR_ACTION_DOMAIN_NIC_EGRESS] = {
+ [DR_ACTION_STATE_NO_ACTION] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ },
+ [DR_ACTION_STATE_REFORMAT] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
+ },
+ [DR_ACTION_STATE_MODIFY_HDR] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ },
+ [DR_ACTION_STATE_MODIFY_VLAN] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ },
+ [DR_ACTION_STATE_NON_TERM] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ },
+ [DR_ACTION_STATE_TERM] = {
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
+ },
+ },
+ [DR_ACTION_DOMAIN_FDB_INGRESS] = {
+ [DR_ACTION_STATE_NO_ACTION] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_REFORMAT] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_MODIFY_HDR] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_MODIFY_VLAN] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ },
+ [DR_ACTION_STATE_NON_TERM] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_TERM] = {
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
+ },
+ },
+ [DR_ACTION_DOMAIN_FDB_EGRESS] = {
+ [DR_ACTION_STATE_NO_ACTION] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_REFORMAT] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_MODIFY_HDR] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_MODIFY_VLAN] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_NON_TERM] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_TERM] = {
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
+ },
+ },
+};
+
+struct dr_action_modify_field_conv {
+ u16 hw_field;
+ u8 start;
+ u8 end;
+ u8 l3_type;
+ u8 l4_type;
+};
+
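+/* Map a PRM set_action_in field id to the HW modify-header field and the
+ * bit range it occupies there; .start/.end are bit offsets within the
+ * 64-bit HW field. Fields absent from this array (both .start and .end
+ * zero) are rejected by dr_action_modify_get_hw_info().
+ */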
+static const struct dr_action_modify_field_conv dr_action_conv_arr[] = {
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 16, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 32, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 16, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 0, .end = 5,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 48, .end = 56,
+ .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15,
+ .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31,
+ .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15,
+ .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31,
+ .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 32, .end = 63,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 0, .end = 31,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 32, .end = 63,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 0, .end = 31,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 32, .end = 63,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 0, .end = 31,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 0, .end = 15,
+ },
+};
+
+#define MAX_VLANS 2
+struct dr_action_vlan_info {
+ int count;
+ u32 headers[MAX_VLANS];
+};
+
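+/* Per-rule action parameters, accumulated while validating the action
+ * array in mlx5dr_actions_build_ste_arr() and then written into the STEs
+ * by dr_actions_apply_rx()/dr_actions_apply_tx().
+ */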
+struct dr_action_apply_attr {
+ u32 modify_index;
+ u16 modify_actions;
+ u32 decap_index;
+ u16 decap_actions;
+ u8 decap_with_vlan:1;
+ u64 final_icm_addr;
+ u32 flow_tag;
+ u32 ctr_id;
+ u16 gvmi;
+ u16 hit_gvmi;
+ u32 reformat_id;
+ u32 reformat_size;
+ struct dr_action_vlan_info vlans;
+};
+
+static int
+dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type,
+ enum mlx5dr_action_type *action_type)
+{
+ switch (reformat_type) {
+ case DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2:
+ *action_type = DR_ACTION_TYP_TNL_L2_TO_L2;
+ break;
+ case DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2:
+ *action_type = DR_ACTION_TYP_L2_TO_TNL_L2;
+ break;
+ case DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2:
+ *action_type = DR_ACTION_TYP_TNL_L3_TO_L2;
+ break;
+ case DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3:
+ *action_type = DR_ACTION_TYP_L2_TO_TNL_L3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
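+/* A single STE can carry only a limited combination of actions; when the
+ * current STE cannot take the next action, chain a new don't-care STE and
+ * account for it in added_stes.
+ */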
+static void dr_actions_init_next_ste(u8 **last_ste,
+ u32 *added_stes,
+ enum mlx5dr_ste_entry_type entry_type,
+ u16 gvmi)
+{
+ (*added_stes)++;
+ *last_ste += DR_STE_SIZE;
+ mlx5dr_ste_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, entry_type, gvmi);
+}
+
+static void dr_actions_apply_tx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct dr_action_apply_attr *attr,
+ u32 *added_stes)
+{
+ bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
+ action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];
+
+ /* Make sure the modify header comes before L2 encapsulation,
+ * since we support modify headers for outer headers only
+ */
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+ mlx5dr_ste_set_rewrite_actions(last_ste,
+ attr->modify_actions,
+ attr->modify_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
+ int i;
+
+ for (i = 0; i < attr->vlans.count; i++) {
+ if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
+ dr_actions_init_next_ste(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_TX,
+ attr->gvmi);
+
+ mlx5dr_ste_set_tx_push_vlan(last_ste,
+ attr->vlans.headers[i],
+ encap);
+ }
+ }
+
+ if (encap) {
+ /* Modify header and encapsulation require different STEs,
+ * since the modify header STE format doesn't support the
+ * encapsulation tunneling_action.
+ */
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
+ action_type_set[DR_ACTION_TYP_PUSH_VLAN])
+ dr_actions_init_next_ste(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_TX,
+ attr->gvmi);
+
+ mlx5dr_ste_set_tx_encap(last_ste,
+ attr->reformat_id,
+ attr->reformat_size,
+ action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
+ /* Whenever prio_tag_required is enabled, we can be sure that the
+ * previous table (ACL) already pushed a vlan onto our packet,
+ * and due to a HW limitation we need to set this bit, otherwise
+ * push vlan + reformat will not work.
+ */
+ if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
+ mlx5dr_ste_set_go_back_bit(last_ste);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_CTR])
+ mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id);
+}
+
+static void dr_actions_apply_rx(u8 *action_type_set,
+ u8 *last_ste,
+ struct dr_action_apply_attr *attr,
+ u32 *added_stes)
+{
+ if (action_type_set[DR_ACTION_TYP_CTR])
+ mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id);
+
+ if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
+ mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+ mlx5dr_ste_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
+ mlx5dr_ste_set_rewrite_actions(last_ste,
+ attr->decap_actions,
+ attr->decap_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
+ mlx5dr_ste_set_rx_decap(last_ste);
+
+ if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
+ int i;
+
+ for (i = 0; i < attr->vlans.count; i++) {
+ if (i ||
+ action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
+ action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
+ dr_actions_init_next_ste(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_RX,
+ attr->gvmi);
+
+ mlx5dr_ste_set_rx_pop_vlan(last_ste);
+ }
+ }
+
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
+ dr_actions_init_next_ste(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_MODIFY_PKT,
+ attr->gvmi);
+ else
+ mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+
+ mlx5dr_ste_set_rewrite_actions(last_ste,
+ attr->modify_actions,
+ attr->modify_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_TAG]) {
+ if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
+ dr_actions_init_next_ste(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_RX,
+ attr->gvmi);
+
+ mlx5dr_ste_rx_set_flow_tag(last_ste, attr->flow_tag);
+ }
+}
+
+/* Apply the actions to the rule's STE array, starting from last_ste.
+ * Actions might require more than one STE; new_num_stes returns the new
+ * size of the STE array needed for the rule with its actions.
+ */
+static void dr_actions_apply(struct mlx5dr_domain *dmn,
+ enum mlx5dr_ste_entry_type ste_type,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct dr_action_apply_attr *attr,
+ u32 *new_num_stes)
+{
+ u32 added_stes = 0;
+
+ if (ste_type == MLX5DR_STE_TYPE_RX)
+ dr_actions_apply_rx(action_type_set, last_ste, attr, &added_stes);
+ else
+ dr_actions_apply_tx(dmn, action_type_set, last_ste, attr, &added_stes);
+
+ last_ste += added_stes * DR_STE_SIZE;
+ *new_num_stes += added_stes;
+
+ mlx5dr_ste_set_hit_gvmi(last_ste, attr->hit_gvmi);
+ mlx5dr_ste_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
+
+static enum dr_action_domain
+dr_action_get_action_domain(enum mlx5dr_domain_type domain,
+ enum mlx5dr_ste_entry_type ste_type)
+{
+ switch (domain) {
+ case MLX5DR_DOMAIN_TYPE_NIC_RX:
+ return DR_ACTION_DOMAIN_NIC_INGRESS;
+ case MLX5DR_DOMAIN_TYPE_NIC_TX:
+ return DR_ACTION_DOMAIN_NIC_EGRESS;
+ case MLX5DR_DOMAIN_TYPE_FDB:
+ if (ste_type == MLX5DR_STE_TYPE_RX)
+ return DR_ACTION_DOMAIN_FDB_INGRESS;
+ return DR_ACTION_DOMAIN_FDB_EGRESS;
+ default:
+ WARN_ON(true);
+ return DR_ACTION_DOMAIN_MAX;
+ }
+}
+
+static
+int dr_action_validate_and_get_next_state(enum dr_action_domain action_domain,
+ u32 action_type,
+ u32 *state)
+{
+ u32 cur_state = *state;
+
+ /* Check action state machine is valid */
+ *state = next_action_state[action_domain][cur_state][action_type];
+
+ if (*state == DR_ACTION_STATE_ERR)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action *dest_action,
+ u64 *final_icm_addr)
+{
+ int ret;
+
+ switch (dest_action->action_type) {
+ case DR_ACTION_TYP_FT:
+ /* Allow destination flow table only if table is a terminating
+ * table, since there is an *assumption* that in such case FW
+ * will recalculate the CS.
+ */
+ if (dest_action->dest_tbl.is_fw_tbl) {
+ *final_icm_addr = dest_action->dest_tbl.fw_tbl.rx_icm_addr;
+ } else {
+ mlx5dr_dbg(dmn,
+ "Destination FT should be terminating when modify TTL is used\n");
+ return -EINVAL;
+ }
+ break;
+
+ case DR_ACTION_TYP_VPORT:
+ /* If destination is vport we will get the FW flow table
+ * that recalculates the CS and forwards to the vport.
+ */
+ ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport.dmn,
+ dest_action->vport.num,
+ final_icm_addr);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n");
+ return ret;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define WITH_VLAN_NUM_HW_ACTIONS 6
+
+int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_action *actions[],
+ u32 num_actions,
+ u8 *ste_arr,
+ u32 *new_hw_ste_arr_sz)
+{
+ struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+ bool rx_rule = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ u8 action_type_set[DR_ACTION_TYP_MAX] = {};
+ struct mlx5dr_action *dest_action = NULL;
+ u32 state = DR_ACTION_STATE_NO_ACTION;
+ struct dr_action_apply_attr attr = {};
+ enum dr_action_domain action_domain;
+ bool recalc_cs_required = false;
+ u8 *last_ste;
+ int i, ret;
+
+ attr.gvmi = dmn->info.caps.gvmi;
+ attr.hit_gvmi = dmn->info.caps.gvmi;
+ attr.final_icm_addr = nic_dmn->default_icm_addr;
+ action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->ste_type);
+
+ for (i = 0; i < num_actions; i++) {
+ struct mlx5dr_action *action;
+ int max_actions_type = 1;
+ u32 action_type;
+
+ action = actions[i];
+ action_type = action->action_type;
+
+ switch (action_type) {
+ case DR_ACTION_TYP_DROP:
+ attr.final_icm_addr = nic_dmn->drop_icm_addr;
+ break;
+ case DR_ACTION_TYP_FT:
+ dest_action = action;
+ if (!action->dest_tbl.is_fw_tbl) {
+ if (action->dest_tbl.tbl->dmn != dmn) {
+ mlx5dr_dbg(dmn,
+ "Destination table belongs to a different domain\n");
+ goto out_invalid_arg;
+ }
+ if (action->dest_tbl.tbl->level <= matcher->tbl->level) {
+ mlx5dr_dbg(dmn,
+ "Destination table level should be higher than source table\n");
+ goto out_invalid_arg;
+ }
+ attr.final_icm_addr = rx_rule ?
+ action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
+ action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr;
+ } else {
+ struct mlx5dr_cmd_query_flow_table_details output;
+ int ret;
+
+ /* get the relevant addresses */
+ if (!action->dest_tbl.fw_tbl.rx_icm_addr) {
+ ret = mlx5dr_cmd_query_flow_table(action->dest_tbl.fw_tbl.mdev,
+ action->dest_tbl.fw_tbl.ft->type,
+ action->dest_tbl.fw_tbl.ft->id,
+ &output);
+ if (!ret) {
+ action->dest_tbl.fw_tbl.tx_icm_addr =
+ output.sw_owner_icm_root_1;
+ action->dest_tbl.fw_tbl.rx_icm_addr =
+ output.sw_owner_icm_root_0;
+ } else {
+ mlx5dr_dbg(dmn,
+ "Failed mlx5_cmd_query_flow_table ret: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ attr.final_icm_addr = rx_rule ?
+ action->dest_tbl.fw_tbl.rx_icm_addr :
+ action->dest_tbl.fw_tbl.tx_icm_addr;
+ }
+ break;
+ case DR_ACTION_TYP_QP:
+ mlx5dr_info(dmn, "Domain doesn't support QP\n");
+ goto out_invalid_arg;
+ case DR_ACTION_TYP_CTR:
+ attr.ctr_id = action->ctr.ctr_id +
+ action->ctr.offeset;
+ break;
+ case DR_ACTION_TYP_TAG:
+ attr.flow_tag = action->flow_tag;
+ break;
+ case DR_ACTION_TYP_TNL_L2_TO_L2:
+ break;
+ case DR_ACTION_TYP_TNL_L3_TO_L2:
+ attr.decap_index = action->rewrite.index;
+ attr.decap_actions = action->rewrite.num_of_actions;
+ attr.decap_with_vlan =
+ attr.decap_actions == WITH_VLAN_NUM_HW_ACTIONS;
+ break;
+ case DR_ACTION_TYP_MODIFY_HDR:
+ attr.modify_index = action->rewrite.index;
+ attr.modify_actions = action->rewrite.num_of_actions;
+ recalc_cs_required = action->rewrite.modify_ttl;
+ break;
+ case DR_ACTION_TYP_L2_TO_TNL_L2:
+ case DR_ACTION_TYP_L2_TO_TNL_L3:
+ attr.reformat_size = action->reformat.reformat_size;
+ attr.reformat_id = action->reformat.reformat_id;
+ break;
+ case DR_ACTION_TYP_VPORT:
+ attr.hit_gvmi = action->vport.caps->vhca_gvmi;
+ dest_action = action;
+ if (rx_rule) {
+ /* Loopback on WIRE vport is not supported */
+ if (action->vport.num == WIRE_PORT)
+ goto out_invalid_arg;
+
+ attr.final_icm_addr = action->vport.caps->icm_address_rx;
+ } else {
+ attr.final_icm_addr = action->vport.caps->icm_address_tx;
+ }
+ break;
+ case DR_ACTION_TYP_POP_VLAN:
+ max_actions_type = MAX_VLANS;
+ attr.vlans.count++;
+ break;
+ case DR_ACTION_TYP_PUSH_VLAN:
+ max_actions_type = MAX_VLANS;
+ if (attr.vlans.count == MAX_VLANS)
+ return -EINVAL;
+
+ attr.vlans.headers[attr.vlans.count++] = action->push_vlan.vlan_hdr;
+ break;
+ default:
+ goto out_invalid_arg;
+ }
+
+ /* Check action duplication */
+ if (++action_type_set[action_type] > max_actions_type) {
+ mlx5dr_dbg(dmn, "Action type %d supports only max %d time(s)\n",
+ action_type, max_actions_type);
+ goto out_invalid_arg;
+ }
+
+ /* Check action state machine is valid */
+ if (dr_action_validate_and_get_next_state(action_domain,
+ action_type,
+ &state)) {
+ mlx5dr_dbg(dmn, "Invalid action sequence provided\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ *new_hw_ste_arr_sz = nic_matcher->num_of_builders;
+ last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1);
+
+ /* Due to a HW bug, modifying TTL on RX flows will cause an incorrect
+ * checksum calculation. In this case we will use a FW table to
+ * recalculate.
+ */
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
+ rx_rule && recalc_cs_required && dest_action) {
+ ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
+ if (ret) {
+ mlx5dr_dbg(dmn,
+ "Failed to handle checksum recalculation err %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ dr_actions_apply(dmn,
+ nic_dmn->ste_type,
+ action_type_set,
+ last_ste,
+ &attr,
+ new_hw_ste_arr_sz);
+
+ return 0;
+
+out_invalid_arg:
+ return -EINVAL;
+}
+
+#define CVLAN_ETHERTYPE 0x8100
+#define SVLAN_ETHERTYPE 0x88a8
+#define HDR_LEN_L2_ONLY 14
+#define HDR_LEN_L2_VLAN 18
+#define REWRITE_HW_ACTION_NUM 6
+
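+/* Build the HW "set" actions that write the caller-supplied L2 header
+ * after an L3 tunnel decap: dmac, smac and ethertype always, plus the
+ * vlan header when the supplied L2 header is 18 bytes long, for up to
+ * REWRITE_HW_ACTION_NUM actions in total (which is what
+ * WITH_VLAN_NUM_HW_ACTIONS relies on to detect decap with vlan).
+ */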
+static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action *action,
+ void *data, size_t data_sz)
+{
+ struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
+ u64 ops[REWRITE_HW_ACTION_NUM] = {};
+ u32 hdr_fld_4b;
+ u16 hdr_fld_2b;
+ u16 vlan_type;
+ bool vlan;
+ int i = 0;
+ int ret;
+
+ vlan = (data_sz != HDR_LEN_L2_ONLY);
+
+ /* dmac_47_16 */
+ MLX5_SET(dr_action_hw_set, ops + i,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_length, 0);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_left_shifter, 16);
+ hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ inline_data, hdr_fld_4b);
+ i++;
+
+ /* smac_47_16 */
+ MLX5_SET(dr_action_hw_set, ops + i,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_length, 0);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_left_shifter, 16);
+ hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
+ MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ inline_data, hdr_fld_4b);
+ i++;
+
+ /* dmac_15_0 */
+ MLX5_SET(dr_action_hw_set, ops + i,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_left_shifter, 0);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ inline_data, hdr_fld_2b);
+ i++;
+
+ /* ethertype + (optional) vlan */
+ MLX5_SET(dr_action_hw_set, ops + i,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_left_shifter, 32);
+ if (!vlan) {
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
+ MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_2b);
+ MLX5_SET(dr_action_hw_set, ops + i, destination_length, 16);
+ } else {
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
+ vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
+ hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
+ MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_4b);
+ MLX5_SET(dr_action_hw_set, ops + i, destination_length, 18);
+ }
+ i++;
+
+ /* smac_15_0 */
+ MLX5_SET(dr_action_hw_set, ops + i,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_left_shifter, 0);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ inline_data, hdr_fld_2b);
+ i++;
+
+ if (vlan) {
+ MLX5_SET(dr_action_hw_set, ops + i,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ inline_data, hdr_fld_2b);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_left_shifter, 0);
+ i++;
+ }
+
+ action->rewrite.data = (void *)ops;
+ action->rewrite.num_of_actions = i;
+ action->rewrite.chunk->byte_size = i * sizeof(*ops);
+
+ ret = mlx5dr_send_postsend_action(dmn, action);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Writing encapsulation action to ICM failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct mlx5dr_action *
+dr_action_create_generic(enum mlx5dr_action_type action_type)
+{
+ struct mlx5dr_action *action;
+
+ action = kzalloc(sizeof(*action), GFP_KERNEL);
+ if (!action)
+ return NULL;
+
+ action->action_type = action_type;
+ refcount_set(&action->refcount, 1);
+
+ return action;
+}
+
+struct mlx5dr_action *mlx5dr_action_create_drop(void)
+{
+ return dr_action_create_generic(DR_ACTION_TYP_DROP);
+}
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_table(struct mlx5dr_table *tbl)
+{
+ struct mlx5dr_action *action;
+
+ refcount_inc(&tbl->refcount);
+
+ action = dr_action_create_generic(DR_ACTION_TYP_FT);
+ if (!action)
+ goto dec_ref;
+
+ action->dest_tbl.tbl = tbl;
+
+ return action;
+
+dec_ref:
+ refcount_dec(&tbl->refcount);
+ return NULL;
+}
+
+struct mlx5dr_action *
+mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
+ struct mlx5_core_dev *mdev)
+{
+ struct mlx5dr_action *action;
+
+ action = dr_action_create_generic(DR_ACTION_TYP_FT);
+ if (!action)
+ return NULL;
+
+ action->dest_tbl.is_fw_tbl = 1;
+ action->dest_tbl.fw_tbl.ft = ft;
+ action->dest_tbl.fw_tbl.mdev = mdev;
+
+ return action;
+}
+
+struct mlx5dr_action *
+mlx5dr_action_create_flow_counter(u32 counter_id)
+{
+ struct mlx5dr_action *action;
+
+ action = dr_action_create_generic(DR_ACTION_TYP_CTR);
+ if (!action)
+ return NULL;
+
+ action->ctr.ctr_id = counter_id;
+
+ return action;
+}
+
+struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value)
+{
+ struct mlx5dr_action *action;
+
+ action = dr_action_create_generic(DR_ACTION_TYP_TAG);
+ if (!action)
+ return NULL;
+
+ action->flow_tag = tag_value & 0xffffff;
+
+ return action;
+}
+
+static int
+dr_action_verify_reformat_params(enum mlx5dr_action_type reformat_type,
+ struct mlx5dr_domain *dmn,
+ size_t data_sz,
+ void *data)
+{
+ if ((!data && data_sz) || (data && !data_sz) || reformat_type >
+ DR_ACTION_TYP_L2_TO_TNL_L3) {
+ mlx5dr_dbg(dmn, "Invalid reformat parameter!\n");
+ goto out_err;
+ }
+
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB)
+ return 0;
+
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
+ if (reformat_type != DR_ACTION_TYP_TNL_L2_TO_L2 &&
+ reformat_type != DR_ACTION_TYP_TNL_L3_TO_L2) {
+ mlx5dr_dbg(dmn, "Action reformat type not support on RX domain\n");
+ goto out_err;
+ }
+ } else if (dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) {
+ if (reformat_type != DR_ACTION_TYP_L2_TO_TNL_L2 &&
+ reformat_type != DR_ACTION_TYP_L2_TO_TNL_L3) {
+ mlx5dr_dbg(dmn, "Action reformat type not support on TX domain\n");
+ goto out_err;
+ }
+ }
+
+ return 0;
+
+out_err:
+ return -EINVAL;
+}
+
+#define ACTION_CACHE_LINE_SIZE 64
+
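+/* Rewrite data is stored in ICM; the index programmed into the STE is the
+ * chunk's offset from hdr_modify_icm_addr in units of 64-byte action
+ * cache lines (see the rewrite.index computations below).
+ */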
+static int
+dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
+ size_t data_sz, void *data,
+ struct mlx5dr_action *action)
+{
+ u32 reformat_id;
+ int ret;
+
+ switch (action->action_type) {
+ case DR_ACTION_TYP_L2_TO_TNL_L2:
+ case DR_ACTION_TYP_L2_TO_TNL_L3:
+ {
+ enum mlx5_reformat_ctx_type rt;
+
+ if (action->action_type == DR_ACTION_TYP_L2_TO_TNL_L2)
+ rt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
+ else
+ rt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
+
+ ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev, rt, data_sz, data,
+ &reformat_id);
+ if (ret)
+ return ret;
+
+ action->reformat.reformat_id = reformat_id;
+ action->reformat.reformat_size = data_sz;
+ return 0;
+ }
+ case DR_ACTION_TYP_TNL_L2_TO_L2:
+ {
+ return 0;
+ }
+ case DR_ACTION_TYP_TNL_L3_TO_L2:
+ {
+ /* Only Ethernet frames are supported, with VLAN (18 bytes) or without (14 bytes) */
+ if (data_sz != HDR_LEN_L2_ONLY && data_sz != HDR_LEN_L2_VLAN)
+ return -EINVAL;
+
+ action->rewrite.chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
+ DR_CHUNK_SIZE_8);
+ if (!action->rewrite.chunk)
+ return -ENOMEM;
+
+ action->rewrite.index = (action->rewrite.chunk->icm_addr -
+ dmn->info.caps.hdr_modify_icm_addr) /
+ ACTION_CACHE_LINE_SIZE;
+
+ ret = dr_actions_l2_rewrite(dmn, action, data, data_sz);
+ if (ret) {
+ mlx5dr_icm_free_chunk(action->rewrite.chunk);
+ return ret;
+ }
+ return 0;
+ }
+ default:
+ mlx5dr_info(dmn, "Reformat type is not supported %d\n", action->action_type);
+ return -EINVAL;
+ }
+}
+
+struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void)
+{
+ return dr_action_create_generic(DR_ACTION_TYP_POP_VLAN);
+}
+
+struct mlx5dr_action *mlx5dr_action_create_push_vlan(struct mlx5dr_domain *dmn,
+ __be32 vlan_hdr)
+{
+ u32 vlan_hdr_h = ntohl(vlan_hdr);
+ u16 ethertype = vlan_hdr_h >> 16;
+ struct mlx5dr_action *action;
+
+ if (ethertype != SVLAN_ETHERTYPE && ethertype != CVLAN_ETHERTYPE) {
+ mlx5dr_dbg(dmn, "Invalid vlan ethertype\n");
+ return NULL;
+ }
+
+ action = dr_action_create_generic(DR_ACTION_TYP_PUSH_VLAN);
+ if (!action)
+ return NULL;
+
+ action->push_vlan.vlan_hdr = vlan_hdr_h;
+ return action;
+}
+
+struct mlx5dr_action *
+mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
+ enum mlx5dr_action_reformat_type reformat_type,
+ size_t data_sz,
+ void *data)
+{
+ enum mlx5dr_action_type action_type;
+ struct mlx5dr_action *action;
+ int ret;
+
+ refcount_inc(&dmn->refcount);
+
+ /* General checks */
+ ret = dr_action_reformat_to_action_type(reformat_type, &action_type);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Invalid reformat_type provided\n");
+ goto dec_ref;
+ }
+
+ ret = dr_action_verify_reformat_params(action_type, dmn, data_sz, data);
+ if (ret)
+ goto dec_ref;
+
+ action = dr_action_create_generic(action_type);
+ if (!action)
+ goto dec_ref;
+
+ action->reformat.dmn = dmn;
+
+ ret = dr_action_create_reformat_action(dmn,
+ data_sz,
+ data,
+ action);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Failed creating reformat action %d\n", ret);
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+dec_ref:
+ refcount_dec(&dmn->refcount);
+ return NULL;
+}
+
+static const struct dr_action_modify_field_conv *
+dr_action_modify_get_hw_info(u16 sw_field)
+{
+ const struct dr_action_modify_field_conv *hw_action_info;
+
+ if (sw_field >= ARRAY_SIZE(dr_action_conv_arr))
+ goto not_found;
+
+ hw_action_info = &dr_action_conv_arr[sw_field];
+ if (!hw_action_info->end && !hw_action_info->start)
+ goto not_found;
+
+ return hw_action_info;
+
+not_found:
+ return NULL;
+}
+
+static int
+dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
+ __be64 *sw_action,
+ __be64 *hw_action,
+ const struct dr_action_modify_field_conv **ret_hw_info)
+{
+ const struct dr_action_modify_field_conv *hw_action_info;
+ u8 offset, length, max_length, action;
+ u16 sw_field;
+ u8 hw_opcode;
+ u32 data;
+
+ /* Get SW modify action data */
+ action = MLX5_GET(set_action_in, sw_action, action_type);
+ length = MLX5_GET(set_action_in, sw_action, length);
+ offset = MLX5_GET(set_action_in, sw_action, offset);
+ sw_field = MLX5_GET(set_action_in, sw_action, field);
+ data = MLX5_GET(set_action_in, sw_action, data);
+
+ /* Convert SW data to HW modify action format */
+ hw_action_info = dr_action_modify_get_hw_info(sw_field);
+ if (!hw_action_info) {
+ mlx5dr_dbg(dmn, "Modify action invalid field given\n");
+ return -EINVAL;
+ }
+
+ max_length = hw_action_info->end - hw_action_info->start + 1;
+
+ switch (action) {
+ case MLX5_ACTION_TYPE_SET:
+ hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_SET;
+ /* The PRM defines that length zero specifies a length of 32 bits */
+ if (!length)
+ length = 32;
+
+ if (length + offset > max_length) {
+ mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
+ return -EINVAL;
+ }
+ break;
+
+ case MLX5_ACTION_TYPE_ADD:
+ hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_ADD;
+ offset = 0;
+ length = max_length;
+ break;
+
+ default:
+ mlx5dr_info(dmn, "Unsupported action_type for modify action\n");
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(dr_action_hw_set, hw_action, opcode, hw_opcode);
+
+ MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
+ hw_action_info->hw_field);
+
+ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
+ hw_action_info->start + offset);
+
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length,
+ length == 32 ? 0 : length);
+
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+
+ *ret_hw_info = hw_action_info;
+
+ return 0;
+}
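+
+/* Worked example (illustrative values): a SW set_action_in on
+ * MLX5_ACTION_IN_FIELD_METADATA_REG_C_0 with length 0 (the 32-bit
+ * encoding) and offset 0 converts to a HW set with destination_field_code
+ * MLX5DR_ACTION_MDFY_HW_FLD_REG_0, destination_left_shifter 32 (the
+ * field's .start) and destination_length 0 (again the 32-bit encoding).
+ */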
+
+static int
+dr_action_modify_check_field_limitation(struct mlx5dr_domain *dmn,
+ const __be64 *sw_action)
+{
+ u16 sw_field;
+ u8 action;
+
+ sw_field = MLX5_GET(set_action_in, sw_action, field);
+ action = MLX5_GET(set_action_in, sw_action, action_type);
+
+ /* Check if SW field is supported in current domain (RX/TX) */
+ if (action == MLX5_ACTION_TYPE_SET) {
+ if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
+ if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
+ mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
+ sw_field);
+ return -EINVAL;
+ }
+ }
+
+ if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
+ if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
+ mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
+ sw_field);
+ return -EINVAL;
+ }
+ }
+ } else if (action == MLX5_ACTION_TYPE_ADD) {
+ if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL &&
+ sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT &&
+ sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM &&
+ sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM) {
+ mlx5dr_dbg(dmn, "Unsupported field %d for add action\n", sw_field);
+ return -EINVAL;
+ }
+ } else {
+ mlx5dr_info(dmn, "Unsupported action %d modify action\n", action);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static bool
+dr_action_modify_check_is_ttl_modify(const __be64 *sw_action)
+{
+ u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
+
+ return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
+}
+
+static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
+ u32 max_hw_actions,
+ u32 num_sw_actions,
+ __be64 sw_actions[],
+ __be64 hw_actions[],
+ u32 *num_hw_actions,
+ bool *modify_ttl)
+{
+ const struct dr_action_modify_field_conv *hw_action_info;
+ u16 hw_field = MLX5DR_ACTION_MDFY_HW_FLD_RESERVED;
+ u32 l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE;
+ u32 l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE;
+ int ret, i, hw_idx = 0;
+ __be64 *sw_action;
+ __be64 hw_action;
+
+ *modify_ttl = false;
+
+ for (i = 0; i < num_sw_actions; i++) {
+ sw_action = &sw_actions[i];
+
+ ret = dr_action_modify_check_field_limitation(dmn, sw_action);
+ if (ret)
+ return ret;
+
+ if (!(*modify_ttl))
+ *modify_ttl = dr_action_modify_check_is_ttl_modify(sw_action);
+
+ /* Convert SW action to HW action */
+ ret = dr_action_modify_sw_to_hw(dmn,
+ sw_action,
+ &hw_action,
+ &hw_action_info);
+ if (ret)
+ return ret;
+
+ /* Due to a HW limitation we cannot modify two different L3 types */
+ if (l3_type && hw_action_info->l3_type &&
+ hw_action_info->l3_type != l3_type) {
+ mlx5dr_dbg(dmn, "Action list can't support two different L3 types\n");
+ return -EINVAL;
+ }
+ if (hw_action_info->l3_type)
+ l3_type = hw_action_info->l3_type;
+
+ /* Due to a HW limitation we cannot modify two different L4 types */
+ if (l4_type && hw_action_info->l4_type &&
+ hw_action_info->l4_type != l4_type) {
+ mlx5dr_dbg(dmn, "Action list can't support two different L4 types\n");
+ return -EINVAL;
+ }
+ if (hw_action_info->l4_type)
+ l4_type = hw_action_info->l4_type;
+
+ /* HW reads and executes two actions at once; this means we
+ * need to create a gap if two actions access the same field
+ */
+ if ((hw_idx % 2) && hw_field == hw_action_info->hw_field) {
+ /* Check that after gap insertion the total number of HW
+ * modify actions doesn't exceed the limit
+ */
+ hw_idx++;
+ if ((num_sw_actions + hw_idx - i) >= max_hw_actions) {
+ mlx5dr_dbg(dmn, "Modify header action number exceeds HW limit\n");
+ return -EINVAL;
+ }
+ }
+ hw_field = hw_action_info->hw_field;
+
+ hw_actions[hw_idx] = hw_action;
+ hw_idx++;
+ }
+
+ *num_hw_actions = hw_idx;
+
+ return 0;
+}
+
+static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
+ size_t actions_sz,
+ __be64 actions[],
+ struct mlx5dr_action *action)
+{
+ struct mlx5dr_icm_chunk *chunk;
+ u32 max_hw_actions;
+ u32 num_hw_actions;
+ u32 num_sw_actions;
+ __be64 *hw_actions;
+ bool modify_ttl;
+ int ret;
+
+ num_sw_actions = actions_sz / DR_MODIFY_ACTION_SIZE;
+ max_hw_actions = mlx5dr_icm_pool_chunk_size_to_entries(DR_CHUNK_SIZE_16);
+
+ if (num_sw_actions > max_hw_actions) {
+ mlx5dr_dbg(dmn, "Max number of actions %d exceeds limit %d\n",
+ num_sw_actions, max_hw_actions);
+ return -EINVAL;
+ }
+
+ chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool, DR_CHUNK_SIZE_16);
+ if (!chunk)
+ return -ENOMEM;
+
+ hw_actions = kcalloc(max_hw_actions, DR_MODIFY_ACTION_SIZE, GFP_KERNEL);
+ if (!hw_actions) {
+ ret = -ENOMEM;
+ goto free_chunk;
+ }
+
+ ret = dr_actions_convert_modify_header(dmn,
+ max_hw_actions,
+ num_sw_actions,
+ actions,
+ hw_actions,
+ &num_hw_actions,
+ &modify_ttl);
+ if (ret)
+ goto free_hw_actions;
+
+ action->rewrite.chunk = chunk;
+ action->rewrite.modify_ttl = modify_ttl;
+ action->rewrite.data = (u8 *)hw_actions;
+ action->rewrite.num_of_actions = num_hw_actions;
+ action->rewrite.index = (chunk->icm_addr -
+ dmn->info.caps.hdr_modify_icm_addr) /
+ ACTION_CACHE_LINE_SIZE;
+
+ ret = mlx5dr_send_postsend_action(dmn, action);
+ if (ret)
+ goto free_hw_actions;
+
+ return 0;
+
+free_hw_actions:
+ kfree(hw_actions);
+free_chunk:
+ mlx5dr_icm_free_chunk(chunk);
+ return ret;
+}
+
+struct mlx5dr_action *
+mlx5dr_action_create_modify_header(struct mlx5dr_domain *dmn,
+ u32 flags,
+ size_t actions_sz,
+ __be64 actions[])
+{
+ struct mlx5dr_action *action;
+ int ret = 0;
+
+ refcount_inc(&dmn->refcount);
+
+ if (actions_sz % DR_MODIFY_ACTION_SIZE) {
+ mlx5dr_dbg(dmn, "Invalid modify actions size provided\n");
+ goto dec_ref;
+ }
+
+ action = dr_action_create_generic(DR_ACTION_TYP_MODIFY_HDR);
+ if (!action)
+ goto dec_ref;
+
+ action->rewrite.dmn = dmn;
+
+ ret = dr_action_create_modify_action(dmn,
+ actions_sz,
+ actions,
+ action);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Failed creating modify header action %d\n", ret);
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+dec_ref:
+ refcount_dec(&dmn->refcount);
+ return NULL;
+}
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
+ u32 vport, u8 vhca_id_valid,
+ u16 vhca_id)
+{
+ struct mlx5dr_cmd_vport_cap *vport_cap;
+ struct mlx5dr_domain *vport_dmn;
+ struct mlx5dr_action *action;
+ u8 peer_vport;
+
+ peer_vport = vhca_id_valid && (vhca_id != dmn->info.caps.gvmi);
+ vport_dmn = peer_vport ? dmn->peer_dmn : dmn;
+ if (!vport_dmn) {
+ mlx5dr_dbg(dmn, "No peer vport domain for given vhca_id\n");
+ return NULL;
+ }
+
+ if (vport_dmn->type != MLX5DR_DOMAIN_TYPE_FDB) {
+ mlx5dr_dbg(dmn, "Domain doesn't support vport actions\n");
+ return NULL;
+ }
+
+ vport_cap = mlx5dr_get_vport_cap(&vport_dmn->info.caps, vport);
+ if (!vport_cap) {
+ mlx5dr_dbg(dmn, "Failed to get vport %d caps\n", vport);
+ return NULL;
+ }
+
+ action = dr_action_create_generic(DR_ACTION_TYP_VPORT);
+ if (!action)
+ return NULL;
+
+ action->vport.dmn = vport_dmn;
+ action->vport.caps = vport_cap;
+
+ return action;
+}
+
+int mlx5dr_action_destroy(struct mlx5dr_action *action)
+{
+ if (refcount_read(&action->refcount) > 1)
+ return -EBUSY;
+
+ switch (action->action_type) {
+ case DR_ACTION_TYP_FT:
+ if (!action->dest_tbl.is_fw_tbl)
+ refcount_dec(&action->dest_tbl.tbl->refcount);
+ break;
+ case DR_ACTION_TYP_TNL_L2_TO_L2:
+ refcount_dec(&action->reformat.dmn->refcount);
+ break;
+ case DR_ACTION_TYP_TNL_L3_TO_L2:
+ mlx5dr_icm_free_chunk(action->rewrite.chunk);
+ refcount_dec(&action->reformat.dmn->refcount);
+ break;
+ case DR_ACTION_TYP_L2_TO_TNL_L2:
+ case DR_ACTION_TYP_L2_TO_TNL_L3:
+ mlx5dr_cmd_destroy_reformat_ctx((action->reformat.dmn)->mdev,
+ action->reformat.reformat_id);
+ refcount_dec(&action->reformat.dmn->refcount);
+ break;
+ case DR_ACTION_TYP_MODIFY_HDR:
+ mlx5dr_icm_free_chunk(action->rewrite.chunk);
+ refcount_dec(&action->rewrite.dmn->refcount);
+ break;
+ default:
+ break;
+ }
+
+ kfree(action);
+ return 0;
+}
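+
+/* Lifetime sketch: actions are created once via the constructors above,
+ * rules take a reference while they use an action (see refcount), and
+ * mlx5dr_action_destroy() fails with -EBUSY while such a reference is
+ * still outstanding (refcount > 1).
+ */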
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
new file mode 100644
index 000000000000..41662c4e2664
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
+ bool other_vport,
+ u16 vport_number,
+ u64 *icm_address_rx,
+ u64 *icm_address_tx)
+{
+ u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
+ int err;
+
+ MLX5_SET(query_esw_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
+ MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);
+ MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *icm_address_rx =
+ MLX5_GET64(query_esw_vport_context_out, out,
+ esw_vport_context.sw_steering_vport_icm_address_rx);
+ *icm_address_tx =
+ MLX5_GET64(query_esw_vport_context_out, out,
+ esw_vport_context.sw_steering_vport_icm_address_tx);
+ return 0;
+}
+
+int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
+ u16 vport_number, u16 *gvmi)
+{
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+ int out_size;
+ void *out;
+ int err;
+
+ out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ out = kzalloc(out_size, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
+ MLX5_SET(query_hca_cap_in, in, function_id, vport_number);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
+ HCA_CAP_OPMOD_GET_CUR);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (err) {
+ kfree(out);
+ return err;
+ }
+
+ *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
+
+ kfree(out);
+ return 0;
+}
+
+int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
+ struct mlx5dr_esw_caps *caps)
+{
+ caps->drop_icm_address_rx =
+ MLX5_CAP64_ESW_FLOWTABLE(mdev,
+ sw_steering_fdb_action_drop_icm_address_rx);
+ caps->drop_icm_address_tx =
+ MLX5_CAP64_ESW_FLOWTABLE(mdev,
+ sw_steering_fdb_action_drop_icm_address_tx);
+ caps->uplink_icm_address_rx =
+ MLX5_CAP64_ESW_FLOWTABLE(mdev,
+ sw_steering_uplink_icm_address_rx);
+ caps->uplink_icm_address_tx =
+ MLX5_CAP64_ESW_FLOWTABLE(mdev,
+ sw_steering_uplink_icm_address_tx);
+ caps->sw_owner =
+ MLX5_CAP_ESW_FLOWTABLE_FDB(mdev,
+ sw_owner);
+
+ return 0;
+}
+
+int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
+ struct mlx5dr_cmd_caps *caps)
+{
+ caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required);
+ caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);
+ caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
+ caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
+
+ if (mlx5dr_matcher_supp_flex_parser_icmp_v4(caps)) {
+ caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
+ caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
+ }
+
+ if (mlx5dr_matcher_supp_flex_parser_icmp_v6(caps)) {
+ caps->flex_parser_id_icmpv6_dw0 =
+ MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
+ caps->flex_parser_id_icmpv6_dw1 =
+ MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
+ }
+
+ caps->nic_rx_drop_address =
+ MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
+ caps->nic_tx_drop_address =
+ MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_drop_icm_address);
+ caps->nic_tx_allow_address =
+ MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);
+
+ caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
+ caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
+
+ caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
+
+ caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
+ caps->hdr_modify_icm_addr =
+ MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);
+
+ caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);
+
+ return 0;
+}
+
+int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
+ enum fs_flow_table_type type,
+ u32 table_id,
+ struct mlx5dr_cmd_query_flow_table_details *output)
+{
+ u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {};
+ int err;
+
+ MLX5_SET(query_flow_table_in, in, opcode,
+ MLX5_CMD_OP_QUERY_FLOW_TABLE);
+
+ MLX5_SET(query_flow_table_in, in, table_type, type);
+ MLX5_SET(query_flow_table_in, in, table_id, table_id);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ output->status = MLX5_GET(query_flow_table_out, out, status);
+ output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level);
+
+ output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
+ flow_table_context.sw_owner_icm_root_1);
+ output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
+ flow_table_context.sw_owner_icm_root_0);
+
+ return 0;
+}
+
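+/* SYNC_STEERING makes STEs and actions that SW wrote directly into ICM
+ * visible to the device before traffic relies on them.
+ */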
+int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
+{
+ u32 out[MLX5_ST_SZ_DW(sync_steering_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
+
+ MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id,
+ u32 modify_header_id,
+ u32 vport_id)
+{
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
+ void *in_flow_context;
+ unsigned int inlen;
+ void *in_dests;
+ u32 *in;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
+ 1 * MLX5_ST_SZ_BYTES(dest_format_struct); /* One destination only */
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+ MLX5_SET(set_fte_in, in, table_type, table_type);
+ MLX5_SET(set_fte_in, in, table_id, table_id);
+
+ in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+ MLX5_SET(flow_context, in_flow_context, modify_header_id, modify_header_id);
+ MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
+ MLX5_SET(flow_context, in_flow_context, action,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR);
+
+ in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+ MLX5_SET(dest_format_struct, in_dests, destination_type,
+ MLX5_FLOW_DESTINATION_TYPE_VPORT);
+ MLX5_SET(dest_format_struct, in_dests, destination_id, vport_id);
+
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ kvfree(in);
+
+ return err;
+}
+
+int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id)
+{
+ u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
+
+ MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+ MLX5_SET(delete_fte_in, in, table_type, table_type);
+ MLX5_SET(delete_fte_in, in, table_id, table_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u8 num_of_actions,
+ u64 *actions,
+ u32 *modify_header_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
+ void *p_actions;
+ u32 inlen;
+ u32 *in;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) +
+ num_of_actions * sizeof(u64);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(alloc_modify_header_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
+ MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_of_actions);
+ p_actions = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
+ memcpy(p_actions, actions, num_of_actions * sizeof(u64));
+
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ if (err)
+ goto out;
+
+ *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out,
+ modify_header_id);
+out:
+ kvfree(in);
+ return err;
+}
+
+int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
+ u32 modify_header_id)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
+
+ MLX5_SET(dealloc_modify_header_context_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
+ modify_header_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 *group_id)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ u32 *in;
+ int err;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
+ MLX5_SET(create_flow_group_in, in, table_type, table_type);
+ MLX5_SET(create_flow_group_in, in, table_id, table_id);
+
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ if (err)
+ goto out;
+
+ *group_id = MLX5_GET(create_flow_group_out, out, group_id);
+
+out:
+ kfree(in);
+ return err;
+}
+
+int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {};
+
+ MLX5_SET(destroy_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ MLX5_SET(destroy_flow_group_in, in, table_type, table_type);
+ MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
+ MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u64 icm_addr_rx,
+ u64 icm_addr_tx,
+ u8 level,
+ bool sw_owner,
+ bool term_tbl,
+ u64 *fdb_rx_icm_addr,
+ u32 *table_id)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
+ void *ft_mdev;
+ int err;
+
+ MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
+ MLX5_SET(create_flow_table_in, in, table_type, table_type);
+
+ ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
+ MLX5_SET(flow_table_context, ft_mdev, termination_table, term_tbl);
+ MLX5_SET(flow_table_context, ft_mdev, sw_owner, sw_owner);
+ MLX5_SET(flow_table_context, ft_mdev, level, level);
+
+ if (sw_owner) {
+ /* icm_addr_0 is used for FDB RX / NIC RX / NIC TX,
+ * icm_addr_1 is used for FDB TX
+ */
+ if (table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
+ MLX5_SET64(flow_table_context, ft_mdev,
+ sw_owner_icm_root_0, icm_addr_rx);
+ } else if (table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
+ MLX5_SET64(flow_table_context, ft_mdev,
+ sw_owner_icm_root_0, icm_addr_tx);
+ } else if (table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
+ MLX5_SET64(flow_table_context, ft_mdev,
+ sw_owner_icm_root_0, icm_addr_rx);
+ MLX5_SET64(flow_table_context, ft_mdev,
+ sw_owner_icm_root_1, icm_addr_tx);
+ }
+ }
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *table_id = MLX5_GET(create_flow_table_out, out, table_id);
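+ /* FW-owned FDB tables return the RX ICM address split across three
+ * fields; assemble the full 64-bit address.
+ */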
+ if (!sw_owner && table_type == MLX5_FLOW_TABLE_TYPE_FDB)
+ *fdb_rx_icm_addr =
+ (u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
+ (u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
+ (u64)MLX5_GET(create_flow_table_out, out, icm_address_63_40) << 40;
+
+ return 0;
+}
+
+int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
+ u32 table_id,
+ u32 table_type)
+{
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
+
+ MLX5_SET(destroy_flow_table_in, in, opcode,
+ MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+ MLX5_SET(destroy_flow_table_in, in, table_type, table_type);
+ MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
+ enum mlx5_reformat_ctx_type rt,
+ size_t reformat_size,
+ void *reformat_data,
+ u32 *reformat_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
+ size_t inlen, cmd_data_sz, cmd_total_sz;
+ void *prctx;
+ void *pdata;
+ void *in;
+ int err;
+
+ cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
+ cmd_data_sz = MLX5_FLD_SZ_BYTES(alloc_packet_reformat_context_in,
+ packet_reformat_context.reformat_data);
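+ /* Command length is the base struct minus the flexible reformat_data
+ * placeholder plus the actual payload size, kept 4-byte aligned.
+ */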
+ inlen = ALIGN(cmd_total_sz + reformat_size - cmd_data_sz, 4);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+
+ prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in, packet_reformat_context);
+ pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
+
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, reformat_size);
+ memcpy(pdata, reformat_data, reformat_size);
+
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ if (err)
+ goto out;
+
+ *reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
+
+out:
+ kvfree(in);
+ return err;
+}
+
+void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
+ u32 reformat_id)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
+
+ MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
+ reformat_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
+ u16 index, struct mlx5dr_cmd_gid_attr *attr)
+{
+ u32 out[MLX5_ST_SZ_DW(query_roce_address_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_roce_address_in)] = {};
+ int err;
+
+ MLX5_SET(query_roce_address_in, in, opcode,
+ MLX5_CMD_OP_QUERY_ROCE_ADDRESS);
+
+ MLX5_SET(query_roce_address_in, in, roce_address_index, index);
+ MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ memcpy(&attr->gid,
+ MLX5_ADDR_OF(query_roce_address_out,
+ out, roce_address.source_l3_address),
+ sizeof(attr->gid));
+ memcpy(attr->mac,
+ MLX5_ADDR_OF(query_roce_address_out, out,
+ roce_address.source_mac_47_32),
+ sizeof(attr->mac));
+
+ if (MLX5_GET(query_roce_address_out, out,
+ roce_address.roce_version) == MLX5_ROCE_VERSION_2)
+ attr->roce_ver = MLX5_ROCE_VERSION_2;
+ else
+ attr->roce_ver = MLX5_ROCE_VERSION_1;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c
new file mode 100644
index 000000000000..9e2eccbb1eb8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+/* Copyright (c) 2011-2015 Stephan Brumme. All rights reserved.
+ * Slicing-by-16 contributed by Bulat Ziganshin
+ *
+ * This software is provided 'as-is', without any express or implied warranty.
+ * In no event will the author be held liable for any damages arising from the
+ * use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software.
+ * 2. If you use this software in a product, an acknowledgment in the product
+ * documentation would be appreciated but is not required.
+ * 3. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ *
+ * Taken from http://create.stephan-brumme.com/crc32/ and adapted.
+ */
+
+#include "dr_types.h"
+
+#define DR_STE_CRC_POLY 0xEDB88320L
+
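+/* dr_ste_crc_tab32[i][b] is the CRC of byte b followed by i zero bytes;
+ * this lets eight input bytes be combined with eight table lookups.
+ */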
+static u32 dr_ste_crc_tab32[8][256];
+
+static void dr_crc32_calc_lookup_entry(u32 (*tbl)[256], u8 i, u8 j)
+{
+ tbl[i][j] = (tbl[i - 1][j] >> 8) ^ tbl[0][tbl[i - 1][j] & 0xff];
+}
+
+void mlx5dr_crc32_init_table(void)
+{
+ u32 crc, i, j;
+
+ for (i = 0; i < 256; i++) {
+ crc = i;
+ for (j = 0; j < 8; j++) {
+ if (crc & 0x00000001L)
+ crc = (crc >> 1) ^ DR_STE_CRC_POLY;
+ else
+ crc = crc >> 1;
+ }
+ dr_ste_crc_tab32[0][i] = crc;
+ }
+
+ /* Init CRC lookup tables according to crc_slice_8 algorithm */
+ for (i = 0; i < 256; i++) {
+ dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 1, i);
+ dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 2, i);
+ dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 3, i);
+ dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 4, i);
+ dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 5, i);
+ dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 6, i);
+ dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 7, i);
+ }
+}
+
+/* Compute CRC32 (Slicing-by-8 algorithm) */
+u32 mlx5dr_crc32_slice8_calc(const void *input_data, size_t length)
+{
+ const u32 *curr = (const u32 *)input_data;
+ const u8 *curr_char;
+ u32 crc = 0, one, two;
+
+ if (!input_data)
+ return 0;
+
+ /* Process eight bytes at once (Slicing-by-8) */
+ while (length >= 8) {
+ one = *curr++ ^ crc;
+ two = *curr++;
+
+ crc = dr_ste_crc_tab32[0][(two >> 24) & 0xff]
+ ^ dr_ste_crc_tab32[1][(two >> 16) & 0xff]
+ ^ dr_ste_crc_tab32[2][(two >> 8) & 0xff]
+ ^ dr_ste_crc_tab32[3][two & 0xff]
+ ^ dr_ste_crc_tab32[4][(one >> 24) & 0xff]
+ ^ dr_ste_crc_tab32[5][(one >> 16) & 0xff]
+ ^ dr_ste_crc_tab32[6][(one >> 8) & 0xff]
+ ^ dr_ste_crc_tab32[7][one & 0xff];
+
+ length -= 8;
+ }
+
+ curr_char = (const u8 *)curr;
+ /* Remaining 1 to 7 bytes (standard algorithm) */
+ while (length-- != 0)
+ crc = (crc >> 8) ^ dr_ste_crc_tab32[0][(crc & 0xff)
+ ^ *curr_char++];
+
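+ /* Byte-swap the accumulated CRC into the byte order expected by the
+ * htbl CRC calculation.
+ */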
+ return ((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
+ ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
new file mode 100644
index 000000000000..3b9cf0bccf4d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <linux/mlx5/eswitch.h>
+#include "dr_types.h"
+
+static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
+{
+ /* Per vport cached FW FT for checksum recalculation, this
+ * recalculation is needed due to a HW bug.
+ */
+ dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports,
+ sizeof(dmn->cache.recalc_cs_ft[0]),
+ GFP_KERNEL);
+ if (!dmn->cache.recalc_cs_ft)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn)
+{
+ int i;
+
+ for (i = 0; i < dmn->info.caps.num_vports; i++) {
+ if (!dmn->cache.recalc_cs_ft[i])
+ continue;
+
+ mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]);
+ }
+
+ kfree(dmn->cache.recalc_cs_ft);
+}
+
+int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+ u32 vport_num,
+ u64 *rx_icm_addr)
+{
+ struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+
+ recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num];
+ if (!recalc_cs_ft) {
+ /* Table not in cache, need to allocate a new one */
+ recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
+ if (!recalc_cs_ft)
+ return -EINVAL;
+
+ dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft;
+ }
+
+ *rx_icm_addr = recalc_cs_ft->rx_icm_addr;
+
+ return 0;
+}
+
+static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
+{
+ int ret;
+
+ ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Couldn't allocate PD\n");
+ return ret;
+ }
+
+ dmn->uar = mlx5_get_uars_page(dmn->mdev);
+ if (!dmn->uar) {
+ mlx5dr_err(dmn, "Couldn't allocate UAR\n");
+ ret = -ENOMEM;
+ goto clean_pd;
+ }
+
+ dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
+ if (!dmn->ste_icm_pool) {
+ mlx5dr_err(dmn, "Couldn't get icm memory for %s\n",
+ dev_name(dmn->mdev->device));
+ ret = -ENOMEM;
+ goto clean_uar;
+ }
+
+ dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
+ if (!dmn->action_icm_pool) {
+ mlx5dr_err(dmn, "Couldn't get action icm memory for %s\n",
+ dev_name(dmn->mdev->device));
+ ret = -ENOMEM;
+ goto free_ste_icm_pool;
+ }
+
+ ret = mlx5dr_send_ring_alloc(dmn);
+ if (ret) {
+ mlx5dr_err(dmn, "Couldn't create send-ring for %s\n",
+ dev_name(dmn->mdev->device));
+ goto free_action_icm_pool;
+ }
+
+ return 0;
+
+free_action_icm_pool:
+ mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
+free_ste_icm_pool:
+ mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+clean_uar:
+ mlx5_put_uars_page(dmn->mdev, dmn->uar);
+clean_pd:
+ mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
+
+ return ret;
+}
+
+static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
+{
+ mlx5dr_send_ring_free(dmn, dmn->send_ring);
+ mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
+ mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+ mlx5_put_uars_page(dmn->mdev, dmn->uar);
+ mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
+}
+
+static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
+ bool other_vport,
+ u16 vport_number)
+{
+ struct mlx5dr_cmd_vport_cap *vport_caps;
+ int ret;
+
+ vport_caps = &dmn->info.caps.vports_caps[vport_number];
+
+ ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
+ other_vport,
+ vport_number,
+ &vport_caps->icm_address_rx,
+ &vport_caps->icm_address_tx);
+ if (ret)
+ return ret;
+
+ ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
+ other_vport,
+ vport_number,
+ &vport_caps->vport_gvmi);
+ if (ret)
+ return ret;
+
+ vport_caps->num = vport_number;
+ vport_caps->vhca_gvmi = dmn->info.caps.gvmi;
+
+ return 0;
+}
+
+static int dr_domain_query_vports(struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
+ struct mlx5dr_cmd_vport_cap *wire_vport;
+ int vport;
+ int ret;
+
+ /* Query vports (except wire vport) */
+ for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) {
+ ret = dr_domain_query_vport(dmn, !!vport, vport);
+ if (ret)
+ return ret;
+ }
+
+ /* Last vport is the wire port */
+ wire_vport = &dmn->info.caps.vports_caps[vport];
+ wire_vport->num = WIRE_PORT;
+ wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
+ wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
+ wire_vport->vport_gvmi = 0;
+ wire_vport->vhca_gvmi = dmn->info.caps.gvmi;
+
+ return 0;
+}
+
+static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
+ struct mlx5dr_domain *dmn)
+{
+ int ret;
+
+ if (!dmn->info.caps.eswitch_manager)
+ return -EOPNOTSUPP;
+
+ ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
+ if (ret)
+ return ret;
+
+ dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
+ dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
+ dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;
+
+ dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports,
+ sizeof(dmn->info.caps.vports_caps[0]),
+ GFP_KERNEL);
+ if (!dmn->info.caps.vports_caps)
+ return -ENOMEM;
+
+ ret = dr_domain_query_vports(dmn);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Failed to query vports caps\n");
+ goto free_vports_caps;
+ }
+
+ dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1;
+
+ return 0;
+
+free_vports_caps:
+ kfree(dmn->info.caps.vports_caps);
+ dmn->info.caps.vports_caps = NULL;
+ return ret;
+}
+
+static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
+ struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_cmd_vport_cap *vport_cap;
+ int ret;
+
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
+ mlx5dr_dbg(dmn, "Failed to allocate domain, bad link type\n");
+ return -EOPNOTSUPP;
+ }
+
+ dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);
+
+ ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
+ if (ret)
+ return ret;
+
+ ret = dr_domain_query_fdb_caps(mdev, dmn);
+ if (ret)
+ return ret;
+
+ switch (dmn->type) {
+ case MLX5DR_DOMAIN_TYPE_NIC_RX:
+ if (!dmn->info.caps.rx_sw_owner)
+ return -EOPNOTSUPP;
+
+ dmn->info.supp_sw_steering = true;
+ dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
+ dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
+ dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
+ break;
+ case MLX5DR_DOMAIN_TYPE_NIC_TX:
+ if (!dmn->info.caps.tx_sw_owner)
+ return -EOPNOTSUPP;
+
+ dmn->info.supp_sw_steering = true;
+ dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
+ dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
+ dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
+ break;
+ case MLX5DR_DOMAIN_TYPE_FDB:
+ if (!dmn->info.caps.eswitch_manager)
+ return -EOPNOTSUPP;
+
+ if (!dmn->info.caps.fdb_sw_owner)
+ return -EOPNOTSUPP;
+
+ dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
+ dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
+ vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
+ if (!vport_cap) {
+ mlx5dr_dbg(dmn, "Failed to get esw manager vport\n");
+ return -ENOENT;
+ }
+
+ dmn->info.supp_sw_steering = true;
+ dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
+ dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
+ dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
+ dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
+ break;
+ default:
+ mlx5dr_dbg(dmn, "Invalid domain\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
+{
+ kfree(dmn->info.caps.vports_caps);
+}
+
+struct mlx5dr_domain *
+mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
+{
+ struct mlx5dr_domain *dmn;
+ int ret;
+
+ if (type > MLX5DR_DOMAIN_TYPE_FDB)
+ return NULL;
+
+ dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
+ if (!dmn)
+ return NULL;
+
+ dmn->mdev = mdev;
+ dmn->type = type;
+ refcount_set(&dmn->refcount, 1);
+ mutex_init(&dmn->mutex);
+
+ if (dr_domain_caps_init(mdev, dmn)) {
+ mlx5dr_dbg(dmn, "Failed init domain, no caps\n");
+ goto free_domain;
+ }
+
+ dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
+ dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
+ dmn->info.caps.log_icm_size);
+
+ if (!dmn->info.supp_sw_steering) {
+ mlx5dr_err(dmn, "SW steering not supported for %s\n",
+ dev_name(mdev->device));
+ goto uninit_caps;
+ }
+
+ /* Allocate resources */
+ ret = dr_domain_init_resources(dmn);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed init domain resources for %s\n",
+ dev_name(mdev->device));
+ goto uninit_caps;
+ }
+
+ ret = dr_domain_init_cache(dmn);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed initialize domain cache\n");
+ goto uninit_resourses;
+ }
+
+ /* Init CRC table for htbl CRC calculation */
+ mlx5dr_crc32_init_table();
+
+ return dmn;
+
+uninit_resources:
+ dr_domain_uninit_resources(dmn);
+uninit_caps:
+ dr_domain_caps_uninit(dmn);
+free_domain:
+ kfree(dmn);
+ return NULL;
+}
+
+/* Ensure the device steering tables are in sync with updates made by SW
+ * rule insertion.
+ */
+int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
+{
+ int ret = 0;
+
+ if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
+ mutex_lock(&dmn->mutex);
+ ret = mlx5dr_send_ring_force_drain(dmn);
+ mutex_unlock(&dmn->mutex);
+ if (ret)
+ return ret;
+ }
+
+ if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
+ ret = mlx5dr_cmd_sync_steering(dmn->mdev);
+
+ return ret;
+}
+
+int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
+{
+ if (refcount_read(&dmn->refcount) > 1)
+ return -EBUSY;
+
+ /* make sure resources are not used by the hardware */
+ mlx5dr_cmd_sync_steering(dmn->mdev);
+ dr_domain_uninit_cache(dmn);
+ dr_domain_uninit_resources(dmn);
+ dr_domain_caps_uninit(dmn);
+ mutex_destroy(&dmn->mutex);
+ kfree(dmn);
+ return 0;
+}
+
+void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
+ struct mlx5dr_domain *peer_dmn)
+{
+ mutex_lock(&dmn->mutex);
+
+ if (dmn->peer_dmn)
+ refcount_dec(&dmn->peer_dmn->refcount);
+
+ dmn->peer_dmn = peer_dmn;
+
+ if (dmn->peer_dmn)
+ refcount_inc(&dmn->peer_dmn->refcount);
+
+ mutex_unlock(&dmn->mutex);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
new file mode 100644
index 000000000000..60ef6e6171e3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <linux/types.h>
+#include "dr_types.h"
+
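+/* Create a FW-owned termination table that forwards to the given vport
+ * while adding zero to the TTL; this forces the HW to recalculate the
+ * checksum (workaround for the HW bug noted in dr_domain.c).
+ */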
+struct mlx5dr_fw_recalc_cs_ft *
+mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num)
+{
+ struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+ u32 table_id, group_id, modify_hdr_id;
+ u64 rx_icm_addr, modify_ttl_action;
+ int ret;
+
+ recalc_cs_ft = kzalloc(sizeof(*recalc_cs_ft), GFP_KERNEL);
+ if (!recalc_cs_ft)
+ return NULL;
+
+ ret = mlx5dr_cmd_create_flow_table(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB,
+ 0, 0, dmn->info.caps.max_ft_level - 1,
+ false, true, &rx_icm_addr, &table_id);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed creating TTL W/A FW flow table %d\n", ret);
+ goto free_ttl_tbl;
+ }
+
+ ret = mlx5dr_cmd_create_empty_flow_group(dmn->mdev,
+ MLX5_FLOW_TABLE_TYPE_FDB,
+ table_id, &group_id);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed creating TTL W/A FW flow group %d\n", ret);
+ goto destroy_flow_table;
+ }
+
+ /* Modify TTL action by adding zero to trigger CS recalculation */
+ modify_ttl_action = 0;
+ MLX5_SET(set_action_in, &modify_ttl_action, action_type, MLX5_ACTION_TYPE_ADD);
+ MLX5_SET(set_action_in, &modify_ttl_action, field, MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
+
+ ret = mlx5dr_cmd_alloc_modify_header(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB, 1,
+ &modify_ttl_action,
+ &modify_hdr_id);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed modify header TTL %d\n", ret);
+ goto destroy_flow_group;
+ }
+
+ ret = mlx5dr_cmd_set_fte_modify_and_vport(dmn->mdev,
+ MLX5_FLOW_TABLE_TYPE_FDB,
+ table_id, group_id, modify_hdr_id,
+ vport_num);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed setting TTL W/A flow table entry %d\n", ret);
+ goto dealloc_modify_header;
+ }
+
+ recalc_cs_ft->modify_hdr_id = modify_hdr_id;
+ recalc_cs_ft->rx_icm_addr = rx_icm_addr;
+ recalc_cs_ft->table_id = table_id;
+ recalc_cs_ft->group_id = group_id;
+
+ return recalc_cs_ft;
+
+dealloc_modify_header:
+ mlx5dr_cmd_dealloc_modify_header(dmn->mdev, modify_hdr_id);
+destroy_flow_group:
+ mlx5dr_cmd_destroy_flow_group(dmn->mdev,
+ MLX5_FLOW_TABLE_TYPE_FDB,
+ table_id, group_id);
+destroy_flow_table:
+ mlx5dr_cmd_destroy_flow_table(dmn->mdev, table_id, MLX5_FLOW_TABLE_TYPE_FDB);
+free_ttl_tbl:
+ kfree(recalc_cs_ft);
+ return NULL;
+}
+
+void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
+ struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft)
+{
+ mlx5dr_cmd_del_flow_table_entry(dmn->mdev,
+ MLX5_FLOW_TABLE_TYPE_FDB,
+ recalc_cs_ft->table_id);
+ mlx5dr_cmd_dealloc_modify_header(dmn->mdev, recalc_cs_ft->modify_hdr_id);
+ mlx5dr_cmd_destroy_flow_group(dmn->mdev,
+ MLX5_FLOW_TABLE_TYPE_FDB,
+ recalc_cs_ft->table_id,
+ recalc_cs_ft->group_id);
+ mlx5dr_cmd_destroy_flow_table(dmn->mdev,
+ recalc_cs_ft->table_id,
+ MLX5_FLOW_TABLE_TYPE_FDB);
+
+ kfree(recalc_cs_ft);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
new file mode 100644
index 000000000000..e76f61e7555e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -0,0 +1,570 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
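+/* Sync the device and recycle hot chunks once the ICM held on hot lists
+ * crosses this threshold.
+ */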
+#define DR_ICM_SYNC_THRESHOLD (64 * 1024 * 1024)
+
+struct mlx5dr_icm_pool;
+
+struct mlx5dr_icm_bucket {
+ struct mlx5dr_icm_pool *pool;
+
+ /* Chunks that aren't visible to HW, neither directly nor via cache */
+ struct list_head free_list;
+ unsigned int free_list_count;
+
+ /* Used chunks, HW may be accessing this memory */
+ struct list_head used_list;
+ unsigned int used_list_count;
+
+ /* HW may still be accessing this memory, but at some future,
+ * undetermined time it will stop. Before sync_ste is called, this
+ * list is moved to sync_list.
+ */
+ struct list_head hot_list;
+ unsigned int hot_list_count;
+
+ /* Pending sync list, entries from the hot list are moved to this list.
+ * sync_ste is executed and then sync_list is concatenated to the free list
+ */
+ struct list_head sync_list;
+ unsigned int sync_list_count;
+
+ u32 total_chunks;
+ u32 num_of_entries;
+ u32 entry_size;
+ /* protect the ICM bucket */
+ struct mutex mutex;
+};
+
+struct mlx5dr_icm_pool {
+ struct mlx5dr_icm_bucket *buckets;
+ enum mlx5dr_icm_type icm_type;
+ enum mlx5dr_icm_chunk_size max_log_chunk_sz;
+ enum mlx5dr_icm_chunk_size num_of_buckets;
+ struct list_head icm_mr_list;
+ /* protect the ICM MR list */
+ struct mutex mr_mutex;
+ struct mlx5dr_domain *dmn;
+};
+
+struct mlx5dr_icm_dm {
+ u32 obj_id;
+ enum mlx5_sw_icm_type type;
+ u64 addr;
+ size_t length;
+};
+
+struct mlx5dr_icm_mr {
+ struct mlx5dr_icm_pool *pool;
+ struct mlx5_core_mkey mkey;
+ struct mlx5dr_icm_dm dm;
+ size_t used_length;
+ size_t length;
+ u64 icm_start_addr;
+ struct list_head mr_list;
+};
+
+static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
+ u32 pd, u64 length, u64 start_addr, int mode,
+ struct mlx5_core_mkey *mkey)
+{
+ u32 inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
+ void *mkc;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, access_mode_1_0, mode);
+ MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ if (mode == MLX5_MKC_ACCESS_MODE_SW_ICM) {
+ MLX5_SET(mkc, mkc, rw, 1);
+ MLX5_SET(mkc, mkc, rr, 1);
+ }
+
+ MLX5_SET64(mkc, mkc, len, length);
+ MLX5_SET(mkc, mkc, pd, pd);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET64(mkc, mkc, start_addr, start_addr);
+
+ return mlx5_core_create_mkey(mdev, mkey, in, inlen);
+}
+
+static struct mlx5dr_icm_mr *
+dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool,
+ enum mlx5_sw_icm_type type,
+ size_t align_base)
+{
+ struct mlx5_core_dev *mdev = pool->dmn->mdev;
+ struct mlx5dr_icm_mr *icm_mr;
+ size_t align_diff;
+ int err;
+
+ icm_mr = kvzalloc(sizeof(*icm_mr), GFP_KERNEL);
+ if (!icm_mr)
+ return NULL;
+
+ icm_mr->pool = pool;
+ INIT_LIST_HEAD(&icm_mr->mr_list);
+
+ icm_mr->dm.type = type;
+
+ /* 2^log_biggest_table * entry-size * double-for-alignment */
+ icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+ pool->icm_type) * 2;
+
+ err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
+ &icm_mr->dm.addr, &icm_mr->dm.obj_id);
+ if (err) {
+ mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err);
+ goto free_icm_mr;
+ }
+
+ /* Register device memory */
+ err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn,
+ icm_mr->dm.length,
+ icm_mr->dm.addr,
+ MLX5_MKC_ACCESS_MODE_SW_ICM,
+ &icm_mr->mkey);
+ if (err) {
+ mlx5dr_err(pool->dmn, "Failed to create SW ICM MKEY, err (%d)\n", err);
+ goto free_dm;
+ }
+
+ icm_mr->icm_start_addr = icm_mr->dm.addr;
+
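+ /* Skip the unaligned head of the region so the first chunk handed
+ * out starts on an align_base boundary.
+ */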
+ align_diff = icm_mr->icm_start_addr % align_base;
+ if (align_diff)
+ icm_mr->used_length = align_base - align_diff;
+
+ list_add_tail(&icm_mr->mr_list, &pool->icm_mr_list);
+
+ return icm_mr;
+
+free_dm:
+ mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
+ icm_mr->dm.addr, icm_mr->dm.obj_id);
+free_icm_mr:
+ kvfree(icm_mr);
+ return NULL;
+}
+
+static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
+{
+ struct mlx5_core_dev *mdev = icm_mr->pool->dmn->mdev;
+ struct mlx5dr_icm_dm *dm = &icm_mr->dm;
+
+ list_del(&icm_mr->mr_list);
+ mlx5_core_destroy_mkey(mdev, &icm_mr->mkey);
+ mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0,
+ dm->addr, dm->obj_id);
+ kvfree(icm_mr);
+}
+
+static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk)
+{
+ struct mlx5dr_icm_bucket *bucket = chunk->bucket;
+
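+ /* SW shadows for the chunk's STEs: per-entry SW state, a reduced
+ * copy of each HW STE and the per-entry miss-list heads.
+ */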
+ chunk->ste_arr = kvzalloc(bucket->num_of_entries *
+ sizeof(chunk->ste_arr[0]), GFP_KERNEL);
+ if (!chunk->ste_arr)
+ return -ENOMEM;
+
+ chunk->hw_ste_arr = kvzalloc(bucket->num_of_entries *
+ DR_STE_SIZE_REDUCED, GFP_KERNEL);
+ if (!chunk->hw_ste_arr)
+ goto out_free_ste_arr;
+
+ chunk->miss_list = kvmalloc(bucket->num_of_entries *
+ sizeof(chunk->miss_list[0]), GFP_KERNEL);
+ if (!chunk->miss_list)
+ goto out_free_hw_ste_arr;
+
+ return 0;
+
+out_free_hw_ste_arr:
+ kvfree(chunk->hw_ste_arr);
+out_free_ste_arr:
+ kvfree(chunk->ste_arr);
+ return -ENOMEM;
+}
+
+static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket)
+{
+ size_t mr_free_size, mr_req_size, mr_row_size;
+ struct mlx5dr_icm_pool *pool = bucket->pool;
+ struct mlx5dr_icm_mr *icm_mr = NULL;
+ struct mlx5dr_icm_chunk *chunk;
+ enum mlx5_sw_icm_type dm_type;
+ size_t align_base;
+ int i, err = 0;
+
+ mr_req_size = bucket->num_of_entries * bucket->entry_size;
+ mr_row_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+ pool->icm_type);
+
+ if (pool->icm_type == DR_ICM_TYPE_STE) {
+ dm_type = MLX5_SW_ICM_TYPE_STEERING;
+ /* Align base is the biggest chunk size / row size */
+ align_base = mr_row_size;
+ } else {
+ dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
+ /* Align base is 64B */
+ align_base = DR_ICM_MODIFY_HDR_ALIGN_BASE;
+ }
+
+ mutex_lock(&pool->mr_mutex);
+ if (!list_empty(&pool->icm_mr_list)) {
+ icm_mr = list_last_entry(&pool->icm_mr_list,
+ struct mlx5dr_icm_mr, mr_list);
+
+ if (icm_mr)
+ mr_free_size = icm_mr->dm.length - icm_mr->used_length;
+ }
+
+ if (!icm_mr || mr_free_size < mr_row_size) {
+ icm_mr = dr_icm_pool_mr_create(pool, dm_type, align_base);
+ if (!icm_mr) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ }
+
+ /* Create memory aligned chunks */
+ for (i = 0; i < mr_row_size / mr_req_size; i++) {
+ chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (!chunk) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ chunk->bucket = bucket;
+ chunk->rkey = icm_mr->mkey.key;
+ /* mr start addr is zero based */
+ chunk->mr_addr = icm_mr->used_length;
+ chunk->icm_addr = (uintptr_t)icm_mr->icm_start_addr + icm_mr->used_length;
+ icm_mr->used_length += mr_req_size;
+ chunk->num_of_entries = bucket->num_of_entries;
+ chunk->byte_size = chunk->num_of_entries * bucket->entry_size;
+
+ if (pool->icm_type == DR_ICM_TYPE_STE) {
+ err = dr_icm_chunk_ste_init(chunk);
+ if (err)
+ goto out_free_chunk;
+ }
+
+ INIT_LIST_HEAD(&chunk->chunk_list);
+ list_add(&chunk->chunk_list, &bucket->free_list);
+ bucket->free_list_count++;
+ bucket->total_chunks++;
+ }
+ mutex_unlock(&pool->mr_mutex);
+ return 0;
+
+out_free_chunk:
+ kvfree(chunk);
+out_err:
+ mutex_unlock(&pool->mr_mutex);
+ return err;
+}
+
+static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
+{
+ kvfree(chunk->miss_list);
+ kvfree(chunk->hw_ste_arr);
+ kvfree(chunk->ste_arr);
+}
+
+static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk)
+{
+ struct mlx5dr_icm_bucket *bucket = chunk->bucket;
+
+ list_del(&chunk->chunk_list);
+ bucket->total_chunks--;
+
+ if (bucket->pool->icm_type == DR_ICM_TYPE_STE)
+ dr_icm_chunk_ste_cleanup(chunk);
+
+ kvfree(chunk);
+}
+
+static void dr_icm_bucket_init(struct mlx5dr_icm_pool *pool,
+ struct mlx5dr_icm_bucket *bucket,
+ enum mlx5dr_icm_chunk_size chunk_size)
+{
+ if (pool->icm_type == DR_ICM_TYPE_STE)
+ bucket->entry_size = DR_STE_SIZE;
+ else
+ bucket->entry_size = DR_MODIFY_ACTION_SIZE;
+
+ bucket->num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
+ bucket->pool = pool;
+ mutex_init(&bucket->mutex);
+ INIT_LIST_HEAD(&bucket->free_list);
+ INIT_LIST_HEAD(&bucket->used_list);
+ INIT_LIST_HEAD(&bucket->hot_list);
+ INIT_LIST_HEAD(&bucket->sync_list);
+}
+
+static void dr_icm_bucket_cleanup(struct mlx5dr_icm_bucket *bucket)
+{
+ struct mlx5dr_icm_chunk *chunk, *next;
+
+ mutex_destroy(&bucket->mutex);
+ list_splice_tail_init(&bucket->sync_list, &bucket->free_list);
+ list_splice_tail_init(&bucket->hot_list, &bucket->free_list);
+
+ list_for_each_entry_safe(chunk, next, &bucket->free_list, chunk_list)
+ dr_icm_chunk_destroy(chunk);
+
+ WARN_ON(bucket->total_chunks != 0);
+
+ /* Cleanup of unreturned chunks */
+ list_for_each_entry_safe(chunk, next, &bucket->used_list, chunk_list)
+ dr_icm_chunk_destroy(chunk);
+}
+
+static u64 dr_icm_hot_mem_size(struct mlx5dr_icm_pool *pool)
+{
+ u64 hot_size = 0;
+ int chunk_order;
+
+ for (chunk_order = 0; chunk_order < pool->num_of_buckets; chunk_order++)
+ hot_size += pool->buckets[chunk_order].hot_list_count *
+ mlx5dr_icm_pool_chunk_size_to_byte(chunk_order, pool->icm_type);
+
+ return hot_size;
+}
+
+static bool dr_icm_reuse_hot_entries(struct mlx5dr_icm_pool *pool,
+ struct mlx5dr_icm_bucket *bucket)
+{
+ u64 bytes_for_sync;
+
+ bytes_for_sync = dr_icm_hot_mem_size(pool);
+ if (bytes_for_sync < DR_ICM_SYNC_THRESHOLD || !bucket->hot_list_count)
+ return false;
+
+ return true;
+}
+
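+/* Move a bucket's hot chunks onto its sync list ahead of a HW sync */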
+static void dr_icm_chill_bucket_start(struct mlx5dr_icm_bucket *bucket)
+{
+ list_splice_tail_init(&bucket->hot_list, &bucket->sync_list);
+ bucket->sync_list_count += bucket->hot_list_count;
+ bucket->hot_list_count = 0;
+}
+
+static void dr_icm_chill_bucket_end(struct mlx5dr_icm_bucket *bucket)
+{
+ list_splice_tail_init(&bucket->sync_list, &bucket->free_list);
+ bucket->free_list_count += bucket->sync_list_count;
+ bucket->sync_list_count = 0;
+}
+
+static void dr_icm_chill_bucket_abort(struct mlx5dr_icm_bucket *bucket)
+{
+ list_splice_tail_init(&bucket->sync_list, &bucket->hot_list);
+ bucket->hot_list_count += bucket->sync_list_count;
+ bucket->sync_list_count = 0;
+}
+
+static void dr_icm_chill_buckets_start(struct mlx5dr_icm_pool *pool,
+ struct mlx5dr_icm_bucket *cb,
+ bool buckets[DR_CHUNK_SIZE_MAX])
+{
+ struct mlx5dr_icm_bucket *bucket;
+ int i;
+
+ for (i = 0; i < pool->num_of_buckets; i++) {
+ bucket = &pool->buckets[i];
+ if (bucket == cb) {
+ dr_icm_chill_bucket_start(bucket);
+ continue;
+ }
+
+ /* The mutex is released only at the end of the process, after
+ * sync_ste has executed, in dr_icm_chill_buckets_end().
+ */
+ if (mutex_trylock(&bucket->mutex)) {
+ dr_icm_chill_bucket_start(bucket);
+ buckets[i] = true;
+ }
+ }
+}
+
+static void dr_icm_chill_buckets_end(struct mlx5dr_icm_pool *pool,
+ struct mlx5dr_icm_bucket *cb,
+ bool buckets[DR_CHUNK_SIZE_MAX])
+{
+ struct mlx5dr_icm_bucket *bucket;
+ int i;
+
+ for (i = 0; i < pool->num_of_buckets; i++) {
+ bucket = &pool->buckets[i];
+ if (bucket == cb) {
+ dr_icm_chill_bucket_end(bucket);
+ continue;
+ }
+
+ if (!buckets[i])
+ continue;
+
+ dr_icm_chill_bucket_end(bucket);
+ mutex_unlock(&bucket->mutex);
+ }
+}
+
+static void dr_icm_chill_buckets_abort(struct mlx5dr_icm_pool *pool,
+ struct mlx5dr_icm_bucket *cb,
+ bool buckets[DR_CHUNK_SIZE_MAX])
+{
+ struct mlx5dr_icm_bucket *bucket;
+ int i;
+
+ for (i = 0; i < pool->num_of_buckets; i++) {
+ bucket = &pool->buckets[i];
+ if (bucket == cb) {
+ dr_icm_chill_bucket_abort(bucket);
+ continue;
+ }
+
+ if (!buckets[i])
+ continue;
+
+ dr_icm_chill_bucket_abort(bucket);
+ mutex_unlock(&bucket->mutex);
+ }
+}
+
+/* Allocate an ICM chunk; each chunk holds a piece of ICM memory and,
+ * for STE chunks, SW memory used to manage the HW STEs.
+ */
+struct mlx5dr_icm_chunk *
+mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
+ enum mlx5dr_icm_chunk_size chunk_size)
+{
+ struct mlx5dr_icm_chunk *chunk = NULL; /* Fix compilation warning */
+ bool buckets[DR_CHUNK_SIZE_MAX] = {};
+ struct mlx5dr_icm_bucket *bucket;
+ int err;
+
+ if (chunk_size > pool->max_log_chunk_sz)
+ return NULL;
+
+ bucket = &pool->buckets[chunk_size];
+
+ mutex_lock(&bucket->mutex);
+
+ /* Take chunk from pool if available, otherwise allocate new chunks */
+ if (list_empty(&bucket->free_list)) {
+ if (dr_icm_reuse_hot_entries(pool, bucket)) {
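+ /* Chill the other buckets too; buckets[] records which ones
+ * were locked so they can be unlocked after the sync.
+ */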
+ dr_icm_chill_buckets_start(pool, bucket, buckets);
+ err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
+ if (err) {
+ dr_icm_chill_buckets_abort(pool, bucket, buckets);
+ mlx5dr_dbg(pool->dmn, "Sync_steering failed\n");
+ chunk = NULL;
+ goto out;
+ }
+ dr_icm_chill_buckets_end(pool, bucket, buckets);
+ } else {
+ dr_icm_chunks_create(bucket);
+ }
+ }
+
+ if (!list_empty(&bucket->free_list)) {
+ chunk = list_last_entry(&bucket->free_list,
+ struct mlx5dr_icm_chunk,
+ chunk_list);
+ if (chunk) {
+ list_del_init(&chunk->chunk_list);
+ list_add_tail(&chunk->chunk_list, &bucket->used_list);
+ bucket->free_list_count--;
+ bucket->used_list_count++;
+ }
+ }
+out:
+ mutex_unlock(&bucket->mutex);
+ return chunk;
+}
+
+void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
+{
+ struct mlx5dr_icm_bucket *bucket = chunk->bucket;
+
+ if (bucket->pool->icm_type == DR_ICM_TYPE_STE) {
+ memset(chunk->ste_arr, 0,
+ bucket->num_of_entries * sizeof(chunk->ste_arr[0]));
+ memset(chunk->hw_ste_arr, 0,
+ bucket->num_of_entries * DR_STE_SIZE_REDUCED);
+ }
+
+ mutex_lock(&bucket->mutex);
+ list_del_init(&chunk->chunk_list);
+ list_add_tail(&chunk->chunk_list, &bucket->hot_list);
+ bucket->hot_list_count++;
+ bucket->used_list_count--;
+ mutex_unlock(&bucket->mutex);
+}
+
+struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
+ enum mlx5dr_icm_type icm_type)
+{
+ enum mlx5dr_icm_chunk_size max_log_chunk_sz;
+ struct mlx5dr_icm_pool *pool;
+ int i;
+
+ if (icm_type == DR_ICM_TYPE_STE)
+ max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
+ else
+ max_log_chunk_sz = dmn->info.max_log_action_icm_sz;
+
+ pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->buckets = kcalloc(max_log_chunk_sz + 1,
+ sizeof(pool->buckets[0]),
+ GFP_KERNEL);
+ if (!pool->buckets)
+ goto free_pool;
+
+ pool->dmn = dmn;
+ pool->icm_type = icm_type;
+ pool->max_log_chunk_sz = max_log_chunk_sz;
+ pool->num_of_buckets = max_log_chunk_sz + 1;
+ INIT_LIST_HEAD(&pool->icm_mr_list);
+
+ for (i = 0; i < pool->num_of_buckets; i++)
+ dr_icm_bucket_init(pool, &pool->buckets[i], i);
+
+ mutex_init(&pool->mr_mutex);
+
+ return pool;
+
+free_pool:
+ kvfree(pool);
+ return NULL;
+}
+
+void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool)
+{
+ struct mlx5dr_icm_mr *icm_mr, *next;
+ int i;
+
+ mutex_destroy(&pool->mr_mutex);
+
+ list_for_each_entry_safe(icm_mr, next, &pool->icm_mr_list, mr_list)
+ dr_icm_pool_mr_destroy(icm_mr);
+
+ for (i = 0; i < pool->num_of_buckets; i++)
+ dr_icm_bucket_cleanup(&pool->buckets[i]);
+
+ kfree(pool->buckets);
+ kvfree(pool);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
new file mode 100644
index 000000000000..01008cd66f75
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+static bool dr_mask_is_smac_set(struct mlx5dr_match_spec *spec)
+{
+ return (spec->smac_47_16 || spec->smac_15_0);
+}
+
+static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec)
+{
+ return (spec->dmac_47_16 || spec->dmac_15_0);
+}
+
+static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec)
+{
+ return (spec->src_ip_127_96 || spec->src_ip_95_64 ||
+ spec->src_ip_63_32 || spec->src_ip_31_0);
+}
+
+static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec)
+{
+ return (spec->dst_ip_127_96 || spec->dst_ip_95_64 ||
+ spec->dst_ip_63_32 || spec->dst_ip_31_0);
+}
+
+static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec)
+{
+ return (spec->ip_protocol || spec->frag || spec->tcp_flags ||
+ spec->ip_ecn || spec->ip_dscp);
+}
+
+static bool dr_mask_is_tcp_udp_base_set(struct mlx5dr_match_spec *spec)
+{
+ return (spec->tcp_sport || spec->tcp_dport ||
+ spec->udp_sport || spec->udp_dport);
+}
+
+static bool dr_mask_is_ipv4_set(struct mlx5dr_match_spec *spec)
+{
+ return (spec->dst_ip_31_0 || spec->src_ip_31_0);
+}
+
+static bool dr_mask_is_ipv4_5_tuple_set(struct mlx5dr_match_spec *spec)
+{
+ return (dr_mask_is_l3_base_set(spec) ||
+ dr_mask_is_tcp_udp_base_set(spec) ||
+ dr_mask_is_ipv4_set(spec));
+}
+
+static bool dr_mask_is_eth_l2_tnl_set(struct mlx5dr_match_misc *misc)
+{
+ return misc->vxlan_vni;
+}
+
+static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec)
+{
+ return spec->ttl_hoplimit;
+}
+
+#define DR_MASK_IS_L2_DST(_spec, _misc, _inner_outer) ((_spec).first_vid || \
+ (_spec).first_cfi || (_spec).first_prio || (_spec).cvlan_tag || \
+ (_spec).svlan_tag || (_spec).dmac_47_16 || (_spec).dmac_15_0 || \
+ (_spec).ethertype || (_spec).ip_version || \
+ (_misc)._inner_outer##_second_vid || \
+ (_misc)._inner_outer##_second_cfi || \
+ (_misc)._inner_outer##_second_prio || \
+ (_misc)._inner_outer##_second_cvlan_tag || \
+ (_misc)._inner_outer##_second_svlan_tag)
+
+#define DR_MASK_IS_ETH_L4_SET(_spec, _misc, _inner_outer) ( \
+ dr_mask_is_l3_base_set(&(_spec)) || \
+ dr_mask_is_tcp_udp_base_set(&(_spec)) || \
+ dr_mask_is_ttl_set(&(_spec)) || \
+ (_misc)._inner_outer##_ipv6_flow_label)
+
+#define DR_MASK_IS_ETH_L4_MISC_SET(_misc3, _inner_outer) ( \
+ (_misc3)._inner_outer##_tcp_seq_num || \
+ (_misc3)._inner_outer##_tcp_ack_num)
+
+#define DR_MASK_IS_FIRST_MPLS_SET(_misc2, _inner_outer) ( \
+ (_misc2)._inner_outer##_first_mpls_label || \
+ (_misc2)._inner_outer##_first_mpls_exp || \
+ (_misc2)._inner_outer##_first_mpls_s_bos || \
+ (_misc2)._inner_outer##_first_mpls_ttl)
+
+static bool dr_mask_is_gre_set(struct mlx5dr_match_misc *misc)
+{
+ return (misc->gre_key_h || misc->gre_key_l ||
+ misc->gre_protocol || misc->gre_c_present ||
+ misc->gre_k_present || misc->gre_s_present);
+}
+
+#define DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET(_misc2, gre_udp) ( \
+ (_misc2).outer_first_mpls_over_##gre_udp##_label || \
+ (_misc2).outer_first_mpls_over_##gre_udp##_exp || \
+ (_misc2).outer_first_mpls_over_##gre_udp##_s_bos || \
+ (_misc2).outer_first_mpls_over_##gre_udp##_ttl)
+
+#define DR_MASK_IS_FLEX_PARSER_0_SET(_misc2) ( \
+ DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), gre) || \
+ DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), udp))
+
+static bool dr_mask_is_flex_parser_tnl_set(struct mlx5dr_match_misc3 *misc3)
+{
+ return (misc3->outer_vxlan_gpe_vni ||
+ misc3->outer_vxlan_gpe_next_protocol ||
+ misc3->outer_vxlan_gpe_flags);
+}
+
+static bool dr_mask_is_flex_parser_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
+{
+ return (misc3->icmpv6_type || misc3->icmpv6_code ||
+ misc3->icmpv6_header_data);
+}
+
+static bool dr_mask_is_wqe_metadata_set(struct mlx5dr_match_misc2 *misc2)
+{
+ return misc2->metadata_reg_a;
+}
+
+static bool dr_mask_is_reg_c_0_3_set(struct mlx5dr_match_misc2 *misc2)
+{
+ return (misc2->metadata_reg_c_0 || misc2->metadata_reg_c_1 ||
+ misc2->metadata_reg_c_2 || misc2->metadata_reg_c_3);
+}
+
+static bool dr_mask_is_reg_c_4_7_set(struct mlx5dr_match_misc2 *misc2)
+{
+ return (misc2->metadata_reg_c_4 || misc2->metadata_reg_c_5 ||
+ misc2->metadata_reg_c_6 || misc2->metadata_reg_c_7);
+}
+
+static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc)
+{
+ return (misc->source_sqn || misc->source_port);
+}
+
+static bool
+dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_domain *dmn)
+{
+ return dmn->info.caps.flex_protocols &
+ MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
+}
+
+int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ bool ipv6)
+{
+ if (ipv6) {
+ nic_matcher->ste_builder = nic_matcher->ste_builder6;
+ nic_matcher->num_of_builders = nic_matcher->num_of_builders6;
+ } else {
+ nic_matcher->ste_builder = nic_matcher->ste_builder4;
+ nic_matcher->num_of_builders = nic_matcher->num_of_builders4;
+ }
+
+ if (!nic_matcher->num_of_builders) {
+ mlx5dr_dbg(matcher->tbl->dmn,
+ "Rule not supported on this matcher due to IP related fields\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ bool ipv6)
+{
+ struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_match_param mask = {};
+ struct mlx5dr_match_misc3 *misc3;
+ struct mlx5dr_ste_build *sb;
+ u8 *num_of_builders;
+ bool inner, rx;
+ int idx = 0;
+ int ret, i;
+
+ if (ipv6) {
+ sb = nic_matcher->ste_builder6;
+ num_of_builders = &nic_matcher->num_of_builders6;
+ } else {
+ sb = nic_matcher->ste_builder4;
+ num_of_builders = &nic_matcher->num_of_builders4;
+ }
+
+ rx = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX;
+
+ /* Create a temporary mask to track and clear used mask fields */
+ if (matcher->match_criteria & DR_MATCHER_CRITERIA_OUTER)
+ mask.outer = matcher->mask.outer;
+
+ if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC)
+ mask.misc = matcher->mask.misc;
+
+ if (matcher->match_criteria & DR_MATCHER_CRITERIA_INNER)
+ mask.inner = matcher->mask.inner;
+
+ if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC2)
+ mask.misc2 = matcher->mask.misc2;
+
+ if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC3)
+ mask.misc3 = matcher->mask.misc3;
+
+ ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
+ &matcher->mask, NULL);
+ if (ret)
+ return ret;
+
+ /* Outer */
+ if (matcher->match_criteria & (DR_MATCHER_CRITERIA_OUTER |
+ DR_MATCHER_CRITERIA_MISC |
+ DR_MATCHER_CRITERIA_MISC2 |
+ DR_MATCHER_CRITERIA_MISC3)) {
+ inner = false;
+
+ if (dr_mask_is_wqe_metadata_set(&mask.misc2))
+ mlx5dr_ste_build_general_purpose(&sb[idx++], &mask, inner, rx);
+
+ if (dr_mask_is_reg_c_0_3_set(&mask.misc2))
+ mlx5dr_ste_build_register_0(&sb[idx++], &mask, inner, rx);
+
+ if (dr_mask_is_reg_c_4_7_set(&mask.misc2))
+ mlx5dr_ste_build_register_1(&sb[idx++], &mask, inner, rx);
+
+ if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) &&
+ (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
+ dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
+ ret = mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
+ &dmn->info.caps,
+ inner, rx);
+ if (ret)
+ return ret;
+ }
+
+ if (dr_mask_is_smac_set(&mask.outer) &&
+ dr_mask_is_dmac_set(&mask.outer)) {
+ ret = mlx5dr_ste_build_eth_l2_src_des(&sb[idx++], &mask,
+ inner, rx);
+ if (ret)
+ return ret;
+ }
+
+ if (dr_mask_is_smac_set(&mask.outer))
+ mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
+
+ if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
+ mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
+
+ if (ipv6) {
+ if (dr_mask_is_dst_addr_set(&mask.outer))
+ mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
+ inner, rx);
+
+ if (dr_mask_is_src_addr_set(&mask.outer))
+ mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
+ inner, rx);
+
+ if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer))
+ mlx5dr_ste_build_ipv6_l3_l4(&sb[idx++], &mask,
+ inner, rx);
+ } else {
+ if (dr_mask_is_ipv4_5_tuple_set(&mask.outer))
+ mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
+ inner, rx);
+
+ if (dr_mask_is_ttl_set(&mask.outer))
+ mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
+ inner, rx);
+ }
+
+ if (dr_mask_is_flex_parser_tnl_set(&mask.misc3) &&
+ dr_matcher_supp_flex_parser_vxlan_gpe(dmn))
+ mlx5dr_ste_build_flex_parser_tnl(&sb[idx++], &mask,
+ inner, rx);
+
+ if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
+ mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
+
+ if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer))
+ mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
+
+ if (DR_MASK_IS_FLEX_PARSER_0_SET(mask.misc2))
+ mlx5dr_ste_build_flex_parser_0(&sb[idx++], &mask,
+ inner, rx);
+
+ misc3 = &mask.misc3;
+ if ((DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc3) &&
+ mlx5dr_matcher_supp_flex_parser_icmp_v4(&dmn->info.caps)) ||
+ (dr_mask_is_flex_parser_icmpv6_set(&mask.misc3) &&
+ mlx5dr_matcher_supp_flex_parser_icmp_v6(&dmn->info.caps))) {
+ ret = mlx5dr_ste_build_flex_parser_1(&sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
+ if (ret)
+ return ret;
+ }
+ if (dr_mask_is_gre_set(&mask.misc))
+ mlx5dr_ste_build_gre(&sb[idx++], &mask, inner, rx);
+ }
+
+ /* Inner */
+ if (matcher->match_criteria & (DR_MATCHER_CRITERIA_INNER |
+ DR_MATCHER_CRITERIA_MISC |
+ DR_MATCHER_CRITERIA_MISC2 |
+ DR_MATCHER_CRITERIA_MISC3)) {
+ inner = true;
+
+ if (dr_mask_is_eth_l2_tnl_set(&mask.misc))
+ mlx5dr_ste_build_eth_l2_tnl(&sb[idx++], &mask, inner, rx);
+
+ if (dr_mask_is_smac_set(&mask.inner) &&
+ dr_mask_is_dmac_set(&mask.inner)) {
+ ret = mlx5dr_ste_build_eth_l2_src_des(&sb[idx++],
+ &mask, inner, rx);
+ if (ret)
+ return ret;
+ }
+
+ if (dr_mask_is_smac_set(&mask.inner))
+ mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
+
+ if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
+ mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
+
+ if (ipv6) {
+ if (dr_mask_is_dst_addr_set(&mask.inner))
+ mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
+ inner, rx);
+
+ if (dr_mask_is_src_addr_set(&mask.inner))
+ mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
+ inner, rx);
+
+ if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner))
+ mlx5dr_ste_build_ipv6_l3_l4(&sb[idx++], &mask,
+ inner, rx);
+ } else {
+ if (dr_mask_is_ipv4_5_tuple_set(&mask.inner))
+ mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
+ inner, rx);
+
+ if (dr_mask_is_ttl_set(&mask.inner))
+ mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
+ inner, rx);
+ }
+
+ if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, inner))
+ mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
+
+ if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner))
+ mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
+
+ if (DR_MASK_IS_FLEX_PARSER_0_SET(mask.misc2))
+ mlx5dr_ste_build_flex_parser_0(&sb[idx++], &mask, inner, rx);
+ }
+ /* Empty matcher, takes all */
+ if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
+ mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx);
+
+ if (idx == 0) {
+ mlx5dr_dbg(dmn, "Cannot generate any valid rules from mask\n");
+ return -EINVAL;
+ }
+
+ /* Check that all mask fields were consumed */
+ for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) {
+ if (((u8 *)&mask)[i] != 0) {
+ mlx5dr_info(dmn, "Mask contains unsupported parameters\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ *num_of_builders = idx;
+
+ return 0;
+}
+
+static int dr_matcher_connect(struct mlx5dr_domain *dmn,
+ struct mlx5dr_matcher_rx_tx *curr_nic_matcher,
+ struct mlx5dr_matcher_rx_tx *next_nic_matcher,
+ struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
+{
+ struct mlx5dr_table_rx_tx *nic_tbl = curr_nic_matcher->nic_tbl;
+ struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
+ struct mlx5dr_htbl_connect_info info;
+ struct mlx5dr_ste_htbl *prev_htbl;
+ int ret;
+
+ /* Connect end anchor hash table to next_htbl or to the default address */
+ if (next_nic_matcher) {
+ info.type = CONNECT_HIT;
+ info.hit_next_htbl = next_nic_matcher->s_htbl;
+ } else {
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = nic_tbl->default_icm_addr;
+ }
+ ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
+ curr_nic_matcher->e_anchor,
+ &info, info.type == CONNECT_HIT);
+ if (ret)
+ return ret;
+
+ /* Connect start hash table to end anchor */
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = curr_nic_matcher->e_anchor->chunk->icm_addr;
+ ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
+ curr_nic_matcher->s_htbl,
+ &info, false);
+ if (ret)
+ return ret;
+
+ /* Connect previous hash table to matcher start hash table */
+ if (prev_nic_matcher)
+ prev_htbl = prev_nic_matcher->e_anchor;
+ else
+ prev_htbl = nic_tbl->s_anchor;
+
+ info.type = CONNECT_HIT;
+ info.hit_next_htbl = curr_nic_matcher->s_htbl;
+ ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_htbl,
+ &info, true);
+ if (ret)
+ return ret;
+
+ /* Update the pointing ste and next hash table */
+ curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->ste_arr;
+ prev_htbl->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl;
+
+ if (next_nic_matcher) {
+ next_nic_matcher->s_htbl->pointing_ste = curr_nic_matcher->e_anchor->ste_arr;
+ curr_nic_matcher->e_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
+ }
+
+ return 0;
+}
+
+static int dr_matcher_add_to_tbl(struct mlx5dr_matcher *matcher)
+{
+ struct mlx5dr_matcher *next_matcher, *prev_matcher, *tmp_matcher;
+ struct mlx5dr_table *tbl = matcher->tbl;
+ struct mlx5dr_domain *dmn = tbl->dmn;
+ bool first = true;
+ int ret;
+
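+ /* Matchers are kept sorted by priority; find the first matcher
+ * with a priority >= the new one and insert just before it.
+ */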
+ next_matcher = NULL;
+ if (!list_empty(&tbl->matcher_list))
+ list_for_each_entry(tmp_matcher, &tbl->matcher_list, matcher_list) {
+ if (tmp_matcher->prio >= matcher->prio) {
+ next_matcher = tmp_matcher;
+ break;
+ }
+ first = false;
+ }
+
+ prev_matcher = NULL;
+ if (next_matcher && !first)
+ prev_matcher = list_entry(next_matcher->matcher_list.prev,
+ struct mlx5dr_matcher,
+ matcher_list);
+ else if (!first)
+ prev_matcher = list_entry(tbl->matcher_list.prev,
+ struct mlx5dr_matcher,
+ matcher_list);
+
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
+ dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
+ ret = dr_matcher_connect(dmn, &matcher->rx,
+ next_matcher ? &next_matcher->rx : NULL,
+ prev_matcher ? &prev_matcher->rx : NULL);
+ if (ret)
+ return ret;
+ }
+
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
+ dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) {
+ ret = dr_matcher_connect(dmn, &matcher->tx,
+ next_matcher ? &next_matcher->tx : NULL,
+ prev_matcher ? &prev_matcher->tx : NULL);
+ if (ret)
+ return ret;
+ }
+
+ if (prev_matcher)
+ list_add(&matcher->matcher_list, &prev_matcher->matcher_list);
+ else if (next_matcher)
+ list_add_tail(&matcher->matcher_list,
+ &next_matcher->matcher_list);
+ else
+ list_add(&matcher->matcher_list, &tbl->matcher_list);
+
+ return 0;
+}
+
+static void dr_matcher_uninit_nic(struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+ mlx5dr_htbl_put(nic_matcher->s_htbl);
+ mlx5dr_htbl_put(nic_matcher->e_anchor);
+}
+
+static void dr_matcher_uninit_fdb(struct mlx5dr_matcher *matcher)
+{
+ dr_matcher_uninit_nic(&matcher->rx);
+ dr_matcher_uninit_nic(&matcher->tx);
+}
+
+static void dr_matcher_uninit(struct mlx5dr_matcher *matcher)
+{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+
+ switch (dmn->type) {
+ case MLX5DR_DOMAIN_TYPE_NIC_RX:
+ dr_matcher_uninit_nic(&matcher->rx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_NIC_TX:
+ dr_matcher_uninit_nic(&matcher->tx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_FDB:
+ dr_matcher_uninit_fdb(matcher);
+ break;
+ default:
+ WARN_ON(true);
+ break;
+ }
+}
+
+static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ int ret, ret_v4, ret_v6;
+
+ ret_v4 = dr_matcher_set_ste_builders(matcher, nic_matcher, false);
+ ret_v6 = dr_matcher_set_ste_builders(matcher, nic_matcher, true);
+
+ if (ret_v4 && ret_v6) {
+ mlx5dr_dbg(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
+ return -EINVAL;
+ }
+
+ if (!ret_v4)
+ nic_matcher->ste_builder = nic_matcher->ste_builder4;
+ else
+ nic_matcher->ste_builder = nic_matcher->ste_builder6;
+
+ nic_matcher->e_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
+ DR_CHUNK_SIZE_1,
+ MLX5DR_STE_LU_TYPE_DONT_CARE,
+ 0);
+ if (!nic_matcher->e_anchor)
+ return -ENOMEM;
+
+ nic_matcher->s_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
+ DR_CHUNK_SIZE_1,
+ nic_matcher->ste_builder[0].lu_type,
+ nic_matcher->ste_builder[0].byte_mask);
+ if (!nic_matcher->s_htbl) {
+ ret = -ENOMEM;
+ goto free_e_htbl;
+ }
+
+ /* Take a reference so the tables are kept alive even while empty */
+ mlx5dr_htbl_get(nic_matcher->s_htbl);
+ mlx5dr_htbl_get(nic_matcher->e_anchor);
+
+ return 0;
+
+free_e_htbl:
+ mlx5dr_ste_htbl_free(nic_matcher->e_anchor);
+ return ret;
+}
+
+static int dr_matcher_init_fdb(struct mlx5dr_matcher *matcher)
+{
+ int ret;
+
+ ret = dr_matcher_init_nic(matcher, &matcher->rx);
+ if (ret)
+ return ret;
+
+ ret = dr_matcher_init_nic(matcher, &matcher->tx);
+ if (ret)
+ goto uninit_nic_rx;
+
+ return 0;
+
+uninit_nic_rx:
+ dr_matcher_uninit_nic(&matcher->rx);
+ return ret;
+}
+
+static int dr_matcher_init(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_match_parameters *mask)
+{
+ struct mlx5dr_table *tbl = matcher->tbl;
+ struct mlx5dr_domain *dmn = tbl->dmn;
+ int ret;
+
+ if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) {
+ mlx5dr_info(dmn, "Invalid match criteria attribute\n");
+ return -EINVAL;
+ }
+
+ if (mask) {
+ if (mask->match_sz > sizeof(struct mlx5dr_match_param)) {
+ mlx5dr_info(dmn, "Invalid match size attribute\n");
+ return -EINVAL;
+ }
+ mlx5dr_ste_copy_param(matcher->match_criteria,
+ &matcher->mask, mask);
+ }
+
+ switch (dmn->type) {
+ case MLX5DR_DOMAIN_TYPE_NIC_RX:
+ matcher->rx.nic_tbl = &tbl->rx;
+ ret = dr_matcher_init_nic(matcher, &matcher->rx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_NIC_TX:
+ matcher->tx.nic_tbl = &tbl->tx;
+ ret = dr_matcher_init_nic(matcher, &matcher->tx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_FDB:
+ matcher->rx.nic_tbl = &tbl->rx;
+ matcher->tx.nic_tbl = &tbl->tx;
+ ret = dr_matcher_init_fdb(matcher);
+ break;
+ default:
+ WARN_ON(true);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+struct mlx5dr_matcher *
+mlx5dr_matcher_create(struct mlx5dr_table *tbl,
+ u16 priority,
+ u8 match_criteria_enable,
+ struct mlx5dr_match_parameters *mask)
+{
+ struct mlx5dr_matcher *matcher;
+ int ret;
+
+ refcount_inc(&tbl->refcount);
+
+ matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
+ if (!matcher)
+ goto dec_ref;
+
+ matcher->tbl = tbl;
+ matcher->prio = priority;
+ matcher->match_criteria = match_criteria_enable;
+ refcount_set(&matcher->refcount, 1);
+ INIT_LIST_HEAD(&matcher->matcher_list);
+
+ mutex_lock(&tbl->dmn->mutex);
+
+ ret = dr_matcher_init(matcher, mask);
+ if (ret)
+ goto free_matcher;
+
+ ret = dr_matcher_add_to_tbl(matcher);
+ if (ret)
+ goto matcher_uninit;
+
+ mutex_unlock(&tbl->dmn->mutex);
+
+ return matcher;
+
+matcher_uninit:
+ dr_matcher_uninit(matcher);
+free_matcher:
+ mutex_unlock(&tbl->dmn->mutex);
+ kfree(matcher);
+dec_ref:
+ refcount_dec(&tbl->refcount);
+ return NULL;
+}
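+
+/* Illustrative usage (a sketch, not part of this patch; assumes
+ * struct mlx5dr_match_parameters carries a match_sz and match_buf pair,
+ * and that match_param_mask is a locally built struct mlx5dr_match_param):
+ *
+ * struct mlx5dr_match_parameters mask = {
+ * .match_sz = sizeof(struct mlx5dr_match_param),
+ * .match_buf = (u64 *)&match_param_mask,
+ * };
+ * matcher = mlx5dr_matcher_create(tbl, 1, DR_MATCHER_CRITERIA_OUTER, &mask);
+ */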
+
+static int dr_matcher_disconnect(struct mlx5dr_domain *dmn,
+ struct mlx5dr_table_rx_tx *nic_tbl,
+ struct mlx5dr_matcher_rx_tx *next_nic_matcher,
+ struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
+{
+ struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
+ struct mlx5dr_htbl_connect_info info;
+ struct mlx5dr_ste_htbl *prev_anchor;
+
+ if (prev_nic_matcher)
+ prev_anchor = prev_nic_matcher->e_anchor;
+ else
+ prev_anchor = nic_tbl->s_anchor;
+
+ /* Connect previous anchor hash table to next matcher or to the default address */
+ if (next_nic_matcher) {
+ info.type = CONNECT_HIT;
+ info.hit_next_htbl = next_nic_matcher->s_htbl;
+ next_nic_matcher->s_htbl->pointing_ste = prev_anchor->ste_arr;
+ prev_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
+ } else {
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = nic_tbl->default_icm_addr;
+ prev_anchor->ste_arr[0].next_htbl = NULL;
+ }
+
+ return mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_anchor,
+ &info, true);
+}
+
+static int dr_matcher_remove_from_tbl(struct mlx5dr_matcher *matcher)
+{
+ struct mlx5dr_matcher *prev_matcher, *next_matcher;
+ struct mlx5dr_table *tbl = matcher->tbl;
+ struct mlx5dr_domain *dmn = tbl->dmn;
+ int ret = 0;
+
+ if (list_is_last(&matcher->matcher_list, &tbl->matcher_list))
+ next_matcher = NULL;
+ else
+ next_matcher = list_next_entry(matcher, matcher_list);
+
+ if (matcher->matcher_list.prev == &tbl->matcher_list)
+ prev_matcher = NULL;
+ else
+ prev_matcher = list_prev_entry(matcher, matcher_list);
+
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
+ dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
+ ret = dr_matcher_disconnect(dmn, &tbl->rx,
+ next_matcher ? &next_matcher->rx : NULL,
+ prev_matcher ? &prev_matcher->rx : NULL);
+ if (ret)
+ return ret;
+ }
+
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
+ dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) {
+ ret = dr_matcher_disconnect(dmn, &tbl->tx,
+ next_matcher ? &next_matcher->tx : NULL,
+ prev_matcher ? &prev_matcher->tx : NULL);
+ if (ret)
+ return ret;
+ }
+
+ list_del(&matcher->matcher_list);
+
+ return 0;
+}
+
+int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher)
+{
+ struct mlx5dr_table *tbl = matcher->tbl;
+
+ if (refcount_read(&matcher->refcount) > 1)
+ return -EBUSY;
+
+ mutex_lock(&tbl->dmn->mutex);
+
+ dr_matcher_remove_from_tbl(matcher);
+ dr_matcher_uninit(matcher);
+ refcount_dec(&matcher->tbl->refcount);
+
+ mutex_unlock(&tbl->dmn->mutex);
+ kfree(matcher);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
new file mode 100644
index 000000000000..3bc3f66b8fa8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -0,0 +1,1243 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+#define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES)
+
+struct mlx5dr_rule_action_member {
+ struct mlx5dr_action *action;
+ struct list_head list;
+};
+
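+/* Collision entries that share a hash index are chained through the STE
+ * miss address: each entry misses to the next one in the miss_list, and
+ * the last entry misses to the matcher e_anchor.
+ */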
+static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
+ struct list_head *miss_list,
+ struct list_head *send_list)
+{
+ struct mlx5dr_ste_send_info *ste_info_last;
+ struct mlx5dr_ste *last_ste;
+
+ /* The new entry will be inserted after the current last entry */
+ last_ste = list_entry(miss_list->prev, struct mlx5dr_ste, miss_list_node);
+ WARN_ON(!last_ste);
+
+ ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL);
+ if (!ste_info_last)
+ return -ENOMEM;
+
+ mlx5dr_ste_set_miss_addr(last_ste->hw_ste,
+ mlx5dr_ste_get_icm_addr(new_last_ste));
+ list_add_tail(&new_last_ste->miss_list_node, miss_list);
+
+ mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_REDUCED,
+ 0, last_ste->hw_ste,
+ ste_info_last, send_list, true);
+
+ return 0;
+}
+
+static struct mlx5dr_ste *
+dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ u8 *hw_ste)
+{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_htbl *new_htbl;
+ struct mlx5dr_ste *ste;
+
+ /* Create new table for miss entry */
+ new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
+ DR_CHUNK_SIZE_1,
+ MLX5DR_STE_LU_TYPE_DONT_CARE,
+ 0);
+ if (!new_htbl) {
+ mlx5dr_dbg(dmn, "Failed allocating collision table\n");
+ return NULL;
+ }
+
+ /* One and only entry, never grows */
+ ste = new_htbl->ste_arr;
+ mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+ mlx5dr_htbl_get(new_htbl);
+
+ return ste;
+}
+
+static struct mlx5dr_ste *
+dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ u8 *hw_ste,
+ struct mlx5dr_ste *orig_ste)
+{
+ struct mlx5dr_ste *ste;
+
+ ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
+ if (!ste) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
+ return NULL;
+ }
+
+ ste->ste_chain_location = orig_ste->ste_chain_location;
+
+ /* In a collision entry, all members share the same miss_list_head */
+ ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
+
+ /* Next table */
+ if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
+ DR_CHUNK_SIZE_1)) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
+ goto free_tbl;
+ }
+
+ return ste;
+
+free_tbl:
+ mlx5dr_ste_free(ste, matcher, nic_matcher);
+ return NULL;
+}
+
+static int
+dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
+ struct mlx5dr_domain *dmn)
+{
+ int ret;
+
+ list_del(&ste_info->send_list);
+ ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
+ ste_info->size, ste_info->offset);
+ if (ret)
+ goto out;
+ /* Copy the data to the ste; reduced size only, since the last 16B
+ * (the mask) were already written to the HW.
+ */
+ memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
+
+out:
+ kfree(ste_info);
+ return ret;
+}
+
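+/* Callers pick the posting order: rule insertion posts in reverse so that
+ * deeper STEs reach the HW before the STEs that point at them, while rehash
+ * posts in regular order (see the comment in dr_rule_rehash_htbl).
+ */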
+static int dr_rule_send_update_list(struct list_head *send_ste_list,
+ struct mlx5dr_domain *dmn,
+ bool is_reverse)
+{
+ struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
+ int ret;
+
+ if (is_reverse) {
+ list_for_each_entry_safe_reverse(ste_info, tmp_ste_info,
+ send_ste_list, send_list) {
+ ret = dr_rule_handle_one_ste_in_update_list(ste_info,
+ dmn);
+ if (ret)
+ return ret;
+ }
+ } else {
+ list_for_each_entry_safe(ste_info, tmp_ste_info,
+ send_ste_list, send_list) {
+ ret = dr_rule_handle_one_ste_in_update_list(ste_info,
+ dmn);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct mlx5dr_ste *
+dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
+{
+ struct mlx5dr_ste *ste;
+
+ if (list_empty(miss_list))
+ return NULL;
+
+ /* Check if hw_ste is present in the list */
+ list_for_each_entry(ste, miss_list, miss_list_node) {
+ if (mlx5dr_ste_equal_tag(ste->hw_ste, hw_ste))
+ return ste;
+ }
+
+ return NULL;
+}
+
+static struct mlx5dr_ste *
+dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct list_head *update_list,
+ struct mlx5dr_ste *col_ste,
+ u8 *hw_ste)
+{
+ struct mlx5dr_ste *new_ste;
+ int ret;
+
+ new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
+ if (!new_ste)
+ return NULL;
+
+ /* In a collision entry, all members share the same miss_list_head */
+ new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
+
+ /* Update the previous entry in the miss list */
+ ret = dr_rule_append_to_miss_list(new_ste,
+ mlx5dr_ste_get_miss_list(col_ste),
+ update_list);
+ if (ret) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Failed update dup entry\n");
+ goto err_exit;
+ }
+
+ return new_ste;
+
+err_exit:
+ mlx5dr_ste_free(new_ste, matcher, nic_matcher);
+ return NULL;
+}
+
+static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste *cur_ste,
+ struct mlx5dr_ste *new_ste)
+{
+ new_ste->next_htbl = cur_ste->next_htbl;
+ new_ste->ste_chain_location = cur_ste->ste_chain_location;
+
+ if (!mlx5dr_ste_is_last_in_rule(nic_matcher, new_ste->ste_chain_location))
+ new_ste->next_htbl->pointing_ste = new_ste;
+
+ /* We need to copy the refcount since this ste
+ * may have been traversed several times
+ */
+ refcount_set(&new_ste->refcount, refcount_read(&cur_ste->refcount));
+
+ /* Link the old STE's rule_mem list to the new ste */
+ mlx5dr_rule_update_rule_member(cur_ste, new_ste);
+ INIT_LIST_HEAD(&new_ste->rule_list);
+ list_splice_tail_init(&cur_ste->rule_list, &new_ste->rule_list);
+}
+
+static struct mlx5dr_ste *
+dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste *cur_ste,
+ struct mlx5dr_ste_htbl *new_htbl,
+ struct list_head *update_list)
+{
+ struct mlx5dr_ste_send_info *ste_info;
+ bool use_update_list = false;
+ u8 hw_ste[DR_STE_SIZE] = {};
+ struct mlx5dr_ste *new_ste;
+ int new_idx;
+ u8 sb_idx;
+
+ /* Copy STE mask from the matcher */
+ sb_idx = cur_ste->ste_chain_location - 1;
+ mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);
+
+ /* Copy STE control and tag */
+ memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
+ mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+
+ new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
+ new_ste = &new_htbl->ste_arr[new_idx];
+
+ if (mlx5dr_ste_not_used_ste(new_ste)) {
+ mlx5dr_htbl_get(new_htbl);
+ list_add_tail(&new_ste->miss_list_node,
+ mlx5dr_ste_get_miss_list(new_ste));
+ } else {
+ new_ste = dr_rule_rehash_handle_collision(matcher,
+ nic_matcher,
+ update_list,
+ new_ste,
+ hw_ste);
+ if (!new_ste) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Failed adding collision entry, index: %d\n",
+ new_idx);
+ return NULL;
+ }
+ new_htbl->ctrl.num_of_collisions++;
+ use_update_list = true;
+ }
+
+ memcpy(new_ste->hw_ste, hw_ste, DR_STE_SIZE_REDUCED);
+
+ new_htbl->ctrl.num_of_valid_entries++;
+
+ if (use_update_list) {
+ ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
+ if (!ste_info)
+ goto err_exit;
+
+ mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0,
+ hw_ste, ste_info,
+ update_list, true);
+ }
+
+ dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste);
+
+ return new_ste;
+
+err_exit:
+ mlx5dr_ste_free(new_ste, matcher, nic_matcher);
+ return NULL;
+}
+
+static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct list_head *cur_miss_list,
+ struct mlx5dr_ste_htbl *new_htbl,
+ struct list_head *update_list)
+{
+ struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste;
+
+ if (list_empty(cur_miss_list))
+ return 0;
+
+ list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) {
+ new_ste = dr_rule_rehash_copy_ste(matcher,
+ nic_matcher,
+ cur_ste,
+ new_htbl,
+ update_list);
+ if (!new_ste)
+ goto err_insert;
+
+ list_del(&cur_ste->miss_list_node);
+ mlx5dr_htbl_put(cur_ste->htbl);
+ }
+ return 0;
+
+err_insert:
+ mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
+ WARN_ON(true);
+ return -EINVAL;
+}
+
+static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste_htbl *cur_htbl,
+ struct mlx5dr_ste_htbl *new_htbl,
+ struct list_head *update_list)
+{
+ struct mlx5dr_ste *cur_ste;
+ int cur_entries;
+ int err = 0;
+ int i;
+
+ cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk_size);
+
+ if (cur_entries < 1) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cur_entries; i++) {
+ cur_ste = &cur_htbl->ste_arr[i];
+ if (mlx5dr_ste_not_used_ste(cur_ste)) /* Empty, nothing to copy */
+ continue;
+
+ err = dr_rule_rehash_copy_miss_list(matcher,
+ nic_matcher,
+ mlx5dr_ste_get_miss_list(cur_ste),
+ new_htbl,
+ update_list);
+ if (err)
+ goto clean_copy;
+ }
+
+clean_copy:
+ return err;
+}
+
+static struct mlx5dr_ste_htbl *
+dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_rx_tx *nic_rule,
+ struct mlx5dr_ste_htbl *cur_htbl,
+ u8 ste_location,
+ struct list_head *update_list,
+ enum mlx5dr_icm_chunk_size new_size)
+{
+ struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info;
+ struct mlx5dr_matcher *matcher = rule->matcher;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_matcher_rx_tx *nic_matcher;
+ struct mlx5dr_ste_send_info *ste_info;
+ struct mlx5dr_htbl_connect_info info;
+ struct mlx5dr_domain_rx_tx *nic_dmn;
+ u8 formatted_ste[DR_STE_SIZE] = {};
+ LIST_HEAD(rehash_table_send_list);
+ struct mlx5dr_ste *ste_to_update;
+ struct mlx5dr_ste_htbl *new_htbl;
+ int err;
+
+ nic_matcher = nic_rule->nic_matcher;
+ nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+
+ ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
+ if (!ste_info)
+ return NULL;
+
+ new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
+ new_size,
+ cur_htbl->lu_type,
+ cur_htbl->byte_mask);
+ if (!new_htbl) {
+ mlx5dr_err(dmn, "Failed to allocate new hash table\n");
+ goto free_ste_info;
+ }
+
+ /* Write new table to HW */
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
+ mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
+ nic_dmn,
+ new_htbl,
+ formatted_ste,
+ &info);
+
+ new_htbl->pointing_ste = cur_htbl->pointing_ste;
+ new_htbl->pointing_ste->next_htbl = new_htbl;
+ err = dr_rule_rehash_copy_htbl(matcher,
+ nic_matcher,
+ cur_htbl,
+ new_htbl,
+ &rehash_table_send_list);
+ if (err)
+ goto free_new_htbl;
+
+ if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste,
+ nic_matcher->ste_builder[ste_location - 1].bit_mask)) {
+ mlx5dr_err(dmn, "Failed writing table to HW\n");
+ goto free_new_htbl;
+ }
+
+ /* Writing to the HW is done in the regular order of
+ * rehash_table_send_list, so that the original data is written before
+ * the miss address of any collision entries.
+ */
+ if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) {
+ mlx5dr_err(dmn, "Failed updating table to HW\n");
+ goto free_ste_list;
+ }
+
+ /* Connect previous hash table to current */
+ if (ste_location == 1) {
+ /* The previous table is an anchor; an anchor's size is always one STE */
+ struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl;
+
+ /* On matcher s_anchor we keep an extra refcount */
+ mlx5dr_htbl_get(new_htbl);
+ mlx5dr_htbl_put(cur_htbl);
+
+ nic_matcher->s_htbl = new_htbl;
+
+ /* It is safe to call mlx5dr_ste_set_hit_addr on the hw_ste here
+ * (48B long), since it only modifies the first 32B.
+ */
+ mlx5dr_ste_set_hit_addr(prev_htbl->ste_arr[0].hw_ste,
+ new_htbl->chunk->icm_addr,
+ new_htbl->chunk->num_of_entries);
+
+ ste_to_update = &prev_htbl->ste_arr[0];
+ } else {
+ mlx5dr_ste_set_hit_addr_by_next_htbl(cur_htbl->pointing_ste->hw_ste,
+ new_htbl);
+ ste_to_update = cur_htbl->pointing_ste;
+ }
+
+ mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_REDUCED,
+ 0, ste_to_update->hw_ste, ste_info,
+ update_list, false);
+
+ return new_htbl;
+
+free_ste_list:
+ /* Clean all ste_info's from the new table */
+ list_for_each_entry_safe(del_ste_info, tmp_ste_info,
+ &rehash_table_send_list, send_list) {
+ list_del(&del_ste_info->send_list);
+ kfree(del_ste_info);
+ }
+
+free_new_htbl:
+ mlx5dr_ste_htbl_free(new_htbl);
+free_ste_info:
+ kfree(ste_info);
+ mlx5dr_info(dmn, "Failed creating rehash table\n");
+ return NULL;
+}
+
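+/* Grow the hash table by one chunk-size step per rehash; chunk sizes appear
+ * to encode the log2 of the entry count, so each step doubles the table,
+ * capped at the domain's max_log_sw_icm_sz.
+ */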
+static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_rx_tx *nic_rule,
+ struct mlx5dr_ste_htbl *cur_htbl,
+ u8 ste_location,
+ struct list_head *update_list)
+{
+ struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
+ enum mlx5dr_icm_chunk_size new_size;
+
+ new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk_size);
+ new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);
+
+ if (new_size == cur_htbl->chunk_size)
+ return NULL; /* Skip rehash, we already at the max size */
+
+ return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
+ update_list, new_size);
+}
+
+static struct mlx5dr_ste *
+dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste *ste,
+ u8 *hw_ste,
+ struct list_head *miss_list,
+ struct list_head *send_list)
+{
+ struct mlx5dr_ste_send_info *ste_info;
+ struct mlx5dr_ste *new_ste;
+
+ ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
+ if (!ste_info)
+ return NULL;
+
+ new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste);
+ if (!new_ste)
+ goto free_send_info;
+
+ if (dr_rule_append_to_miss_list(new_ste, miss_list, send_list)) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Failed to update prev miss_list\n");
+ goto err_exit;
+ }
+
+ mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste,
+ ste_info, send_list, false);
+
+ ste->htbl->ctrl.num_of_collisions++;
+ ste->htbl->ctrl.num_of_valid_entries++;
+
+ return new_ste;
+
+err_exit:
+ mlx5dr_ste_free(new_ste, matcher, nic_matcher);
+free_send_info:
+ kfree(ste_info);
+ return NULL;
+}
+
+static void dr_rule_remove_action_members(struct mlx5dr_rule *rule)
+{
+ struct mlx5dr_rule_action_member *action_mem;
+ struct mlx5dr_rule_action_member *tmp;
+
+ list_for_each_entry_safe(action_mem, tmp, &rule->rule_actions_list, list) {
+ list_del(&action_mem->list);
+ refcount_dec(&action_mem->action->refcount);
+ kvfree(action_mem);
+ }
+}
+
+static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
+ size_t num_actions,
+ struct mlx5dr_action *actions[])
+{
+ struct mlx5dr_rule_action_member *action_mem;
+ int i;
+
+ for (i = 0; i < num_actions; i++) {
+ action_mem = kvzalloc(sizeof(*action_mem), GFP_KERNEL);
+ if (!action_mem)
+ goto free_action_members;
+
+ action_mem->action = actions[i];
+ INIT_LIST_HEAD(&action_mem->list);
+ list_add_tail(&action_mem->list, &rule->rule_actions_list);
+ refcount_inc(&action_mem->action->refcount);
+ }
+
+ return 0;
+
+free_action_members:
+ dr_rule_remove_action_members(rule);
+ return -ENOMEM;
+}
+
+/* When an STE's pointer is no longer valid, e.g. after the STE was moved to
+ * be first in the miss_list and placed in the origin table, all rule members
+ * attached to this STE must update their ste member to the new pointer.
+ */
+void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste,
+ struct mlx5dr_ste *new_ste)
+{
+ struct mlx5dr_rule_member *rule_mem;
+
+ if (!list_empty(&ste->rule_list))
+ list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list)
+ rule_mem->ste = new_ste;
+}
+
+static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_rx_tx *nic_rule)
+{
+ struct mlx5dr_rule_member *rule_mem;
+ struct mlx5dr_rule_member *tmp_mem;
+
+ if (list_empty(&nic_rule->rule_members_list))
+ return;
+ list_for_each_entry_safe(rule_mem, tmp_mem, &nic_rule->rule_members_list, list) {
+ list_del(&rule_mem->list);
+ list_del(&rule_mem->use_ste_list);
+ mlx5dr_ste_put(rule_mem->ste, rule->matcher, nic_rule->nic_matcher);
+ kvfree(rule_mem);
+ }
+}
+
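+/* A table is enlarged only when both the collision count and the count of
+ * valid non-colliding entries reach ctrl->increase_threshold. E.g., if the
+ * threshold were 64 (illustrative value), a rehash would be considered once
+ * there are at least 64 collisions and 64 non-colliding entries.
+ */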
+static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
+ struct mlx5dr_domain *dmn,
+ struct mlx5dr_domain_rx_tx *nic_dmn)
+{
+ struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
+
+ if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size)
+ return false;
+
+ if (!ctrl->may_grow)
+ return false;
+
+ if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
+ (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
+ return true;
+
+ return false;
+}
+
+static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule,
+ struct mlx5dr_ste *ste)
+{
+ struct mlx5dr_rule_member *rule_mem;
+
+ rule_mem = kvzalloc(sizeof(*rule_mem), GFP_KERNEL);
+ if (!rule_mem)
+ return -ENOMEM;
+
+ rule_mem->ste = ste;
+ list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);
+
+ list_add_tail(&rule_mem->use_ste_list, &ste->rule_list);
+
+ return 0;
+}
+
+static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_rx_tx *nic_rule,
+ struct list_head *send_ste_list,
+ struct mlx5dr_ste *last_ste,
+ u8 *hw_ste_arr,
+ u32 new_hw_ste_arr_sz)
+{
+ struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher;
+ struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
+ u8 num_of_builders = nic_matcher->num_of_builders;
+ struct mlx5dr_matcher *matcher = rule->matcher;
+ u8 *curr_hw_ste, *prev_hw_ste;
+ struct mlx5dr_ste *action_ste;
+ int i, k, ret;
+
+ /* Two cases:
+ * 1. num_of_builders is equal to new_hw_ste_arr_sz: the actions fit
+ *    in the matcher STEs.
+ * 2. num_of_builders is less than new_hw_ste_arr_sz: new STEs were
+ *    added to support the actions.
+ */
+ if (num_of_builders == new_hw_ste_arr_sz)
+ return 0;
+
+ for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
+ curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
+ prev_hw_ste = (i == 0) ? curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE);
+ action_ste = dr_rule_create_collision_htbl(matcher,
+ nic_matcher,
+ curr_hw_ste);
+ if (!action_ste)
+ return -ENOMEM;
+
+ mlx5dr_ste_get(action_ste);
+
+ /* When freeing an STE we walk its miss list, so add this STE to the list */
+ list_add_tail(&action_ste->miss_list_node,
+ mlx5dr_ste_get_miss_list(action_ste));
+
+ ste_info_arr[k] = kzalloc(sizeof(*ste_info_arr[k]),
+ GFP_KERNEL);
+ if (!ste_info_arr[k])
+ goto err_exit;
+
+ /* Point current ste to the new action */
+ mlx5dr_ste_set_hit_addr_by_next_htbl(prev_hw_ste, action_ste->htbl);
+ ret = dr_rule_add_member(nic_rule, action_ste);
+ if (ret) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Failed adding rule member\n");
+ goto free_ste_info;
+ }
+ mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
+ curr_hw_ste,
+ ste_info_arr[k],
+ send_ste_list, false);
+ }
+
+ return 0;
+
+free_ste_info:
+ kfree(ste_info_arr[k]);
+err_exit:
+ mlx5dr_ste_put(action_ste, matcher, nic_matcher);
+ return -ENOMEM;
+}
+
+static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste_htbl *cur_htbl,
+ struct mlx5dr_ste *ste,
+ u8 ste_location,
+ u8 *hw_ste,
+ struct list_head *miss_list,
+ struct list_head *send_list)
+{
+ struct mlx5dr_ste_send_info *ste_info;
+
+ /* Take ref on table, only on first time this ste is used */
+ mlx5dr_htbl_get(cur_htbl);
+
+ /* new entry -> new branch */
+ list_add_tail(&ste->miss_list_node, miss_list);
+
+ mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+
+ ste->ste_chain_location = ste_location;
+
+ ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
+ if (!ste_info)
+ goto clean_ste_setting;
+
+ if (mlx5dr_ste_create_next_htbl(matcher,
+ nic_matcher,
+ ste,
+ hw_ste,
+ DR_CHUNK_SIZE_1)) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
+ goto clean_ste_info;
+ }
+
+ cur_htbl->ctrl.num_of_valid_entries++;
+
+ mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste,
+ ste_info, send_list, false);
+
+ return 0;
+
+clean_ste_info:
+ kfree(ste_info);
+clean_ste_setting:
+ list_del_init(&ste->miss_list_node);
+ mlx5dr_htbl_put(cur_htbl);
+
+ return -ENOMEM;
+}
+
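+/* Handling an STE in the chain has three possible outcomes: the hash slot
+ * is unused (create a new entry), an entry with the same tag exists in the
+ * miss list (reuse it, or detect a duplicate rule), or the slot holds a
+ * different tag (add a collision entry, possibly after trying a rehash).
+ */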
+static struct mlx5dr_ste *
+dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_rx_tx *nic_rule,
+ struct list_head *send_ste_list,
+ struct mlx5dr_ste_htbl *cur_htbl,
+ u8 *hw_ste,
+ u8 ste_location,
+ struct mlx5dr_ste_htbl **put_htbl)
+{
+ struct mlx5dr_matcher *matcher = rule->matcher;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_matcher_rx_tx *nic_matcher;
+ struct mlx5dr_domain_rx_tx *nic_dmn;
+ struct mlx5dr_ste_htbl *new_htbl;
+ struct mlx5dr_ste *matched_ste;
+ struct list_head *miss_list;
+ bool skip_rehash = false;
+ struct mlx5dr_ste *ste;
+ int index;
+
+ nic_matcher = nic_rule->nic_matcher;
+ nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+
+again:
+ index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
+ miss_list = &cur_htbl->chunk->miss_list[index];
+ ste = &cur_htbl->ste_arr[index];
+
+ if (mlx5dr_ste_not_used_ste(ste)) {
+ if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
+ ste, ste_location,
+ hw_ste, miss_list,
+ send_ste_list))
+ return NULL;
+ } else {
+ /* Hash table index in use, check if this ste is in the miss list */
+ matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste);
+ if (matched_ste) {
+ /* If this is the last STE in the chain and it has the
+ * same tag, then all the previous STEs match as well,
+ * meaning this rule is a duplicate.
+ */
+ if (mlx5dr_ste_is_last_in_rule(nic_matcher,
+ matched_ste->ste_chain_location)) {
+ mlx5dr_info(dmn, "Duplicate rule inserted, aborting!!\n");
+ return NULL;
+ }
+ return matched_ste;
+ }
+
+ if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
+ /* Hash table index in use, try to resize the hash table */
+ skip_rehash = true;
+
+ /* Hold the table until the update is done;
+ * released in dr_rule_create_rule_nic().
+ */
+ *put_htbl = cur_htbl;
+ mlx5dr_htbl_get(cur_htbl);
+
+ new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
+ ste_location, send_ste_list);
+ if (!new_htbl) {
+ mlx5dr_htbl_put(cur_htbl);
+ mlx5dr_info(dmn, "failed creating rehash table, htbl-log_size: %d\n",
+ cur_htbl->chunk_size);
+ } else {
+ cur_htbl = new_htbl;
+ }
+ goto again;
+ } else {
+ /* Hash table index in use, add another collision (miss) */
+ ste = dr_rule_handle_collision(matcher,
+ nic_matcher,
+ ste,
+ hw_ste,
+ miss_list,
+ send_ste_list);
+ if (!ste) {
+ mlx5dr_dbg(dmn, "failed adding collision entry, index: %d\n",
+ index);
+ return NULL;
+ }
+ }
+ }
+ return ste;
+}
+
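+/* E.g. (illustrative): with mask byte 0x0f and value byte 0x1f, the check
+ * fails since value & ~mask = 0x10, i.e. a bit is set outside the mask.
+ */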
+static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value,
+ u32 s_idx, u32 e_idx)
+{
+ u32 i;
+
+ for (i = s_idx; i < e_idx; i++) {
+ if (value[i] & ~mask[i]) {
+ pr_info("Rule parameters contains a value not specified by mask\n");
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_match_parameters *value,
+ struct mlx5dr_match_param *param)
+{
+ u8 match_criteria = matcher->match_criteria;
+ size_t value_size = value->match_sz;
+ u8 *mask_p = (u8 *)&matcher->mask;
+ u8 *param_p = (u8 *)param;
+ u32 s_idx, e_idx;
+
+ if (!value_size ||
+ (value_size > sizeof(struct mlx5dr_match_param) ||
+ (value_size % sizeof(u32)))) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
+ return false;
+ }
+
+ mlx5dr_ste_copy_param(matcher->match_criteria, param, value);
+
+ if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
+ s_idx = offsetof(struct mlx5dr_match_param, outer);
+ e_idx = min(s_idx + sizeof(param->outer), value_size);
+
+ if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
+ return false;
+ }
+ }
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
+ s_idx = offsetof(struct mlx5dr_match_param, misc);
+ e_idx = min(s_idx + sizeof(param->misc), value_size);
+
+ if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
+ return false;
+ }
+ }
+
+ if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
+ s_idx = offsetof(struct mlx5dr_match_param, inner);
+ e_idx = min(s_idx + sizeof(param->inner), value_size);
+
+ if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
+ return false;
+ }
+ }
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
+ s_idx = offsetof(struct mlx5dr_match_param, misc2);
+ e_idx = min(s_idx + sizeof(param->misc2), value_size);
+
+ if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
+ return false;
+ }
+ }
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
+ s_idx = offsetof(struct mlx5dr_match_param, misc3);
+ e_idx = min(s_idx + sizeof(param->misc3), value_size);
+
+ if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
+ return false;
+ }
+ }
+ return true;
+}
+
+static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_rx_tx *nic_rule)
+{
+ dr_rule_clean_rule_members(rule, nic_rule);
+ return 0;
+}
+
+static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule)
+{
+ dr_rule_destroy_rule_nic(rule, &rule->rx);
+ dr_rule_destroy_rule_nic(rule, &rule->tx);
+ return 0;
+}
+
+static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
+{
+ struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
+
+ switch (dmn->type) {
+ case MLX5DR_DOMAIN_TYPE_NIC_RX:
+ dr_rule_destroy_rule_nic(rule, &rule->rx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_NIC_TX:
+ dr_rule_destroy_rule_nic(rule, &rule->tx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_FDB:
+ dr_rule_destroy_rule_fdb(rule);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dr_rule_remove_action_members(rule);
+ kfree(rule);
+ return 0;
+}
+
+static bool dr_rule_is_ipv6(struct mlx5dr_match_param *param)
+{
+ return (param->outer.ip_version == 6 ||
+ param->inner.ip_version == 6 ||
+ param->outer.ethertype == ETH_P_IPV6 ||
+ param->inner.ethertype == ETH_P_IPV6);
+}
+
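+/* An FDB rule is inserted on both the RX and TX sides; a side on which the
+ * rule can never match is skipped. A rule matching a source port other than
+ * WIRE_PORT (presumably the uplink port) cannot hit on RX, and a rule
+ * matching WIRE_PORT cannot hit on TX.
+ */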
+static bool dr_rule_skip(enum mlx5dr_domain_type domain,
+ enum mlx5dr_ste_entry_type ste_type,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_match_param *value)
+{
+ if (domain != MLX5DR_DOMAIN_TYPE_FDB)
+ return false;
+
+ if (mask->misc.source_port) {
+ if (ste_type == MLX5DR_STE_TYPE_RX)
+ if (value->misc.source_port != WIRE_PORT)
+ return true;
+
+ if (ste_type == MLX5DR_STE_TYPE_TX)
+ if (value->misc.source_port == WIRE_PORT)
+ return true;
+ }
+
+ /* Metadata C can be used to describe the source vport */
+ if (mask->misc2.metadata_reg_c_0) {
+ if (ste_type == MLX5DR_STE_TYPE_RX)
+ if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) != WIRE_PORT)
+ return true;
+
+ if (ste_type == MLX5DR_STE_TYPE_TX)
+ if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) == WIRE_PORT)
+ return true;
+ }
+ return false;
+}
+
+static int
+dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_rx_tx *nic_rule,
+ struct mlx5dr_match_param *param,
+ size_t num_actions,
+ struct mlx5dr_action *actions[])
+{
+ struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
+ struct mlx5dr_matcher *matcher = rule->matcher;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_matcher_rx_tx *nic_matcher;
+ struct mlx5dr_domain_rx_tx *nic_dmn;
+ struct mlx5dr_ste_htbl *htbl = NULL;
+ struct mlx5dr_ste_htbl *cur_htbl;
+ struct mlx5dr_ste *ste = NULL;
+ LIST_HEAD(send_ste_list);
+ u8 *hw_ste_arr = NULL;
+ u32 new_hw_ste_arr_sz;
+ int ret, i;
+
+ nic_matcher = nic_rule->nic_matcher;
+ nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+
+ INIT_LIST_HEAD(&nic_rule->rule_members_list);
+
+ if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param))
+ return 0;
+
+ ret = mlx5dr_matcher_select_builders(matcher,
+ nic_matcher,
+ dr_rule_is_ipv6(param));
+ if (ret)
+ goto out_err;
+
+ hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
+ if (!hw_ste_arr) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ /* Set the tag values inside the ste array */
+ ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
+ if (ret)
+ goto free_hw_ste;
+
+ /* Set the actions values/addresses inside the ste array */
+ ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
+ num_actions, hw_ste_arr,
+ &new_hw_ste_arr_sz);
+ if (ret)
+ goto free_hw_ste;
+
+ cur_htbl = nic_matcher->s_htbl;
+
+ /* Go over the array of STEs and build each dr_ste accordingly.
+ * The loop covers only the builders, which number equal to or fewer
+ * than the STEs, since some actions may live in additional STEs.
+ */
+ for (i = 0; i < nic_matcher->num_of_builders; i++) {
+ /* Calculate CRC and keep new ste entry */
+ u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE);
+
+ ste = dr_rule_handle_ste_branch(rule,
+ nic_rule,
+ &send_ste_list,
+ cur_htbl,
+ cur_hw_ste_ent,
+ i + 1,
+ &htbl);
+ if (!ste) {
+ mlx5dr_err(dmn, "Failed creating next branch\n");
+ ret = -ENOENT;
+ goto free_rule;
+ }
+
+ cur_htbl = ste->next_htbl;
+
+ /* Keep all STEs in the rule struct */
+ ret = dr_rule_add_member(nic_rule, ste);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Failed adding rule member index %d\n", i);
+ goto free_ste;
+ }
+
+ mlx5dr_ste_get(ste);
+ }
+
+ /* Connect actions */
+ ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list,
+ ste, hw_ste_arr, new_hw_ste_arr_sz);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Failed apply actions\n");
+ goto free_rule;
+ }
+ ret = dr_rule_send_update_list(&send_ste_list, dmn, true);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed sending ste!\n");
+ goto free_rule;
+ }
+
+ if (htbl)
+ mlx5dr_htbl_put(htbl);
+
+ return 0;
+
+free_ste:
+ mlx5dr_ste_put(ste, matcher, nic_matcher);
+free_rule:
+ dr_rule_clean_rule_members(rule, nic_rule);
+ /* Clean all ste_info's */
+ list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
+ list_del(&ste_info->send_list);
+ kfree(ste_info);
+ }
+free_hw_ste:
+ kfree(hw_ste_arr);
+out_err:
+ return ret;
+}
+
+static int
+dr_rule_create_rule_fdb(struct mlx5dr_rule *rule,
+ struct mlx5dr_match_param *param,
+ size_t num_actions,
+ struct mlx5dr_action *actions[])
+{
+ struct mlx5dr_match_param copy_param = {};
+ int ret;
+
+ /* Copy match_param since it will be consumed during the first
+ * nic_rule insertion.
+ */
+ memcpy(&copy_param, param, sizeof(struct mlx5dr_match_param));
+
+ ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
+ num_actions, actions);
+ if (ret)
+ return ret;
+
+ ret = dr_rule_create_rule_nic(rule, &rule->tx, &copy_param,
+ num_actions, actions);
+ if (ret)
+ goto destroy_rule_nic_rx;
+
+ return 0;
+
+destroy_rule_nic_rx:
+ dr_rule_destroy_rule_nic(rule, &rule->rx);
+ return ret;
+}
+
+static struct mlx5dr_rule *
+dr_rule_create_rule(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_match_parameters *value,
+ size_t num_actions,
+ struct mlx5dr_action *actions[])
+{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_match_param param = {};
+ struct mlx5dr_rule *rule;
+ int ret;
+
+ if (!dr_rule_verify(matcher, value, &param))
+ return NULL;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return NULL;
+
+ rule->matcher = matcher;
+ INIT_LIST_HEAD(&rule->rule_actions_list);
+
+ ret = dr_rule_add_action_members(rule, num_actions, actions);
+ if (ret)
+ goto free_rule;
+
+ switch (dmn->type) {
+ case MLX5DR_DOMAIN_TYPE_NIC_RX:
+ rule->rx.nic_matcher = &matcher->rx;
+ ret = dr_rule_create_rule_nic(rule, &rule->rx, &param,
+ num_actions, actions);
+ break;
+ case MLX5DR_DOMAIN_TYPE_NIC_TX:
+ rule->tx.nic_matcher = &matcher->tx;
+ ret = dr_rule_create_rule_nic(rule, &rule->tx, &param,
+ num_actions, actions);
+ break;
+ case MLX5DR_DOMAIN_TYPE_FDB:
+ rule->rx.nic_matcher = &matcher->rx;
+ rule->tx.nic_matcher = &matcher->tx;
+ ret = dr_rule_create_rule_fdb(rule, &param,
+ num_actions, actions);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ goto remove_action_members;
+
+ return rule;
+
+remove_action_members:
+ dr_rule_remove_action_members(rule);
+free_rule:
+ kfree(rule);
+ mlx5dr_info(dmn, "Failed creating rule\n");
+ return NULL;
+}
+
+struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_match_parameters *value,
+ size_t num_actions,
+ struct mlx5dr_action *actions[])
+{
+ struct mlx5dr_rule *rule;
+
+ mutex_lock(&matcher->tbl->dmn->mutex);
+ refcount_inc(&matcher->refcount);
+
+ rule = dr_rule_create_rule(matcher, value, num_actions, actions);
+ if (!rule)
+ refcount_dec(&matcher->refcount);
+
+ mutex_unlock(&matcher->tbl->dmn->mutex);
+
+ return rule;
+}
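+
+/* Illustrative call sequence (a sketch, not part of this patch):
+ *
+ * rule = mlx5dr_rule_create(matcher, &value, num_actions, actions);
+ * if (!rule)
+ * return -EINVAL;
+ * ...
+ * mlx5dr_rule_destroy(rule);
+ */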
+
+int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
+{
+ struct mlx5dr_matcher *matcher = rule->matcher;
+ struct mlx5dr_table *tbl = rule->matcher->tbl;
+ int ret;
+
+ mutex_lock(&tbl->dmn->mutex);
+
+ ret = dr_rule_destroy_rule(rule);
+
+ mutex_unlock(&tbl->dmn->mutex);
+
+ if (!ret)
+ refcount_dec(&matcher->refcount);
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
new file mode 100644
index 000000000000..ef0dea44f3b3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -0,0 +1,976 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+#define QUEUE_SIZE 128
+#define SIGNAL_PER_DIV_QUEUE 16
+#define TH_NUMS_TO_DRAIN 2
+
+enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
+
+struct dr_data_seg {
+ u64 addr;
+ u32 length;
+ u32 lkey;
+ unsigned int send_flags;
+};
+
+struct postsend_info {
+ struct dr_data_seg write;
+ struct dr_data_seg read;
+ u64 remote_addr;
+ u32 rkey;
+};
+
+struct dr_qp_rtr_attr {
+ struct mlx5dr_cmd_gid_attr dgid_attr;
+ enum ib_mtu mtu;
+ u32 qp_num;
+ u16 port_num;
+ u8 min_rnr_timer;
+ u8 sgid_index;
+ u16 udp_src_port;
+};
+
+struct dr_qp_rts_attr {
+ u8 timeout;
+ u8 retry_cnt;
+ u8 rnr_retry;
+};
+
+struct dr_qp_init_attr {
+ u32 cqn;
+ u32 pdn;
+ u32 max_send_wr;
+ struct mlx5_uars_page *uar;
+};
+
+static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64)
+{
+ unsigned int idx;
+ u8 opcode;
+
+ opcode = get_cqe_opcode(cqe64);
+ if (opcode == MLX5_CQE_REQ_ERR) {
+ idx = be16_to_cpu(cqe64->wqe_counter) &
+ (dr_cq->qp->sq.wqe_cnt - 1);
+ dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1;
+ } else if (opcode == MLX5_CQE_RESP_ERR) {
+ ++dr_cq->qp->sq.cc;
+ } else {
+ idx = be16_to_cpu(cqe64->wqe_counter) &
+ (dr_cq->qp->sq.wqe_cnt - 1);
+ dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1;
+
+ return CQ_OK;
+ }
+
+ return CQ_POLL_ERR;
+}
+
+static int dr_cq_poll_one(struct mlx5dr_cq *dr_cq)
+{
+ struct mlx5_cqe64 *cqe64;
+ int err;
+
+ cqe64 = mlx5_cqwq_get_cqe(&dr_cq->wq);
+ if (!cqe64)
+ return CQ_EMPTY;
+
+ mlx5_cqwq_pop(&dr_cq->wq);
+ err = dr_parse_cqe(dr_cq, cqe64);
+ mlx5_cqwq_update_db_record(&dr_cq->wq);
+
+ return err;
+}
+
+static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
+{
+ int npolled;
+ int err = 0;
+
+ for (npolled = 0; npolled < ne; ++npolled) {
+ err = dr_cq_poll_one(dr_cq);
+ if (err != CQ_OK)
+ break;
+ }
+
+ return err == CQ_POLL_ERR ? err : npolled;
+}
+
+static void dr_qp_event(struct mlx5_core_qp *mqp, int event)
+{
+ pr_info("DR QP event %u on QP #%u\n", event, mqp->qpn);
+}
+
+static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
+ struct dr_qp_init_attr *attr)
+{
+ u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
+ struct mlx5_wq_param wqp;
+ struct mlx5dr_qp *dr_qp;
+ int inlen;
+ void *qpc;
+ void *in;
+ int err;
+
+ dr_qp = kzalloc(sizeof(*dr_qp), GFP_KERNEL);
+ if (!dr_qp)
+ return NULL;
+
+ wqp.buf_numa_node = mdev->priv.numa_node;
+ wqp.db_numa_node = mdev->priv.numa_node;
+
+ dr_qp->rq.pc = 0;
+ dr_qp->rq.cc = 0;
+ dr_qp->rq.wqe_cnt = 4;
+ dr_qp->sq.pc = 0;
+ dr_qp->sq.cc = 0;
+ dr_qp->sq.wqe_cnt = roundup_pow_of_two(attr->max_send_wr);
+
+ MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
+ MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
+ MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
+ err = mlx5_wq_qp_create(mdev, &wqp, temp_qpc, &dr_qp->wq,
+ &dr_qp->wq_ctrl);
+ if (err) {
+ mlx5_core_info(mdev, "Can't create QP WQ\n");
+ goto err_wq;
+ }
+
+ dr_qp->sq.wqe_head = kcalloc(dr_qp->sq.wqe_cnt,
+ sizeof(dr_qp->sq.wqe_head[0]),
+ GFP_KERNEL);
+
+ if (!dr_qp->sq.wqe_head) {
+ mlx5_core_warn(mdev, "Can't allocate wqe head\n");
+ goto err_wqe_head;
+ }
+
+ inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+ MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
+ dr_qp->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_in;
+ }
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, attr->pdn);
+ MLX5_SET(qpc, qpc, uar_page, attr->uar->index);
+ MLX5_SET(qpc, qpc, log_page_size,
+ dr_qp->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(qpc, qpc, fre, 1);
+ MLX5_SET(qpc, qpc, rlky, 1);
+ MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
+ MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
+ MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
+ MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
+ MLX5_SET(qpc, qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
+ MLX5_SET64(qpc, qpc, dbr_addr, dr_qp->wq_ctrl.db.dma);
+ if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
+ MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
+ mlx5_fill_page_frag_array(&dr_qp->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_qp_in,
+ in, pas));
+
+ err = mlx5_core_create_qp(mdev, &dr_qp->mqp, in, inlen);
+ kfree(in);
+
+ if (err) {
+ mlx5_core_warn(mdev, " Can't create QP\n");
+ goto err_in;
+ }
+ dr_qp->mqp.event = dr_qp_event;
+ dr_qp->uar = attr->uar;
+
+ return dr_qp;
+
+err_in:
+ kfree(dr_qp->sq.wqe_head);
+err_wqe_head:
+ mlx5_wq_destroy(&dr_qp->wq_ctrl);
+err_wq:
+ kfree(dr_qp);
+ return NULL;
+}
+
+static void dr_destroy_qp(struct mlx5_core_dev *mdev,
+ struct mlx5dr_qp *dr_qp)
+{
+ mlx5_core_destroy_qp(mdev, &dr_qp->mqp);
+ kfree(dr_qp->sq.wqe_head);
+ mlx5_wq_destroy(&dr_qp->wq_ctrl);
+ kfree(dr_qp);
+}
+
+static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl)
+{
+ dma_wmb();
+ *dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xfffff);
+
+ /* After the wmb() the HW is aware of the new work */
+ wmb();
+
+ mlx5_write64(ctrl, dr_qp->uar->map + MLX5_BF_OFFSET);
+}
+
+static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
+ u32 rkey, struct dr_data_seg *data_seg,
+ u32 opcode, int nreq)
+{
+ struct mlx5_wqe_raddr_seg *wq_raddr;
+ struct mlx5_wqe_ctrl_seg *wq_ctrl;
+ struct mlx5_wqe_data_seg *wq_dseg;
+ unsigned int size;
+ unsigned int idx;
+
+ size = sizeof(*wq_ctrl) / 16 + sizeof(*wq_dseg) / 16 +
+ sizeof(*wq_raddr) / 16;
+
+ idx = dr_qp->sq.pc & (dr_qp->sq.wqe_cnt - 1);
+
+ wq_ctrl = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
+ wq_ctrl->imm = 0;
+ wq_ctrl->fm_ce_se = (data_seg->send_flags) ?
+ MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ wq_ctrl->opmod_idx_opcode = cpu_to_be32(((dr_qp->sq.pc & 0xffff) << 8) |
+ opcode);
+ wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->mqp.qpn << 8);
+ wq_raddr = (void *)(wq_ctrl + 1);
+ wq_raddr->raddr = cpu_to_be64(remote_addr);
+ wq_raddr->rkey = cpu_to_be32(rkey);
+ wq_raddr->reserved = 0;
+
+ wq_dseg = (void *)(wq_raddr + 1);
+ wq_dseg->byte_count = cpu_to_be32(data_seg->length);
+ wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
+ wq_dseg->addr = cpu_to_be64(data_seg->addr);
+
+ dr_qp->sq.wqe_head[idx] = dr_qp->sq.pc++;
+
+ if (nreq)
+ dr_cmd_notify_hw(dr_qp, wq_ctrl);
+}
+
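+/* Each postsend is an RDMA write of the data followed by an RDMA read of
+ * the same length into the same buffer (see dr_fill_data_segs); the read
+ * completion apparently serves to confirm the write has landed in ICM.
+ */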
+static void dr_post_send(struct mlx5dr_qp *dr_qp, struct postsend_info *send_info)
+{
+ dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
+ &send_info->write, MLX5_OPCODE_RDMA_WRITE, 0);
+ dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
+ &send_info->read, MLX5_OPCODE_RDMA_READ, 1);
+}
+
+/**
+ * mlx5dr_send_fill_and_append_ste_send_info: Fill the ste_info and
+ * append it to the send_list.
+ *
+ * @ste: The STE this data is attached to
+ * @size: Size of the data to write
+ * @offset: Offset of the data from the start of the hw_ste entry
+ * @data: The data
+ * @ste_info: The ste_info to be sent with the send_list
+ * @send_list: The list to append the ste_info to
+ * @copy_data: If true, the data is copied and kept, since it is not
+ * backed up anywhere else (as in rehash).
+ * If false, the data may still be updated after it was
+ * added to the list.
+ */
+void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
+ u16 offset, u8 *data,
+ struct mlx5dr_ste_send_info *ste_info,
+ struct list_head *send_list,
+ bool copy_data)
+{
+ ste_info->size = size;
+ ste_info->ste = ste;
+ ste_info->offset = offset;
+
+ if (copy_data) {
+ memcpy(ste_info->data_cont, data, size);
+ ste_info->data = ste_info->data_cont;
+ } else {
+ ste_info->data = data;
+ }
+
+ list_add_tail(&ste_info->send_list, send_list);
+}
+
+/* The function tries to consume one wc each time, unless the queue is full.
+ * In that case, which means that the HW is behind the SW by a full queue
+ * length, the function drains the CQ until it is empty.
+ */
+static int dr_handle_pending_wc(struct mlx5dr_domain *dmn,
+ struct mlx5dr_send_ring *send_ring)
+{
+ bool is_drain = false;
+ int ne;
+
+ if (send_ring->pending_wqe < send_ring->signal_th)
+ return 0;
+
+ /* Queue is full, start draining it */
+ if (send_ring->pending_wqe >=
+ dmn->send_ring->signal_th * TH_NUMS_TO_DRAIN)
+ is_drain = true;
+
+ do {
+ ne = dr_poll_cq(send_ring->cq, 1);
+ if (ne < 0)
+ return ne;
+ else if (ne == 1)
+ send_ring->pending_wqe -= send_ring->signal_th;
+ } while (is_drain && send_ring->pending_wqe);
+
+ return 0;
+}
+
+static void dr_fill_data_segs(struct mlx5dr_send_ring *send_ring,
+ struct postsend_info *send_info)
+{
+ send_ring->pending_wqe++;
+
+ if (send_ring->pending_wqe % send_ring->signal_th == 0)
+ send_info->write.send_flags |= IB_SEND_SIGNALED;
+
+ send_ring->pending_wqe++;
+ send_info->read.length = send_info->write.length;
+ /* Read into the same write area */
+ send_info->read.addr = (uintptr_t)send_info->write.addr;
+ send_info->read.lkey = send_ring->mr->mkey.key;
+
+ if (send_ring->pending_wqe % send_ring->signal_th == 0)
+ send_info->read.send_flags = IB_SEND_SIGNALED;
+ else
+ send_info->read.send_flags = 0;
+}
+
+static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
+ struct postsend_info *send_info)
+{
+ struct mlx5dr_send_ring *send_ring = dmn->send_ring;
+ u32 buff_offset;
+ int ret;
+
+ ret = dr_handle_pending_wc(dmn, send_ring);
+ if (ret)
+ return ret;
+
+ if (send_info->write.length > dmn->info.max_inline_size) {
+ buff_offset = (send_ring->tx_head &
+ (dmn->send_ring->signal_th - 1)) *
+ send_ring->max_post_send_size;
+ /* Copy to ring mr */
+ memcpy(send_ring->buf + buff_offset,
+ (void *)(uintptr_t)send_info->write.addr,
+ send_info->write.length);
+ send_info->write.addr = (uintptr_t)send_ring->mr->dma_addr + buff_offset;
+ send_info->write.lkey = send_ring->mr->mkey.key;
+ }
+
+ send_ring->tx_head++;
+ dr_fill_data_segs(send_ring, send_info);
+ dr_post_send(send_ring->qp, send_info);
+
+ return 0;
+}
+
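+/* E.g. (illustrative, assuming DR_STE_SIZE is 64B): a 16KB htbl chunk with
+ * a 4KB max_post_send_size is sent in 4 iterations of 4KB, i.e. 64 STEs per
+ * post; a chunk smaller than the max is sent in a single post.
+ */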
+static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ u8 **data,
+ u32 *byte_size,
+ int *iterations,
+ int *num_stes)
+{
+ int alloc_size;
+
+ if (htbl->chunk->byte_size > dmn->send_ring->max_post_send_size) {
+ *iterations = htbl->chunk->byte_size /
+ dmn->send_ring->max_post_send_size;
+ *byte_size = dmn->send_ring->max_post_send_size;
+ alloc_size = *byte_size;
+ *num_stes = *byte_size / DR_STE_SIZE;
+ } else {
+ *iterations = 1;
+ *num_stes = htbl->chunk->num_of_entries;
+ alloc_size = *num_stes * DR_STE_SIZE;
+ }
+
+ *data = kzalloc(alloc_size, GFP_KERNEL);
+ if (!*data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * mlx5dr_send_postsend_ste: Write size bytes at the given offset of an
+ * STE in the HW ICM.
+ *
+ * @dmn: Domain
+ * @ste: The ste struct that contains the data (at
+ * least part of it)
+ * @data: The real data to send
+ * @size: The size of the data to write
+ * @offset: The offset from the ICM mapped data to
+ * start writing at; allows writing only part of the
+ * buffer.
+ *
+ * Return: 0 on success.
+ */
+int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste,
+ u8 *data, u16 size, u16 offset)
+{
+ struct postsend_info send_info = {};
+
+ send_info.write.addr = (uintptr_t)data;
+ send_info.write.length = size;
+ send_info.write.lkey = 0;
+ send_info.remote_addr = mlx5dr_ste_get_mr_addr(ste) + offset;
+ send_info.rkey = ste->htbl->chunk->rkey;
+
+ return dr_postsend_icm_data(dmn, &send_info);
+}
+
+int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ u8 *formatted_ste, u8 *mask)
+{
+ u32 byte_size = htbl->chunk->byte_size;
+ int num_stes_per_iter;
+ int iterations;
+ u8 *data;
+ int ret;
+ int i;
+ int j;
+
+ ret = dr_get_tbl_copy_details(dmn, htbl, &data, &byte_size,
+ &iterations, &num_stes_per_iter);
+ if (ret)
+ return ret;
+
+ /* Send the data 'iterations' times */
+ for (i = 0; i < iterations; i++) {
+ u32 ste_index = i * (byte_size / DR_STE_SIZE);
+ struct postsend_info send_info = {};
+
+ /* Copy all the STEs into the data buffer;
+ * the bit_mask needs to be added.
+ */
+ for (j = 0; j < num_stes_per_iter; j++) {
+ u8 *hw_ste = htbl->ste_arr[ste_index + j].hw_ste;
+ u32 ste_off = j * DR_STE_SIZE;
+
+ if (mlx5dr_ste_is_not_valid_entry(hw_ste)) {
+ memcpy(data + ste_off,
+ formatted_ste, DR_STE_SIZE);
+ } else {
+ /* Copy data */
+ memcpy(data + ste_off,
+ htbl->ste_arr[ste_index + j].hw_ste,
+ DR_STE_SIZE_REDUCED);
+ /* Copy bit_mask */
+ memcpy(data + ste_off + DR_STE_SIZE_REDUCED,
+ mask, DR_STE_SIZE_MASK);
+ }
+ }
+
+ send_info.write.addr = (uintptr_t)data;
+ send_info.write.length = byte_size;
+ send_info.write.lkey = 0;
+ send_info.remote_addr =
+ mlx5dr_ste_get_mr_addr(htbl->ste_arr + ste_index);
+ send_info.rkey = htbl->chunk->rkey;
+
+ ret = dr_postsend_icm_data(dmn, &send_info);
+ if (ret)
+ goto out_free;
+ }
+
+out_free:
+ kfree(data);
+ return ret;
+}
+
+/* Initialize the htbl with default STEs */
+int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ u8 *ste_init_data,
+ bool update_hw_ste)
+{
+ u32 byte_size = htbl->chunk->byte_size;
+ int iterations;
+ int num_stes;
+ u8 *data;
+ int ret;
+ int i;
+
+ ret = dr_get_tbl_copy_details(dmn, htbl, &data, &byte_size,
+ &iterations, &num_stes);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_stes; i++) {
+ u8 *copy_dst;
+
+ /* Copy the same ste into the data buffer */
+ copy_dst = data + i * DR_STE_SIZE;
+ memcpy(copy_dst, ste_init_data, DR_STE_SIZE);
+
+ if (update_hw_ste) {
+ /* Copy the reduced ste to hash table ste_arr */
+ copy_dst = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
+ memcpy(copy_dst, ste_init_data, DR_STE_SIZE_REDUCED);
+ }
+ }
+
+ /* Send the data 'iterations' times */
+ for (i = 0; i < iterations; i++) {
+ u8 ste_index = i * (byte_size / DR_STE_SIZE);
+ struct postsend_info send_info = {};
+
+ send_info.write.addr = (uintptr_t)data;
+ send_info.write.length = byte_size;
+ send_info.write.lkey = 0;
+ send_info.remote_addr =
+ mlx5dr_ste_get_mr_addr(htbl->ste_arr + ste_index);
+ send_info.rkey = htbl->chunk->rkey;
+
+ ret = dr_postsend_icm_data(dmn, &send_info);
+ if (ret)
+ goto out_free;
+ }
+
+out_free:
+ kfree(data);
+ return ret;
+}
+
+int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action *action)
+{
+ struct postsend_info send_info = {};
+ int ret;
+
+ send_info.write.addr = (uintptr_t)action->rewrite.data;
+ send_info.write.length = action->rewrite.chunk->byte_size;
+ send_info.write.lkey = 0;
+ send_info.remote_addr = action->rewrite.chunk->mr_addr;
+ send_info.rkey = action->rewrite.chunk->rkey;
+
+ mutex_lock(&dmn->mutex);
+ ret = dr_postsend_icm_data(dmn, &send_info);
+ mutex_unlock(&dmn->mutex);
+
+ return ret;
+}
+
+static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev,
+ struct mlx5dr_qp *dr_qp,
+ int port)
+{
+ u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
+ void *qpc;
+
+ qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);
+
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, port);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QPC_PM_STATE_MIGRATED);
+ MLX5_SET(qpc, qpc, rre, 1);
+ MLX5_SET(qpc, qpc, rwe, 1);
+
+ return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc,
+ &dr_qp->mqp);
+}
+
+static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
+ struct mlx5dr_qp *dr_qp,
+ struct dr_qp_rts_attr *attr)
+{
+ u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
+ void *qpc;
+
+ qpc = MLX5_ADDR_OF(rtr2rts_qp_in, in, qpc);
+
+ MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->mqp.qpn);
+
+ MLX5_SET(qpc, qpc, log_ack_req_freq, 0);
+ MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
+ MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
+
+ return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, qpc,
+ &dr_qp->mqp);
+}
+
+static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
+ struct mlx5dr_qp *dr_qp,
+ struct dr_qp_rtr_attr *attr)
+{
+ u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
+ void *qpc;
+
+ qpc = MLX5_ADDR_OF(init2rtr_qp_in, in, qpc);
+
+ MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->mqp.qpn);
+
+ MLX5_SET(qpc, qpc, mtu, attr->mtu);
+ MLX5_SET(qpc, qpc, log_msg_max, DR_CHUNK_SIZE_MAX - 1);
+ MLX5_SET(qpc, qpc, remote_qpn, attr->qp_num);
+ memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
+ attr->dgid_attr.mac, sizeof(attr->dgid_attr.mac));
+ memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip),
+ attr->dgid_attr.gid, sizeof(attr->dgid_attr.gid));
+ MLX5_SET(qpc, qpc, primary_address_path.src_addr_index,
+ attr->sgid_index);
+
+ if (attr->dgid_attr.roce_ver == MLX5_ROCE_VERSION_2)
+ MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
+ attr->udp_src_port);
+
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num);
+ MLX5_SET(qpc, qpc, min_rnr_nak, 1);
+
+ return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc,
+ &dr_qp->mqp);
+}
+
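+/* The QP is connected to itself (remote_qpn is set to its own qpn below),
+ * forming a loopback RC QP through which the driver RDMA-writes STEs into
+ * the device ICM.
+ */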
+static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_qp *dr_qp = dmn->send_ring->qp;
+ struct dr_qp_rts_attr rts_attr = {};
+ struct dr_qp_rtr_attr rtr_attr = {};
+ enum ib_mtu mtu = IB_MTU_1024;
+ u16 gid_index = 0;
+ int port = 1;
+ int ret;
+
+ /* Init */
+ ret = dr_modify_qp_rst2init(dmn->mdev, dr_qp, port);
+ if (ret)
+ return ret;
+
+ /* RTR */
+ ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index, &rtr_attr.dgid_attr);
+ if (ret)
+ return ret;
+
+ rtr_attr.mtu = mtu;
+ rtr_attr.qp_num = dr_qp->mqp.qpn;
+ rtr_attr.min_rnr_timer = 12;
+ rtr_attr.port_num = port;
+ rtr_attr.sgid_index = gid_index;
+ rtr_attr.udp_src_port = dmn->info.caps.roce_min_src_udp;
+
+ ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr);
+ if (ret)
+ return ret;
+
+ /* RTS */
+ rts_attr.timeout = 14;
+ rts_attr.retry_cnt = 7;
+ rts_attr.rnr_retry = 7;
+
+ ret = dr_cmd_modify_qp_rtr2rts(dmn->mdev, dr_qp, &rts_attr);
+ if (ret)
+ return ret;
+
+ return 0;
+}
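+
+/* The sequence above is the standard IB RC QP bring-up,
+ * RESET -> INIT -> RTR -> RTS, driven directly via PRM commands.
+ * retry_cnt = 7 is the maximum transport retry count and rnr_retry = 7
+ * means "retry RNR NAKs indefinitely" per the IB spec; both are
+ * conventional choices for a driver-internal loopback QP.
+ */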
+
+static void dr_cq_event(struct mlx5_core_cq *mcq,
+ enum mlx5_event event)
+{
+ pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn);
+}
+
+static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
+ struct mlx5_uars_page *uar,
+ size_t ncqe)
+{
+ u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {};
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_wq_param wqp;
+ struct mlx5_cqe64 *cqe;
+ struct mlx5dr_cq *cq;
+ int inlen, err, eqn;
+ unsigned int irqn;
+ void *cqc, *in;
+ __be64 *pas;
+ u32 i;
+
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq)
+ return NULL;
+
+ ncqe = roundup_pow_of_two(ncqe);
+ MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(ncqe));
+
+ wqp.buf_numa_node = mdev->priv.numa_node;
+ wqp.db_numa_node = mdev->priv.numa_node;
+
+ err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &cq->wq,
+ &cq->wq_ctrl);
+ if (err)
+ goto out;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+ cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
+ }
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ goto err_cqwq;
+
+ err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+ if (err) {
+ kvfree(in);
+ goto err_cqwq;
+ }
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));
+ MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET(cqc, cqc, uar_page, uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
+
+ cq->mcq.event = dr_cq_event;
+
+ err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
+ kvfree(in);
+
+ if (err)
+ goto err_cqwq;
+
+ cq->mcq.cqe_sz = 64;
+ cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
+ cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
+ *cq->mcq.set_ci_db = 0;
+ *cq->mcq.arm_db = 0;
+ cq->mcq.vector = 0;
+ cq->mcq.irqn = irqn;
+ cq->mcq.uar = uar;
+
+ return cq;
+
+err_cqwq:
+ mlx5_wq_destroy(&cq->wq_ctrl);
+out:
+ kfree(cq);
+ return NULL;
+}
+
+static void dr_destroy_cq(struct mlx5_core_dev *mdev, struct mlx5dr_cq *cq)
+{
+ mlx5_core_destroy_cq(mdev, &cq->mcq);
+ mlx5_wq_destroy(&cq->wq_ctrl);
+ kfree(cq);
+}
+
+static int
+dr_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, struct mlx5_core_mkey *mkey)
+{
+ u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
+ void *mkc;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, a, 1);
+ MLX5_SET(mkc, mkc, rw, 1);
+ MLX5_SET(mkc, mkc, rr, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+
+ MLX5_SET(mkc, mkc, pd, pdn);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+ return mlx5_core_create_mkey(mdev, mkey, in, sizeof(in));
+}
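+
+/* The mkey above uses physical-address mode with length64 set, i.e. it
+ * spans the whole address space of the function, so senders can target
+ * raw DMA addresses without registering each buffer separately.
+ */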
+
+static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev,
+ u32 pdn, void *buf, size_t size)
+{
+ struct mlx5dr_mr *mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ struct device *dma_device;
+ dma_addr_t dma_addr;
+ int err;
+
+ if (!mr)
+ return NULL;
+
+ dma_device = &mdev->pdev->dev;
+ dma_addr = dma_map_single(dma_device, buf, size,
+ DMA_BIDIRECTIONAL);
+ err = dma_mapping_error(dma_device, dma_addr);
+ if (err) {
+ mlx5_core_warn(mdev, "Can't DMA map buffer\n");
+ kfree(mr);
+ return NULL;
+ }
+
+ err = dr_create_mkey(mdev, pdn, &mr->mkey);
+ if (err) {
+ mlx5_core_warn(mdev, "Can't create mkey\n");
+ dma_unmap_single(dma_device, dma_addr, size,
+ DMA_BIDIRECTIONAL);
+ kfree(mr);
+ return NULL;
+ }
+
+ mr->dma_addr = dma_addr;
+ mr->size = size;
+ mr->addr = buf;
+
+ return mr;
+}
+
+static void dr_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5dr_mr *mr)
+{
+ mlx5_core_destroy_mkey(mdev, &mr->mkey);
+ dma_unmap_single(&mdev->pdev->dev, mr->dma_addr, mr->size,
+ DMA_BIDIRECTIONAL);
+ kfree(mr);
+}
+
+int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
+{
+ struct dr_qp_init_attr init_attr = {};
+ int cq_size;
+ int size;
+ int ret;
+
+ dmn->send_ring = kzalloc(sizeof(*dmn->send_ring), GFP_KERNEL);
+ if (!dmn->send_ring)
+ return -ENOMEM;
+
+ cq_size = QUEUE_SIZE + 1;
+ dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size);
+ if (!dmn->send_ring->cq) {
+ ret = -ENOMEM;
+ goto free_send_ring;
+ }
+
+ init_attr.cqn = dmn->send_ring->cq->mcq.cqn;
+ init_attr.pdn = dmn->pdn;
+ init_attr.uar = dmn->uar;
+ init_attr.max_send_wr = QUEUE_SIZE;
+
+ dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
+ if (!dmn->send_ring->qp) {
+ ret = -ENOMEM;
+ goto clean_cq;
+ }
+
+ dmn->send_ring->cq->qp = dmn->send_ring->qp;
+
+ dmn->info.max_send_wr = QUEUE_SIZE;
+ dmn->info.max_inline_size = min(dmn->send_ring->qp->max_inline_data,
+ DR_STE_SIZE);
+
+ dmn->send_ring->signal_th = dmn->info.max_send_wr /
+ SIGNAL_PER_DIV_QUEUE;
+
+ /* Prepare qp to be used */
+ ret = dr_prepare_qp_to_rts(dmn);
+ if (ret)
+ goto clean_qp;
+
+ dmn->send_ring->max_post_send_size =
+ mlx5dr_icm_pool_chunk_size_to_byte(DR_CHUNK_SIZE_1K,
+ DR_ICM_TYPE_STE);
+
+ /* Allocating the max size as a buffer for writing */
+ size = dmn->send_ring->signal_th * dmn->send_ring->max_post_send_size;
+ dmn->send_ring->buf = kzalloc(size, GFP_KERNEL);
+ if (!dmn->send_ring->buf) {
+ ret = -ENOMEM;
+ goto clean_qp;
+ }
+
+ dmn->send_ring->buf_size = size;
+
+ dmn->send_ring->mr = dr_reg_mr(dmn->mdev,
+ dmn->pdn, dmn->send_ring->buf, size);
+ if (!dmn->send_ring->mr) {
+ ret = -ENOMEM;
+ goto free_mem;
+ }
+
+ dmn->send_ring->sync_mr = dr_reg_mr(dmn->mdev,
+ dmn->pdn, dmn->send_ring->sync_buff,
+ MIN_READ_SYNC);
+ if (!dmn->send_ring->sync_mr) {
+ ret = -ENOMEM;
+ goto clean_mr;
+ }
+
+ return 0;
+
+clean_mr:
+ dr_dereg_mr(dmn->mdev, dmn->send_ring->mr);
+free_mem:
+ kfree(dmn->send_ring->buf);
+clean_qp:
+ dr_destroy_qp(dmn->mdev, dmn->send_ring->qp);
+clean_cq:
+ dr_destroy_cq(dmn->mdev, dmn->send_ring->cq);
+free_send_ring:
+ kfree(dmn->send_ring);
+
+ return ret;
+}
+
+void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
+ struct mlx5dr_send_ring *send_ring)
+{
+ dr_destroy_qp(dmn->mdev, send_ring->qp);
+ dr_destroy_cq(dmn->mdev, send_ring->cq);
+ dr_dereg_mr(dmn->mdev, send_ring->sync_mr);
+ dr_dereg_mr(dmn->mdev, send_ring->mr);
+ kfree(send_ring->buf);
+ kfree(send_ring);
+}
+
+int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_send_ring *send_ring = dmn->send_ring;
+ struct postsend_info send_info = {};
+ u8 data[DR_STE_SIZE];
+ int num_of_sends_req;
+ int ret;
+ int i;
+
+ /* Sending this number of requests guarantees the queue is drained */
+ num_of_sends_req = send_ring->signal_th * TH_NUMS_TO_DRAIN / 2;
+
+ /* Send fake requests forcing the last to be signaled */
+ send_info.write.addr = (uintptr_t)data;
+ send_info.write.length = DR_STE_SIZE;
+ send_info.write.lkey = 0;
+ /* Using the sync_mr in order to write/read */
+ send_info.remote_addr = (uintptr_t)send_ring->sync_mr->addr;
+ send_info.rkey = send_ring->sync_mr->mkey.key;
+
+ for (i = 0; i < num_of_sends_req; i++) {
+ ret = dr_postsend_icm_data(dmn, &send_info);
+ if (ret)
+ return ret;
+ }
+
+ ret = dr_handle_pending_wc(dmn, send_ring);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
new file mode 100644
index 000000000000..6b0af64536d8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -0,0 +1,2308 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <linux/types.h>
+#include "dr_types.h"
+
+#define DR_STE_CRC_POLY 0xEDB88320L
+#define STE_IPV4 0x1
+#define STE_IPV6 0x2
+#define STE_TCP 0x1
+#define STE_UDP 0x2
+#define STE_SPI 0x3
+#define IP_VERSION_IPV4 0x4
+#define IP_VERSION_IPV6 0x6
+#define DR_STE_SVLAN 0x1
+#define DR_STE_CVLAN 0x2
+
+#define DR_STE_ENABLE_FLOW_TAG BIT(31)
+
+/* Write a given value into an STE field when the matching spec field is set */
+#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
+ if ((spec)->s_fname) { \
+ MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
+ (spec)->s_fname = 0; \
+ } \
+} while (0)
+
+/* Copy spec->s_fname into tag->t_fname and mark the spec field as consumed */
+#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
+ DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)
+
+/* Set bit_mask->bm_fname to all ones (-1) and mark spec->s_fname as consumed */
+#define DR_STE_SET_MASK(lookup_type, bit_mask, bm_fname, spec, s_fname) \
+ DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, -1)
+
+/* Copy spec->s_fname into bit_mask->bm_fname and mark spec->s_fname as consumed */
+#define DR_STE_SET_MASK_V(lookup_type, bit_mask, bm_fname, spec, s_fname) \
+ DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, (spec)->s_fname)
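+
+/* For example, DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid)
+ * expands to:
+ *
+ * if (spec->first_vid) {
+ * MLX5_SET(ste_eth_l2_src, tag, first_vlan_id, spec->first_vid);
+ * spec->first_vid = 0;
+ * }
+ *
+ * Each consumed match field is written to the STE and then cleared, so
+ * any field left non-zero afterwards can be detected as unsupported.
+ */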
+
+#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
+} while (0)
+
+#define DR_STE_SET_MPLS_MASK(lookup_type, mask, in_out, bit_mask) do { \
+ DR_STE_SET_MASK_V(lookup_type, mask, mpls0_label, mask, \
+ in_out##_first_mpls_label);\
+ DR_STE_SET_MASK_V(lookup_type, mask, mpls0_s_bos, mask, \
+ in_out##_first_mpls_s_bos); \
+ DR_STE_SET_MASK_V(lookup_type, mask, mpls0_exp, mask, \
+ in_out##_first_mpls_exp); \
+ DR_STE_SET_MASK_V(lookup_type, mask, mpls0_ttl, mask, \
+ in_out##_first_mpls_ttl); \
+} while (0)
+
+#define DR_STE_SET_MPLS_TAG(lookup_type, mask, in_out, tag) do { \
+ DR_STE_SET_TAG(lookup_type, tag, mpls0_label, mask, \
+ in_out##_first_mpls_label);\
+ DR_STE_SET_TAG(lookup_type, tag, mpls0_s_bos, mask, \
+ in_out##_first_mpls_s_bos); \
+ DR_STE_SET_TAG(lookup_type, tag, mpls0_exp, mask, \
+ in_out##_first_mpls_exp); \
+ DR_STE_SET_TAG(lookup_type, tag, mpls0_ttl, mask, \
+ in_out##_first_mpls_ttl); \
+} while (0)
+
+#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
+ (_misc)->outer_first_mpls_over_gre_label || \
+ (_misc)->outer_first_mpls_over_gre_exp || \
+ (_misc)->outer_first_mpls_over_gre_s_bos || \
+ (_misc)->outer_first_mpls_over_gre_ttl)
+#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
+ (_misc)->outer_first_mpls_over_udp_label || \
+ (_misc)->outer_first_mpls_over_udp_exp || \
+ (_misc)->outer_first_mpls_over_udp_s_bos || \
+ (_misc)->outer_first_mpls_over_udp_ttl)
+
+#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
+ ((inner) ? MLX5DR_STE_LU_TYPE_##lookup_type##_I : \
+ (rx) ? MLX5DR_STE_LU_TYPE_##lookup_type##_D : \
+ MLX5DR_STE_LU_TYPE_##lookup_type##_O)
+
+enum dr_ste_tunl_action {
+ DR_STE_TUNL_ACTION_NONE = 0,
+ DR_STE_TUNL_ACTION_ENABLE = 1,
+ DR_STE_TUNL_ACTION_DECAP = 2,
+ DR_STE_TUNL_ACTION_L3_DECAP = 3,
+ DR_STE_TUNL_ACTION_POP_VLAN = 4,
+};
+
+enum dr_ste_action_type {
+ DR_STE_ACTION_TYPE_PUSH_VLAN = 1,
+ DR_STE_ACTION_TYPE_ENCAP_L3 = 3,
+ DR_STE_ACTION_TYPE_ENCAP = 4,
+};
+
+struct dr_hw_ste_format {
+ u8 ctrl[DR_STE_SIZE_CTRL];
+ u8 tag[DR_STE_SIZE_TAG];
+ u8 mask[DR_STE_SIZE_MASK];
+};
+
+u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ u8 masked[DR_STE_SIZE_TAG] = {};
+ u32 crc32, index;
+ u16 bit;
+ int i;
+
+ /* Don't calculate the CRC if the result is predictable */
+ if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
+ return 0;
+
+ /* Mask tag using byte mask, bit per byte */
+ bit = 1 << (DR_STE_SIZE_TAG - 1);
+ for (i = 0; i < DR_STE_SIZE_TAG; i++) {
+ if (htbl->byte_mask & bit)
+ masked[i] = hw_ste->tag[i];
+
+ bit = bit >> 1;
+ }
+
+ crc32 = mlx5dr_crc32_slice8_calc(masked, DR_STE_SIZE_TAG);
+ index = crc32 & (htbl->chunk->num_of_entries - 1);
+
+ return index;
+}
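+
+/* For example, with a 64-entry table and byte_mask 0xc000 only tag
+ * bytes 0 and 1 survive the masking; the CRC is computed over the
+ * 16-byte masked tag and the low ilog2(64) = 6 bits select the bucket.
+ */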
+
+static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
+{
+ u16 byte_mask = 0;
+ int i;
+
+ for (i = 0; i < DR_STE_SIZE_MASK; i++) {
+ byte_mask = byte_mask << 1;
+ if (bit_mask[i] == 0xff)
+ byte_mask |= 1;
+ }
+ return byte_mask;
+}
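+
+/* For example, a bit_mask whose first two bytes are 0xff yields
+ * byte_mask 0xc000: bit (15 - i) of the u16 marks bit_mask byte i as
+ * fully used. Only fully masked (0xff) bytes take part in the hash.
+ */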
+
+void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+
+ memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
+}
+
+void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
+ DR_STE_ENABLE_FLOW_TAG | flow_tag);
+}
+
+void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
+{
+ /* This can be used for both rx_steering_mult and for sx_transmit */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
+}
+
+void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
+}
+
+void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
+ bool go_back)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
+ DR_STE_ACTION_TYPE_PUSH_VLAN);
+ MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
+ /* Due to a HW limitation we need to set this bit, otherwise reformat +
+ * push vlan will not work.
+ */
+ if (go_back)
+ mlx5dr_ste_set_go_back_bit(hw_ste_p);
+}
+
+void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id, int size, bool encap_l3)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
+ encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
+ /* The hardware expects the size here in 2-byte words */
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
+ MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
+}
+
+void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_DECAP);
+}
+
+void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_POP_VLAN);
+}
+
+void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_L3_DECAP);
+ MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
+}
+
+void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type)
+{
+ MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
+}
+
+u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p)
+{
+ return MLX5_GET(ste_general, hw_ste_p, entry_type);
+}
+
+void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
+ u32 re_write_index)
+{
+ MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
+ num_of_actions);
+ MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
+ re_write_index);
+}
+
+void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
+{
+ MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
+}
+
+void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type,
+ u16 gvmi)
+{
+ MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
+ MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
+ MLX5_SET(ste_general, hw_ste_p, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
+
+ /* Set GVMI once, this is the same for RX/TX
+ * bits 63_48 of next table base / miss address encode the next GVMI
+ */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
+}
+
+static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
+{
+ memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
+ memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
+}
+
+static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
+{
+ hw_ste->tag[0] = 0xdc;
+ hw_ste->mask[0] = 0;
+}
+
+u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste)
+{
+ u64 index =
+ (MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_31_6) |
+ ((u64)MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_39_32) << 26));
+
+ return index << 6;
+}
+
+void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size)
+{
+ u64 index = (icm_addr >> 5) | ht_size;
+
+ MLX5_SET(ste_general, hw_ste, next_table_base_39_32_size, index >> 27);
+ MLX5_SET(ste_general, hw_ste, next_table_base_31_5_size, index);
+}
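+
+/* For example, icm_addr 0x200000 (32-byte aligned) with ht_size 64
+ * packs to index 0x10040: the aligned address and the table size share
+ * one value, split across next_table_base_39_32_size (index >> 27) and
+ * next_table_base_31_5_size.
+ */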
+
+u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
+{
+ u32 index = ste - ste->htbl->ste_arr;
+
+ return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
+}
+
+u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
+{
+ u32 index = ste - ste->htbl->ste_arr;
+
+ return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
+}
+
+struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
+{
+ u32 index = ste - ste->htbl->ste_arr;
+
+ return &ste->htbl->miss_list[index];
+}
+
+static void dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
+ struct mlx5dr_ste_htbl *next_htbl)
+{
+ struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
+ u8 *hw_ste = ste->hw_ste;
+
+ MLX5_SET(ste_general, hw_ste, byte_mask, next_htbl->byte_mask);
+ MLX5_SET(ste_general, hw_ste, next_lu_type, next_htbl->lu_type);
+ mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+
+ dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
+}
+
+bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
+ u8 ste_location)
+{
+ return ste_location == nic_matcher->num_of_builders;
+}
+
+/* Replace relevant fields, except for:
+ * htbl - keep the original htbl
+ * miss_list + list - already took the src from the list.
+ * icm_addr/mr_addr - depend on the hosting table.
+ *
+ * Before:
+ * | a | -> | b | -> | c | ->
+ *
+ * After:
+ * | a | -> | c | ->
+ * while the data that was in b is copied into a.
+ */
+static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
+{
+ memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
+ dst->next_htbl = src->next_htbl;
+ if (dst->next_htbl)
+ dst->next_htbl->pointing_ste = dst;
+
+ refcount_set(&dst->refcount, refcount_read(&src->refcount));
+
+ INIT_LIST_HEAD(&dst->rule_list);
+ list_splice_tail_init(&src->rule_list, &dst->rule_list);
+}
+
+/* Free an ste that is the head and the only entry in the miss_list */
+static void
+dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste_send_info *ste_info_head,
+ struct list_head *send_ste_list,
+ struct mlx5dr_ste_htbl *stats_tbl)
+{
+ u8 tmp_data_ste[DR_STE_SIZE] = {};
+ struct mlx5dr_ste tmp_ste = {};
+ u64 miss_addr;
+
+ tmp_ste.hw_ste = tmp_data_ste;
+
+ /* Use a temporary ste because dr_ste_always_miss_addr()
+ * touches the bit_mask area, which doesn't exist in the
+ * reduced ste->hw_ste copy.
+ */
+ memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
+ miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
+ mlx5dr_ste_always_miss_addr(&tmp_ste, miss_addr);
+ memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);
+
+ list_del_init(&ste->miss_list_node);
+
+ /* Write full STE size in order to have "always_miss" */
+ mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
+ 0, tmp_data_ste,
+ ste_info_head,
+ send_ste_list,
+ true /* Copy data */);
+
+ stats_tbl->ctrl.num_of_valid_entries--;
+}
+
+/* Free an ste which is the head but NOT the only one in the miss_list:
+ * |_ste_| --> |_next_ste_| --> |__| --> |__| --> NULL
+ */
+static void
+dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
+ struct mlx5dr_ste_send_info *ste_info_head,
+ struct list_head *send_ste_list,
+ struct mlx5dr_ste_htbl *stats_tbl)
+{
+ struct mlx5dr_ste_htbl *next_miss_htbl;
+
+ next_miss_htbl = next_ste->htbl;
+
+ /* Remove next_ste from the miss_list before copying its data */
+ list_del_init(&next_ste->miss_list_node);
+
+ /* Update all rule members that use next_ste to point at ste */
+ mlx5dr_rule_update_rule_member(next_ste, ste);
+
+ /* Move data from next into ste */
+ dr_ste_replace(ste, next_ste);
+
+ /* Release the htbl that contains next_ste.
+ * The original htbl keeps the same number of entries.
+ */
+ mlx5dr_htbl_put(next_miss_htbl);
+
+ mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE_REDUCED,
+ 0, ste->hw_ste,
+ ste_info_head,
+ send_ste_list,
+ true /* Copy data */);
+
+ stats_tbl->ctrl.num_of_collisions--;
+ stats_tbl->ctrl.num_of_valid_entries--;
+}
+
+/* Free ste that is located in the middle of the miss list:
+ * |__| --> |_prev_ste_| --> |_ste_| --> |_next_ste_|
+ */
+static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
+ struct mlx5dr_ste_send_info *ste_info,
+ struct list_head *send_ste_list,
+ struct mlx5dr_ste_htbl *stats_tbl)
+{
+ struct mlx5dr_ste *prev_ste;
+ u64 miss_addr;
+
+ prev_ste = list_entry(mlx5dr_ste_get_miss_list(ste)->prev, struct mlx5dr_ste,
+ miss_list_node);
+ if (!prev_ste) {
+ WARN_ON(true);
+ return;
+ }
+
+ miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste);
+ mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr);
+
+ mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_REDUCED, 0,
+ prev_ste->hw_ste, ste_info,
+ send_ste_list, true /* Copy data*/);
+
+ list_del_init(&ste->miss_list_node);
+
+ stats_tbl->ctrl.num_of_valid_entries--;
+ stats_tbl->ctrl.num_of_collisions--;
+}
+
+void mlx5dr_ste_free(struct mlx5dr_ste *ste,
+ struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+ struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_send_info ste_info_head;
+ struct mlx5dr_ste *next_ste, *first_ste;
+ bool put_on_origin_table = true;
+ struct mlx5dr_ste_htbl *stats_tbl;
+ LIST_HEAD(send_ste_list);
+
+ first_ste = list_entry(mlx5dr_ste_get_miss_list(ste)->next,
+ struct mlx5dr_ste, miss_list_node);
+ stats_tbl = first_ste->htbl;
+
+ /* Two options:
+ * 1. ste is head:
+ * a. head ste is the only ste in the miss list
+ * b. head ste is not the only ste in the miss-list
+ * 2. ste is not head
+ */
+ if (first_ste == ste) { /* Ste is the head */
+ struct mlx5dr_ste *last_ste;
+
+ last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
+ struct mlx5dr_ste, miss_list_node);
+ if (last_ste == first_ste)
+ next_ste = NULL;
+ else
+ next_ste = list_entry(ste->miss_list_node.next,
+ struct mlx5dr_ste, miss_list_node);
+
+ if (!next_ste) {
+ /* One and only entry in the list */
+ dr_ste_remove_head_ste(ste, nic_matcher,
+ &ste_info_head,
+ &send_ste_list,
+ stats_tbl);
+ } else {
+ /* First but not only entry in the list */
+ dr_ste_replace_head_ste(ste, next_ste, &ste_info_head,
+ &send_ste_list, stats_tbl);
+ put_on_origin_table = false;
+ }
+ } else { /* Ste in the middle of the list */
+ dr_ste_remove_middle_ste(ste, &ste_info_head, &send_ste_list, stats_tbl);
+ }
+
+ /* Update HW */
+ list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
+ &send_ste_list, send_list) {
+ list_del(&cur_ste_info->send_list);
+ mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
+ cur_ste_info->data, cur_ste_info->size,
+ cur_ste_info->offset);
+ }
+
+ if (put_on_origin_table)
+ mlx5dr_htbl_put(ste->htbl);
+}
+
+bool mlx5dr_ste_equal_tag(void *src, void *dst)
+{
+ struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
+ struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
+
+ return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
+}
+
+void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
+ struct mlx5dr_ste_htbl *next_htbl)
+{
+ struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
+
+ mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+}
+
+void mlx5dr_ste_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
+{
+ u64 index = miss_addr >> 6;
+
+ /* The miss address for TX and RX STEs is located at the same offsets */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
+}
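+
+/* Miss addresses are 64-byte aligned, so only address bits 39:6 are
+ * stored: bits 31:6 in miss_address_31_6 and bits 39:32 in
+ * miss_address_39_32. mlx5dr_ste_get_miss_addr() reverses this packing.
+ */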
+
+void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr)
+{
+ u8 *hw_ste = ste->hw_ste;
+
+ MLX5_SET(ste_rx_steering_mult, hw_ste, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
+ mlx5dr_ste_set_miss_addr(hw_ste, miss_addr);
+ dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
+}
+
+/* The assumption here is that ste->hw_ste is not updated for an unused
+ * ste, so it remains all zeros and checking next_lu_type is sufficient.
+ */
+bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)p_hw_ste;
+
+ if (MLX5_GET(ste_general, hw_ste, next_lu_type) ==
+ MLX5DR_STE_LU_TYPE_NOP)
+ return true;
+
+ return false;
+}
+
+bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
+{
+ return !refcount_read(&ste->refcount);
+}
+
+static u16 get_bits_per_mask(u16 byte_mask)
+{
+ u16 bits = 0;
+
+ while (byte_mask) {
+ byte_mask = byte_mask & (byte_mask - 1);
+ bits++;
+ }
+
+ return bits;
+}
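+
+/* This is Kernighan's popcount: each iteration clears the lowest set
+ * bit, so the loop runs once per set bit. hweight16() would be the
+ * idiomatic kernel equivalent.
+ */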
+
+/* Init one ste as a pattern for ste data array */
+void mlx5dr_ste_set_formatted_ste(u16 gvmi,
+ struct mlx5dr_domain_rx_tx *nic_dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ u8 *formatted_ste,
+ struct mlx5dr_htbl_connect_info *connect_info)
+{
+ struct mlx5dr_ste ste = {};
+
+ mlx5dr_ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
+ ste.hw_ste = formatted_ste;
+
+ if (connect_info->type == CONNECT_HIT)
+ dr_ste_always_hit_htbl(&ste, connect_info->hit_next_htbl);
+ else
+ mlx5dr_ste_always_miss_addr(&ste, connect_info->miss_icm_addr);
+}
+
+int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
+ struct mlx5dr_domain_rx_tx *nic_dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ struct mlx5dr_htbl_connect_info *connect_info,
+ bool update_hw_ste)
+{
+ u8 formatted_ste[DR_STE_SIZE] = {};
+
+ mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
+ nic_dmn,
+ htbl,
+ formatted_ste,
+ connect_info);
+
+ return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
+}
+
+int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste *ste,
+ u8 *cur_hw_ste,
+ enum mlx5dr_icm_chunk_size log_table_size)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)cur_hw_ste;
+ struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_htbl_connect_info info;
+ struct mlx5dr_ste_htbl *next_htbl;
+
+ if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
+ u32 bits_in_mask;
+ u8 next_lu_type;
+ u16 byte_mask;
+
+ next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
+ byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
+
+ /* Don't allocate a table larger than required: the useful
+ * hash bits are bounded by the byte_mask, so there is no
+ * need for more entries than the mask can distinguish.
+ */
+ bits_in_mask = get_bits_per_mask(byte_mask) * BITS_PER_BYTE;
+ log_table_size = min(log_table_size, bits_in_mask);
+
+ next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
+ log_table_size,
+ next_lu_type,
+ byte_mask);
+ if (!next_htbl) {
+ mlx5dr_dbg(dmn, "Failed allocating table\n");
+ return -ENOMEM;
+ }
+
+ /* Write new table to HW */
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
+ if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
+ &info, false)) {
+ mlx5dr_info(dmn, "Failed writing table to HW\n");
+ goto free_table;
+ }
+
+ mlx5dr_ste_set_hit_addr_by_next_htbl(cur_hw_ste, next_htbl);
+ ste->next_htbl = next_htbl;
+ next_htbl->pointing_ste = ste;
+ }
+
+ return 0;
+
+free_table:
+ mlx5dr_ste_htbl_free(next_htbl);
+ return -ENOENT;
+}
+
+static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
+{
+ struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
+ int num_of_entries;
+
+ htbl->ctrl.may_grow = true;
+
+ if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1)
+ htbl->ctrl.may_grow = false;
+
+ /* Threshold is 50%; one is added so a table of size 1 gets a non-zero threshold */
+ num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
+ ctrl->increase_threshold = (num_of_entries + 1) / 2;
+}
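+
+/* For example, a 64-entry chunk gets increase_threshold
+ * = (64 + 1) / 2 = 32, i.e. the table is considered for growing once
+ * half of its buckets hold valid entries.
+ */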
+
+struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
+ enum mlx5dr_icm_chunk_size chunk_size,
+ u8 lu_type, u16 byte_mask)
+{
+ struct mlx5dr_icm_chunk *chunk;
+ struct mlx5dr_ste_htbl *htbl;
+ int i;
+
+ htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
+ if (!htbl)
+ return NULL;
+
+ chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
+ if (!chunk)
+ goto out_free_htbl;
+
+ htbl->chunk = chunk;
+ htbl->lu_type = lu_type;
+ htbl->byte_mask = byte_mask;
+ htbl->ste_arr = chunk->ste_arr;
+ htbl->hw_ste_arr = chunk->hw_ste_arr;
+ htbl->miss_list = chunk->miss_list;
+ refcount_set(&htbl->refcount, 0);
+
+ for (i = 0; i < chunk->num_of_entries; i++) {
+ struct mlx5dr_ste *ste = &htbl->ste_arr[i];
+
+ ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
+ ste->htbl = htbl;
+ refcount_set(&ste->refcount, 0);
+ INIT_LIST_HEAD(&ste->miss_list_node);
+ INIT_LIST_HEAD(&htbl->miss_list[i]);
+ INIT_LIST_HEAD(&ste->rule_list);
+ }
+
+ htbl->chunk_size = chunk_size;
+ dr_ste_set_ctrl(htbl);
+ return htbl;
+
+out_free_htbl:
+ kfree(htbl);
+ return NULL;
+}
+
+int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
+{
+ if (refcount_read(&htbl->refcount))
+ return -EBUSY;
+
+ mlx5dr_icm_free_chunk(htbl->chunk);
+ kfree(htbl);
+ return 0;
+}
+
+int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
+ u8 match_criteria,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_match_param *value)
+{
+ if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
+ if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
+ mlx5dr_dbg(dmn, "Partial mask source_port is not supported\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_match_param *value,
+ u8 *ste_arr)
+{
+ struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_build *sb;
+ int ret, i;
+
+ ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
+ &matcher->mask, value);
+ if (ret)
+ return ret;
+
+ sb = nic_matcher->ste_builder;
+ for (i = 0; i < nic_matcher->num_of_builders; i++) {
+ mlx5dr_ste_init(ste_arr,
+ sb->lu_type,
+ nic_dmn->ste_type,
+ dmn->info.caps.gvmi);
+
+ mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);
+
+ ret = sb->ste_build_tag_func(value, sb, ste_arr);
+ if (ret)
+ return ret;
+
+ /* Connect the STEs */
+ if (i < (nic_matcher->num_of_builders - 1)) {
+ /* Need the next builder for these fields,
+ * not relevant for the last ste in the chain.
+ */
+ sb++;
+ MLX5_SET(ste_general, ste_arr, next_lu_type, sb->lu_type);
+ MLX5_SET(ste_general, ste_arr, byte_mask, sb->byte_mask);
+ }
+ ste_arr += DR_STE_SIZE;
+ }
+ return 0;
+}
+
+static int dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+ if (mask->smac_47_16 || mask->smac_15_0) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
+ mask->smac_47_16 >> 16);
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
+ mask->smac_47_16 << 16 | mask->smac_15_0);
+ mask->smac_47_16 = 0;
+ mask->smac_15_0 = 0;
+ }
+
+ DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_MASK(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);
+
+ if (mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ } else if (mask->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
+ mask->svlan_tag = 0;
+ }
+
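+ /* A field still set here means both cvlan and svlan were requested */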
+ if (mask->cvlan_tag || mask->svlan_tag) {
+ pr_info("Invalid c/svlan mask configuration\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
+{
+ spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
+ spec->gre_k_present = MLX5_GET(fte_match_set_misc, mask, gre_k_present);
+ spec->gre_s_present = MLX5_GET(fte_match_set_misc, mask, gre_s_present);
+ spec->source_vhca_port = MLX5_GET(fte_match_set_misc, mask, source_vhca_port);
+ spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn);
+
+ spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port);
+
+ spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio);
+ spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi);
+ spec->outer_second_vid = MLX5_GET(fte_match_set_misc, mask, outer_second_vid);
+ spec->inner_second_prio = MLX5_GET(fte_match_set_misc, mask, inner_second_prio);
+ spec->inner_second_cfi = MLX5_GET(fte_match_set_misc, mask, inner_second_cfi);
+ spec->inner_second_vid = MLX5_GET(fte_match_set_misc, mask, inner_second_vid);
+
+ spec->outer_second_cvlan_tag =
+ MLX5_GET(fte_match_set_misc, mask, outer_second_cvlan_tag);
+ spec->inner_second_cvlan_tag =
+ MLX5_GET(fte_match_set_misc, mask, inner_second_cvlan_tag);
+ spec->outer_second_svlan_tag =
+ MLX5_GET(fte_match_set_misc, mask, outer_second_svlan_tag);
+ spec->inner_second_svlan_tag =
+ MLX5_GET(fte_match_set_misc, mask, inner_second_svlan_tag);
+
+ spec->gre_protocol = MLX5_GET(fte_match_set_misc, mask, gre_protocol);
+
+ spec->gre_key_h = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi);
+ spec->gre_key_l = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo);
+
+ spec->vxlan_vni = MLX5_GET(fte_match_set_misc, mask, vxlan_vni);
+
+ spec->geneve_vni = MLX5_GET(fte_match_set_misc, mask, geneve_vni);
+ spec->geneve_oam = MLX5_GET(fte_match_set_misc, mask, geneve_oam);
+
+ spec->outer_ipv6_flow_label =
+ MLX5_GET(fte_match_set_misc, mask, outer_ipv6_flow_label);
+
+ spec->inner_ipv6_flow_label =
+ MLX5_GET(fte_match_set_misc, mask, inner_ipv6_flow_label);
+
+ spec->geneve_opt_len = MLX5_GET(fte_match_set_misc, mask, geneve_opt_len);
+ spec->geneve_protocol_type =
+ MLX5_GET(fte_match_set_misc, mask, geneve_protocol_type);
+
+ spec->bth_dst_qp = MLX5_GET(fte_match_set_misc, mask, bth_dst_qp);
+}
+
+static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
+{
+ u32 raw_ip[4];
+
+ spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16);
+
+ spec->smac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0);
+ spec->ethertype = MLX5_GET(fte_match_set_lyr_2_4, mask, ethertype);
+
+ spec->dmac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16);
+
+ spec->dmac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0);
+ spec->first_prio = MLX5_GET(fte_match_set_lyr_2_4, mask, first_prio);
+ spec->first_cfi = MLX5_GET(fte_match_set_lyr_2_4, mask, first_cfi);
+ spec->first_vid = MLX5_GET(fte_match_set_lyr_2_4, mask, first_vid);
+
+ spec->ip_protocol = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_protocol);
+ spec->ip_dscp = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_dscp);
+ spec->ip_ecn = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_ecn);
+ spec->cvlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, cvlan_tag);
+ spec->svlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, svlan_tag);
+ spec->frag = MLX5_GET(fte_match_set_lyr_2_4, mask, frag);
+ spec->ip_version = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_version);
+ spec->tcp_flags = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_flags);
+ spec->tcp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_sport);
+ spec->tcp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_dport);
+
+ spec->ttl_hoplimit = MLX5_GET(fte_match_set_lyr_2_4, mask, ttl_hoplimit);
+
+ spec->udp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_sport);
+ spec->udp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_dport);
+
+ memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ sizeof(raw_ip));
+
+ spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
+ spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
+ spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
+ spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);
+
+ memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ sizeof(raw_ip));
+
+ spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
+ spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
+ spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
+ spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
+}
+
+static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec)
+{
+ spec->outer_first_mpls_label =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_label);
+ spec->outer_first_mpls_exp =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp);
+ spec->outer_first_mpls_s_bos =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos);
+ spec->outer_first_mpls_ttl =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl);
+ spec->inner_first_mpls_label =
+ MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_label);
+ spec->inner_first_mpls_exp =
+ MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp);
+ spec->inner_first_mpls_s_bos =
+ MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos);
+ spec->inner_first_mpls_ttl =
+ MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl);
+ spec->outer_first_mpls_over_gre_label =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label);
+ spec->outer_first_mpls_over_gre_exp =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp);
+ spec->outer_first_mpls_over_gre_s_bos =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos);
+ spec->outer_first_mpls_over_gre_ttl =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl);
+ spec->outer_first_mpls_over_udp_label =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label);
+ spec->outer_first_mpls_over_udp_exp =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp);
+ spec->outer_first_mpls_over_udp_s_bos =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos);
+ spec->outer_first_mpls_over_udp_ttl =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl);
+ spec->metadata_reg_c_7 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_7);
+ spec->metadata_reg_c_6 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_6);
+ spec->metadata_reg_c_5 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_5);
+ spec->metadata_reg_c_4 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_4);
+ spec->metadata_reg_c_3 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_3);
+ spec->metadata_reg_c_2 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_2);
+ spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1);
+ spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0);
+ spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a);
+ spec->metadata_reg_b = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_b);
+}
+
+static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
+{
+ spec->inner_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_seq_num);
+ spec->outer_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_seq_num);
+ spec->inner_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_ack_num);
+ spec->outer_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_ack_num);
+ spec->outer_vxlan_gpe_vni =
+ MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_vni);
+ spec->outer_vxlan_gpe_next_protocol =
+ MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol);
+ spec->outer_vxlan_gpe_flags =
+ MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_flags);
+ spec->icmpv4_header_data = MLX5_GET(fte_match_set_misc3, mask, icmp_header_data);
+ spec->icmpv6_header_data =
+ MLX5_GET(fte_match_set_misc3, mask, icmpv6_header_data);
+ spec->icmpv4_type = MLX5_GET(fte_match_set_misc3, mask, icmp_type);
+ spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
+ spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
+ spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
+}
+
+void mlx5dr_ste_copy_param(u8 match_criteria,
+ struct mlx5dr_match_param *set_param,
+ struct mlx5dr_match_parameters *mask)
+{
+ u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
+ u8 *data = (u8 *)mask->match_buf;
+ size_t param_location;
+ void *buff;
+
+ if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
+ if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) {
+ memcpy(tail_param, data, mask->match_sz);
+ buff = tail_param;
+ } else {
+ buff = mask->match_buf;
+ }
+ dr_ste_copy_mask_spec(buff, &set_param->outer);
+ }
+ param_location = sizeof(struct mlx5dr_match_spec);
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
+ if (mask->match_sz < param_location +
+ sizeof(struct mlx5dr_match_misc)) {
+ memcpy(tail_param, data + param_location,
+ mask->match_sz - param_location);
+ buff = tail_param;
+ } else {
+ buff = data + param_location;
+ }
+ dr_ste_copy_mask_misc(buff, &set_param->misc);
+ }
+ param_location += sizeof(struct mlx5dr_match_misc);
+
+ if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
+ if (mask->match_sz < param_location +
+ sizeof(struct mlx5dr_match_spec)) {
+ memcpy(tail_param, data + param_location,
+ mask->match_sz - param_location);
+ buff = tail_param;
+ } else {
+ buff = data + param_location;
+ }
+ dr_ste_copy_mask_spec(buff, &set_param->inner);
+ }
+ param_location += sizeof(struct mlx5dr_match_spec);
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
+ if (mask->match_sz < param_location +
+ sizeof(struct mlx5dr_match_misc2)) {
+ memcpy(tail_param, data + param_location,
+ mask->match_sz - param_location);
+ buff = tail_param;
+ } else {
+ buff = data + param_location;
+ }
+ dr_ste_copy_mask_misc2(buff, &set_param->misc2);
+ }
+
+ param_location += sizeof(struct mlx5dr_match_misc2);
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
+ if (mask->match_sz < param_location +
+ sizeof(struct mlx5dr_match_misc3)) {
+ memcpy(tail_param, data + param_location,
+ mask->match_sz - param_location);
+ buff = tail_param;
+ } else {
+ buff = data + param_location;
+ }
+ dr_ste_copy_mask_misc3(buff, &set_param->misc3);
+ }
+}
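+
+/* The flat match buffer parsed above is a sequence of fixed-size
+ * blocks, selected by match_criteria:
+ *   outer (mlx5dr_match_spec) | misc (mlx5dr_match_misc) |
+ *   inner (mlx5dr_match_spec) | misc2 (mlx5dr_match_misc2) |
+ *   misc3 (mlx5dr_match_misc3)
+ * A buffer shorter than the next block is zero-padded into tail_param
+ * before parsing.
+ */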
+
+static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
+
+ if (spec->smac_47_16 || spec->smac_15_0) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
+ spec->smac_47_16 >> 16);
+ MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
+ spec->smac_47_16 << 16 | spec->smac_15_0);
+ spec->smac_47_16 = 0;
+ spec->smac_15_0 = 0;
+ }
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ pr_info("Unsupported ip_version value\n");
+ return -EINVAL;
+ }
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+ return 0;
+}
+
+int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ int ret;
+
+ ret = dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
+ if (ret)
+ return ret;
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_des_tag;
+
+ return 0;
+}
+
+static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_127_96, mask, dst_ip_127_96);
+ DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_95_64, mask, dst_ip_95_64);
+ DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_63_32, mask, dst_ip_63_32);
+ DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_31_0, mask, dst_ip_31_0);
+}
+
+static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
+
+ return 0;
+}
+
+void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_eth_l3_ipv6_dst_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_dst_tag;
+}
+
+static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_127_96, mask, src_ip_127_96);
+ DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_95_64, mask, src_ip_95_64);
+ DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_63_32, mask, src_ip_63_32);
+ DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_31_0, mask, src_ip_31_0);
+}
+
+static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
+
+ return 0;
+}
+
+void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_eth_l3_ipv6_src_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_src_tag;
+}
+
+static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param *value,
+ bool inner,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ destination_address, mask, dst_ip_31_0);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ source_address, mask, src_ip_31_0);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ destination_port, mask, tcp_dport);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ destination_port, mask, udp_dport);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ source_port, mask, tcp_sport);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ source_port, mask, udp_sport);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ protocol, mask, ip_protocol);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ fragmented, mask, frag);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ dscp, mask, ip_dscp);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ ecn, mask, ip_ecn);
+
+ if (mask->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, bit_mask, mask);
+ mask->tcp_flags = 0;
+ }
+}
+
+static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);
+
+ if (spec->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
+ spec->tcp_flags = 0;
+ }
+
+ return 0;
+}
+
+void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_5_tuple_tag;
+}
+
+static void
+dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
+ DR_STE_SET_MASK(eth_l2_src, bit_mask, l3_type, mask, ip_version);
+
+ if (mask->svlan_tag || mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ mask->svlan_tag = 0;
+ }
+
+ if (inner) {
+ if (misc_mask->inner_second_cvlan_tag ||
+ misc_mask->inner_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
+ misc_mask->inner_second_cvlan_tag = 0;
+ misc_mask->inner_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+ second_vlan_id, misc_mask, inner_second_vid);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+ second_cfi, misc_mask, inner_second_cfi);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+ second_priority, misc_mask, inner_second_prio);
+ } else {
+ if (misc_mask->outer_second_cvlan_tag ||
+ misc_mask->outer_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
+ misc_mask->outer_second_cvlan_tag = 0;
+ misc_mask->outer_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+ second_vlan_id, misc_mask, outer_second_vid);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+ second_cfi, misc_mask, outer_second_cfi);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+ second_priority, misc_mask, outer_second_prio);
+ }
+}
+
+static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
+ bool inner, u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc_spec = &value->misc;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
+ DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ pr_info("Unsupported ip_version value\n");
+ return -EINVAL;
+ }
+ }
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+
+ if (inner) {
+ if (misc_spec->inner_second_cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
+ misc_spec->inner_second_cvlan_tag = 0;
+ } else if (misc_spec->inner_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
+ misc_spec->inner_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
+ } else {
+ if (misc_spec->outer_second_cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
+ misc_spec->outer_second_cvlan_tag = 0;
+ } else if (misc_spec->outer_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
+ misc_spec->outer_second_svlan_tag = 0;
+ }
+ DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
+ }
+
+ return 0;
+}
+
+static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
+
+ dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
+}
+
+static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
+
+ return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
+}
+
+void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_eth_l2_src_bit_mask(mask, inner, sb->bit_mask);
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_tag;
+}
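+
+/* The mlx5dr_ste_build_* entry points in this file share one pattern:
+ * build a bit mask from the caller's match mask (zeroing special-cased
+ * fields so later builders do not consume them twice), derive the lookup
+ * type and byte mask, and register the per-rule tag callback in
+ * sb->ste_build_tag_func.
+ */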
+
+static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+ dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
+}
+
+static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
+
+ return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
+}
+
+void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_eth_l2_dst_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l2_dst_tag;
+}
+
+static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
+ DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
+ DR_STE_SET_MASK(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);
+
+ if (misc->vxlan_vni) {
+ MLX5_SET(ste_eth_l2_tnl, bit_mask,
+ l2_tunneling_network_id, (misc->vxlan_vni << 8));
+ misc->vxlan_vni = 0;
+ }
+
+ if (mask->svlan_tag || mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ mask->svlan_tag = 0;
+ }
+}
+
+static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc *misc = &value->misc;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
+
+ if (misc->vxlan_vni) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
+ (misc->vxlan_vni << 8));
+ misc->vxlan_vni = 0;
+ }
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask, bool inner, bool rx)
+{
+ dr_ste_build_eth_l2_tnl_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l2_tnl_tag;
+}
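+
+/* Unlike the other L2 builders above, the lookup type here is hard-coded
+ * to the ingress tunneling table (..._TUNNELING_I) rather than derived
+ * with DR_STE_CALC_LU_TYPE, presumably because L2 tunnel matching is an
+ * ingress-only lookup.
+ */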
+
+static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_MASK_V(eth_l3_ipv4_misc, bit_mask, time_to_live, mask, ttl_hoplimit);
+}
+
+static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
+
+ return 0;
+}
+
+void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_eth_l3_ipv4_misc_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_misc_tag;
+}
+
+static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, tcp_dport);
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, tcp_sport);
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, udp_dport);
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, udp_sport);
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, protocol, mask, ip_protocol);
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, fragmented, mask, frag);
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, dscp, mask, ip_dscp);
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, ecn, mask, ip_ecn);
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, ipv6_hop_limit, mask, ttl_hoplimit);
+
+ if (mask->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l4, bit_mask, mask);
+ mask->tcp_flags = 0;
+ }
+}
+
+static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
+ DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
+ DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
+ DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
+ DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
+ DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
+ DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
+ DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
+
+ if (spec->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
+ spec->tcp_flags = 0;
+ }
+
+ return 0;
+}
+
+void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_ipv6_l3_l4_tag;
+}
+
+static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ return 0;
+}
+
+void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
+{
+ sb->rx = rx;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
+ sb->byte_mask = 0;
+ sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
+}
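+
+/* An always-hit builder leaves byte_mask at 0 and writes no tag, so the
+ * resulting STE matches every packet and only its hit address matters.
+ */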
+
+static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
+
+ if (inner)
+ DR_STE_SET_MPLS_MASK(mpls, misc2_mask, inner, bit_mask);
+ else
+ DR_STE_SET_MPLS_MASK(mpls, misc2_mask, outer, bit_mask);
+}
+
+static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
+ u8 *tag = hw_ste->tag;
+
+ if (sb->inner)
+ DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag);
+ else
+ DR_STE_SET_MPLS_TAG(mpls, misc2_mask, outer, tag);
+
+ return 0;
+}
+
+void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_mpls_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_mpls_tag;
+}
+
+static void dr_ste_build_gre_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_MASK_V(gre, bit_mask, gre_protocol, misc_mask, gre_protocol);
+ DR_STE_SET_MASK_V(gre, bit_mask, gre_k_present, misc_mask, gre_k_present);
+ DR_STE_SET_MASK_V(gre, bit_mask, gre_key_h, misc_mask, gre_key_h);
+ DR_STE_SET_MASK_V(gre, bit_mask, gre_key_l, misc_mask, gre_key_l);
+
+ DR_STE_SET_MASK_V(gre, bit_mask, gre_c_present, misc_mask, gre_c_present);
+ DR_STE_SET_MASK_V(gre, bit_mask, gre_s_present, misc_mask, gre_s_present);
+}
+
+static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc *misc = &value->misc;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
+
+ DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
+ DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
+ DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
+
+ DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
+
+ DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
+
+ return 0;
+}
+
+void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask, bool inner, bool rx)
+{
+ dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_GRE;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_gre_tag;
+}
+
+static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+
+ if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
+ DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
+ misc_2_mask, outer_first_mpls_over_gre_label);
+
+ DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
+ misc_2_mask, outer_first_mpls_over_gre_exp);
+
+ DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
+ misc_2_mask, outer_first_mpls_over_gre_s_bos);
+
+ DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
+ misc_2_mask, outer_first_mpls_over_gre_ttl);
+ } else {
+ DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
+ misc_2_mask, outer_first_mpls_over_udp_label);
+
+ DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
+ misc_2_mask, outer_first_mpls_over_udp_exp);
+
+ DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
+ misc_2_mask, outer_first_mpls_over_udp_s_bos);
+
+ DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
+ misc_2_mask, outer_first_mpls_over_udp_ttl);
+ }
+}
+
+static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+ u8 *tag = hw_ste->tag;
+
+ if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
+ misc_2_mask, outer_first_mpls_over_gre_label);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
+ misc_2_mask, outer_first_mpls_over_gre_exp);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
+ misc_2_mask, outer_first_mpls_over_gre_s_bos);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
+ misc_2_mask, outer_first_mpls_over_gre_ttl);
+ } else {
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
+ misc_2_mask, outer_first_mpls_over_udp_label);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
+ misc_2_mask, outer_first_mpls_over_udp_exp);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
+ misc_2_mask, outer_first_mpls_over_udp_s_bos);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
+ misc_2_mask, outer_first_mpls_over_udp_ttl);
+ }
+ return 0;
+}
+
+void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_0;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_flex_parser_0_tag;
+}
+
+#define ICMP_TYPE_OFFSET_FIRST_DW 24
+#define ICMP_CODE_OFFSET_FIRST_DW 16
+#define ICMP_HEADER_DATA_OFFSET_SECOND_DW 0
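+
+/* Resulting dword layout (derived from the offsets above, not from the
+ * PRM):
+ *
+ *   flex_parser_4 (dw0): [31:24] icmp type | [23:16] icmp code | [15:0] unused
+ *   flex_parser_5 (dw1): [31:0]  icmp header data
+ */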
+
+static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3;
+ bool is_ipv4_mask = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3_mask);
+ u32 icmp_header_data_mask;
+ u32 icmp_type_mask;
+ u32 icmp_code_mask;
+ int dw0_location;
+ int dw1_location;
+
+ if (is_ipv4_mask) {
+ icmp_header_data_mask = misc_3_mask->icmpv4_header_data;
+ icmp_type_mask = misc_3_mask->icmpv4_type;
+ icmp_code_mask = misc_3_mask->icmpv4_code;
+ dw0_location = caps->flex_parser_id_icmp_dw0;
+ dw1_location = caps->flex_parser_id_icmp_dw1;
+ } else {
+ icmp_header_data_mask = misc_3_mask->icmpv6_header_data;
+ icmp_type_mask = misc_3_mask->icmpv6_type;
+ icmp_code_mask = misc_3_mask->icmpv6_code;
+ dw0_location = caps->flex_parser_id_icmpv6_dw0;
+ dw1_location = caps->flex_parser_id_icmpv6_dw1;
+ }
+
+ switch (dw0_location) {
+ case 4:
+ if (icmp_type_mask) {
+ MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
+ (icmp_type_mask << ICMP_TYPE_OFFSET_FIRST_DW));
+ if (is_ipv4_mask)
+ misc_3_mask->icmpv4_type = 0;
+ else
+ misc_3_mask->icmpv6_type = 0;
+ }
+ if (icmp_code_mask) {
+ u32 cur_val = MLX5_GET(ste_flex_parser_1, bit_mask,
+ flex_parser_4);
+ MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
+ cur_val | (icmp_code_mask << ICMP_CODE_OFFSET_FIRST_DW));
+ if (is_ipv4_mask)
+ misc_3_mask->icmpv4_code = 0;
+ else
+ misc_3_mask->icmpv6_code = 0;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dw1_location) {
+ case 5:
+ if (icmp_header_data_mask) {
+ MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_5,
+ (icmp_header_data_mask << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
+ if (is_ipv4_mask)
+ misc_3_mask->icmpv4_header_data = 0;
+ else
+ misc_3_mask->icmpv6_header_data = 0;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
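+
+/* Only the dw0 == 4 / dw1 == 5 parser placements are handled here and in
+ * the tag builder below; any other flex parser assignment reported by FW
+ * is rejected with -EINVAL.
+ */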
+
+static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
+ u8 *tag = hw_ste->tag;
+ u32 icmp_header_data;
+ int dw0_location;
+ int dw1_location;
+ u32 icmp_type;
+ u32 icmp_code;
+ bool is_ipv4;
+
+ is_ipv4 = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3);
+ if (is_ipv4) {
+ icmp_header_data = misc_3->icmpv4_header_data;
+ icmp_type = misc_3->icmpv4_type;
+ icmp_code = misc_3->icmpv4_code;
+ dw0_location = sb->caps->flex_parser_id_icmp_dw0;
+ dw1_location = sb->caps->flex_parser_id_icmp_dw1;
+ } else {
+ icmp_header_data = misc_3->icmpv6_header_data;
+ icmp_type = misc_3->icmpv6_type;
+ icmp_code = misc_3->icmpv6_code;
+ dw0_location = sb->caps->flex_parser_id_icmpv6_dw0;
+ dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
+ }
+
+ switch (dw0_location) {
+ case 4:
+ if (icmp_type) {
+ MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
+ (icmp_type << ICMP_TYPE_OFFSET_FIRST_DW));
+ if (is_ipv4)
+ misc_3->icmpv4_type = 0;
+ else
+ misc_3->icmpv6_type = 0;
+ }
+
+ if (icmp_code) {
+ u32 cur_val = MLX5_GET(ste_flex_parser_1, tag,
+ flex_parser_4);
+ MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
+ cur_val | (icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
+ if (is_ipv4)
+ misc_3->icmpv4_code = 0;
+ else
+ misc_3->icmpv6_code = 0;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dw1_location) {
+ case 5:
+ if (icmp_header_data) {
+ MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
+ (icmp_header_data << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
+ if (is_ipv4)
+ misc_3->icmpv4_header_data = 0;
+ else
+ misc_3->icmpv6_header_data = 0;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
+{
+ int ret;
+
+ ret = dr_ste_build_flex_parser_1_bit_mask(mask, caps, sb->bit_mask);
+ if (ret)
+ return ret;
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->caps = caps;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_1;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_flex_parser_1_tag;
+
+ return 0;
+}
+
+static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+
+ DR_STE_SET_MASK_V(general_purpose, bit_mask,
+ general_purpose_lookup_field, misc_2_mask,
+ metadata_reg_a);
+}
+
+static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
+ misc_2_mask, metadata_reg_a);
+
+ return 0;
+}
+
+void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_general_purpose_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_general_purpose_tag;
+}
+
+static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
+
+ if (inner) {
+ DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
+ inner_tcp_seq_num);
+ DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
+ inner_tcp_ack_num);
+ } else {
+ DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
+ outer_tcp_seq_num);
+ DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
+ outer_tcp_ack_num);
+ }
+}
+
+static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+ u8 *tag = hw_ste->tag;
+
+ if (sb->inner) {
+ DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
+ DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
+ } else {
+ DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
+ DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
+ }
+
+ return 0;
+}
+
+void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_eth_l4_misc_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag;
+}
+
+static void dr_ste_build_flex_parser_tnl_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
+
+ if (misc_3_mask->outer_vxlan_gpe_flags ||
+ misc_3_mask->outer_vxlan_gpe_next_protocol) {
+ MLX5_SET(ste_flex_parser_tnl, bit_mask,
+ flex_parser_tunneling_header_63_32,
+ (misc_3_mask->outer_vxlan_gpe_flags << 24) |
+ (misc_3_mask->outer_vxlan_gpe_next_protocol));
+ misc_3_mask->outer_vxlan_gpe_flags = 0;
+ misc_3_mask->outer_vxlan_gpe_next_protocol = 0;
+ }
+
+ if (misc_3_mask->outer_vxlan_gpe_vni) {
+ MLX5_SET(ste_flex_parser_tnl, bit_mask,
+ flex_parser_tunneling_header_31_0,
+ misc_3_mask->outer_vxlan_gpe_vni << 8);
+ misc_3_mask->outer_vxlan_gpe_vni = 0;
+ }
+}
+
+static int dr_ste_build_flex_parser_tnl_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+ u8 *tag = hw_ste->tag;
+
+ if (misc3->outer_vxlan_gpe_flags ||
+ misc3->outer_vxlan_gpe_next_protocol) {
+ MLX5_SET(ste_flex_parser_tnl, tag,
+ flex_parser_tunneling_header_63_32,
+ (misc3->outer_vxlan_gpe_flags << 24) |
+ (misc3->outer_vxlan_gpe_next_protocol));
+ misc3->outer_vxlan_gpe_flags = 0;
+ misc3->outer_vxlan_gpe_next_protocol = 0;
+ }
+
+ if (misc3->outer_vxlan_gpe_vni) {
+ MLX5_SET(ste_flex_parser_tnl, tag,
+ flex_parser_tunneling_header_31_0,
+ misc3->outer_vxlan_gpe_vni << 8);
+ misc3->outer_vxlan_gpe_vni = 0;
+ }
+
+ return 0;
+}
+
+void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_flex_parser_tnl_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_tag;
+}
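+
+/* VXLAN-GPE field packing (derived from the shifts above):
+ *
+ *   tunneling_header_63_32: [31:24] flags | [23:8] unused | [7:0] next_protocol
+ *   tunneling_header_31_0:  [31:8]  vni   | [7:0]  unused
+ */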
+
+static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+
+ DR_STE_SET_MASK_V(register_0, bit_mask, register_0_h,
+ misc_2_mask, metadata_reg_c_0);
+ DR_STE_SET_MASK_V(register_0, bit_mask, register_0_l,
+ misc_2_mask, metadata_reg_c_1);
+ DR_STE_SET_MASK_V(register_0, bit_mask, register_1_h,
+ misc_2_mask, metadata_reg_c_2);
+ DR_STE_SET_MASK_V(register_0, bit_mask, register_1_l,
+ misc_2_mask, metadata_reg_c_3);
+}
+
+static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
+ DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
+ DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
+ DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
+
+ return 0;
+}
+
+void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_register_0_bit_mask(mask, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_register_0_tag;
+}
+
+static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+
+ DR_STE_SET_MASK_V(register_1, bit_mask, register_2_h,
+ misc_2_mask, metadata_reg_c_4);
+ DR_STE_SET_MASK_V(register_1, bit_mask, register_2_l,
+ misc_2_mask, metadata_reg_c_5);
+ DR_STE_SET_MASK_V(register_1, bit_mask, register_3_h,
+ misc_2_mask, metadata_reg_c_6);
+ DR_STE_SET_MASK_V(register_1, bit_mask, register_3_l,
+ misc_2_mask, metadata_reg_c_7);
+}
+
+static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
+ DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
+ DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
+ DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
+
+ return 0;
+}
+
+void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_register_1_bit_mask(mask, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_register_1_tag;
+}
+
+static int dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ if (misc_mask->source_port != 0xffff)
+ return -EINVAL;
+
+ DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
+ DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
+
+ return 0;
+}
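+
+/* An exact source_port mask is required above because the tag builder
+ * below stores the vport's GVMI looked up from the caps, not the port
+ * number itself, so a partial port mask cannot be translated.
+ */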
+
+static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc *misc = &value->misc;
+ struct mlx5dr_cmd_vport_cap *vport_cap;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
+
+ vport_cap = mlx5dr_get_vport_cap(sb->caps, misc->source_port);
+ if (!vport_cap)
+ return -EINVAL;
+
+ if (vport_cap->vport_gvmi)
+ MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
+
+ misc->source_port = 0;
+
+ return 0;
+}
+
+int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
+{
+ int ret;
+
+ ret = dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
+ if (ret)
+ return ret;
+
+ sb->rx = rx;
+ sb->caps = caps;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
new file mode 100644
index 000000000000..e178d8d3dbc9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
+ struct mlx5dr_action *action)
+{
+ struct mlx5dr_matcher *last_matcher = NULL;
+ struct mlx5dr_htbl_connect_info info;
+ struct mlx5dr_ste_htbl *last_htbl;
+ int ret;
+
+ if (action && action->action_type != DR_ACTION_TYP_FT)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&tbl->dmn->mutex);
+
+ if (!list_empty(&tbl->matcher_list))
+ last_matcher = list_last_entry(&tbl->matcher_list,
+ struct mlx5dr_matcher,
+ matcher_list);
+
+ if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX ||
+ tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
+ if (last_matcher)
+ last_htbl = last_matcher->rx.e_anchor;
+ else
+ last_htbl = tbl->rx.s_anchor;
+
+ tbl->rx.default_icm_addr = action ?
+ action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
+ tbl->rx.nic_dmn->default_icm_addr;
+
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = tbl->rx.default_icm_addr;
+
+ ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn,
+ tbl->rx.nic_dmn,
+ last_htbl,
+ &info, true);
+ if (ret) {
+ mlx5dr_dbg(tbl->dmn, "Failed to set RX miss action, ret %d\n", ret);
+ goto out;
+ }
+ }
+
+ if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX ||
+ tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
+ if (last_matcher)
+ last_htbl = last_matcher->tx.e_anchor;
+ else
+ last_htbl = tbl->tx.s_anchor;
+
+ tbl->tx.default_icm_addr = action ?
+ action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr :
+ tbl->tx.nic_dmn->default_icm_addr;
+
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = tbl->tx.default_icm_addr;
+
+ ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn,
+ tbl->tx.nic_dmn,
+ last_htbl, &info, true);
+ if (ret) {
+ mlx5dr_dbg(tbl->dmn, "Failed to set TX miss action, ret %d\n", ret);
+ goto out;
+ }
+ }
+
+ /* Release old action */
+ if (tbl->miss_action)
+ refcount_dec(&tbl->miss_action->refcount);
+
+ /* Set new miss action */
+ tbl->miss_action = action;
+ if (tbl->miss_action)
+ refcount_inc(&action->refcount);
+
+out:
+ mutex_unlock(&tbl->dmn->mutex);
+ return ret;
+}
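+
+/* Usage sketch (illustrative, assuming the dest-table action constructor
+ * mlx5dr_action_create_dest_table() declared in mlx5dr.h):
+ *
+ *   miss = mlx5dr_action_create_dest_table(next_tbl);
+ *   if (!miss)
+ *           return -ENOMEM;
+ *   err = mlx5dr_table_set_miss_action(tbl, miss);
+ *
+ * Packets that miss every matcher in tbl then continue in next_tbl.
+ */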
+
+static void dr_table_uninit_nic(struct mlx5dr_table_rx_tx *nic_tbl)
+{
+ mlx5dr_htbl_put(nic_tbl->s_anchor);
+}
+
+static void dr_table_uninit_fdb(struct mlx5dr_table *tbl)
+{
+ dr_table_uninit_nic(&tbl->rx);
+ dr_table_uninit_nic(&tbl->tx);
+}
+
+static void dr_table_uninit(struct mlx5dr_table *tbl)
+{
+ mutex_lock(&tbl->dmn->mutex);
+
+ switch (tbl->dmn->type) {
+ case MLX5DR_DOMAIN_TYPE_NIC_RX:
+ dr_table_uninit_nic(&tbl->rx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_NIC_TX:
+ dr_table_uninit_nic(&tbl->tx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_FDB:
+ dr_table_uninit_fdb(tbl);
+ break;
+ default:
+ WARN_ON(true);
+ break;
+ }
+
+ mutex_unlock(&tbl->dmn->mutex);
+}
+
+static int dr_table_init_nic(struct mlx5dr_domain *dmn,
+ struct mlx5dr_table_rx_tx *nic_tbl)
+{
+ struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
+ struct mlx5dr_htbl_connect_info info;
+ int ret;
+
+ nic_tbl->default_icm_addr = nic_dmn->default_icm_addr;
+
+ nic_tbl->s_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
+ DR_CHUNK_SIZE_1,
+ MLX5DR_STE_LU_TYPE_DONT_CARE,
+ 0);
+ if (!nic_tbl->s_anchor)
+ return -ENOMEM;
+
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = nic_dmn->default_icm_addr;
+ ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
+ nic_tbl->s_anchor,
+ &info, true);
+ if (ret)
+ goto free_s_anchor;
+
+ mlx5dr_htbl_get(nic_tbl->s_anchor);
+
+ return 0;
+
+free_s_anchor:
+ mlx5dr_ste_htbl_free(nic_tbl->s_anchor);
+ return ret;
+}
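+
+/* The s_anchor built above is a single-entry don't-care hash table whose
+ * miss address is the NIC domain default, so a fresh table sends all
+ * traffic to the domain's default destination until matchers are added.
+ */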
+
+static int dr_table_init_fdb(struct mlx5dr_table *tbl)
+{
+ int ret;
+
+ ret = dr_table_init_nic(tbl->dmn, &tbl->rx);
+ if (ret)
+ return ret;
+
+ ret = dr_table_init_nic(tbl->dmn, &tbl->tx);
+ if (ret)
+ goto destroy_rx;
+
+ return 0;
+
+destroy_rx:
+ dr_table_uninit_nic(&tbl->rx);
+ return ret;
+}
+
+static int dr_table_init(struct mlx5dr_table *tbl)
+{
+ int ret = 0;
+
+ INIT_LIST_HEAD(&tbl->matcher_list);
+
+ mutex_lock(&tbl->dmn->mutex);
+
+ switch (tbl->dmn->type) {
+ case MLX5DR_DOMAIN_TYPE_NIC_RX:
+ tbl->table_type = MLX5_FLOW_TABLE_TYPE_NIC_RX;
+ tbl->rx.nic_dmn = &tbl->dmn->info.rx;
+ ret = dr_table_init_nic(tbl->dmn, &tbl->rx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_NIC_TX:
+ tbl->table_type = MLX5_FLOW_TABLE_TYPE_NIC_TX;
+ tbl->tx.nic_dmn = &tbl->dmn->info.tx;
+ ret = dr_table_init_nic(tbl->dmn, &tbl->tx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_FDB:
+ tbl->table_type = MLX5_FLOW_TABLE_TYPE_FDB;
+ tbl->rx.nic_dmn = &tbl->dmn->info.rx;
+ tbl->tx.nic_dmn = &tbl->dmn->info.tx;
+ ret = dr_table_init_fdb(tbl);
+ break;
+ default:
+ WARN_ON(true);
+ break;
+ }
+
+ mutex_unlock(&tbl->dmn->mutex);
+
+ return ret;
+}
+
+static int dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl)
+{
+ return mlx5dr_cmd_destroy_flow_table(tbl->dmn->mdev,
+ tbl->table_id,
+ tbl->table_type);
+}
+
+static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
+{
+ u64 icm_addr_rx = 0;
+ u64 icm_addr_tx = 0;
+ int ret;
+
+ if (tbl->rx.s_anchor)
+ icm_addr_rx = tbl->rx.s_anchor->chunk->icm_addr;
+
+ if (tbl->tx.s_anchor)
+ icm_addr_tx = tbl->tx.s_anchor->chunk->icm_addr;
+
+ ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev,
+ tbl->table_type,
+ icm_addr_rx,
+ icm_addr_tx,
+ tbl->dmn->info.caps.max_ft_level - 1,
+ true, false, NULL,
+ &tbl->table_id);
+
+ return ret;
+}
+
+struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level)
+{
+ struct mlx5dr_table *tbl;
+ int ret;
+
+ refcount_inc(&dmn->refcount);
+
+ tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ goto dec_ref;
+
+ tbl->dmn = dmn;
+ tbl->level = level;
+ refcount_set(&tbl->refcount, 1);
+
+ ret = dr_table_init(tbl);
+ if (ret)
+ goto free_tbl;
+
+ ret = dr_table_create_sw_owned_tbl(tbl);
+ if (ret)
+ goto uninit_tbl;
+
+ return tbl;
+
+uninit_tbl:
+ dr_table_uninit(tbl);
+free_tbl:
+ kfree(tbl);
+dec_ref:
+ refcount_dec(&dmn->refcount);
+ return NULL;
+}
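+
+/* Usage sketch (illustrative): create a level-1 table in a domain and
+ * destroy it once nothing references it:
+ *
+ *   tbl = mlx5dr_table_create(dmn, 1);
+ *   if (!tbl)
+ *           return -ENOMEM;
+ *   ...
+ *   mlx5dr_table_destroy(tbl);
+ */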
+
+int mlx5dr_table_destroy(struct mlx5dr_table *tbl)
+{
+ int ret;
+
+ if (refcount_read(&tbl->refcount) > 1)
+ return -EBUSY;
+
+ ret = dr_table_destroy_sw_owned_tbl(tbl);
+ if (ret)
+ return ret;
+
+ dr_table_uninit(tbl);
+
+ if (tbl->miss_action)
+ refcount_dec(&tbl->miss_action->refcount);
+
+ refcount_dec(&tbl->dmn->refcount);
+ kfree(tbl);
+
+ return ret;
+}
+
+u32 mlx5dr_table_get_id(struct mlx5dr_table *tbl)
+{
+ return tbl->table_id;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
new file mode 100644
index 000000000000..a37ee6359be2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -0,0 +1,1060 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019, Mellanox Technologies */
+
+#ifndef _DR_TYPES_
+#define _DR_TYPES_
+
+#include <linux/mlx5/driver.h>
+#include <linux/refcount.h>
+#include "fs_core.h"
+#include "wq.h"
+#include "lib/mlx5.h"
+#include "mlx5_ifc_dr.h"
+#include "mlx5dr.h"
+
+#define DR_RULE_MAX_STES 17
+#define DR_ACTION_MAX_STES 5
+#define WIRE_PORT 0xFFFF
+#define DR_STE_SVLAN 0x1
+#define DR_STE_CVLAN 0x2
+
+#define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
+#define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
+#define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
+
+enum mlx5dr_icm_chunk_size {
+ DR_CHUNK_SIZE_1,
+ DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */
+ DR_CHUNK_SIZE_2,
+ DR_CHUNK_SIZE_4,
+ DR_CHUNK_SIZE_8,
+ DR_CHUNK_SIZE_16,
+ DR_CHUNK_SIZE_32,
+ DR_CHUNK_SIZE_64,
+ DR_CHUNK_SIZE_128,
+ DR_CHUNK_SIZE_256,
+ DR_CHUNK_SIZE_512,
+ DR_CHUNK_SIZE_1K,
+ DR_CHUNK_SIZE_2K,
+ DR_CHUNK_SIZE_4K,
+ DR_CHUNK_SIZE_8K,
+ DR_CHUNK_SIZE_16K,
+ DR_CHUNK_SIZE_32K,
+ DR_CHUNK_SIZE_64K,
+ DR_CHUNK_SIZE_128K,
+ DR_CHUNK_SIZE_256K,
+ DR_CHUNK_SIZE_512K,
+ DR_CHUNK_SIZE_1024K,
+ DR_CHUNK_SIZE_2048K,
+ DR_CHUNK_SIZE_MAX,
+};
+
+enum mlx5dr_icm_type {
+ DR_ICM_TYPE_STE,
+ DR_ICM_TYPE_MODIFY_ACTION,
+};
+
+static inline enum mlx5dr_icm_chunk_size
+mlx5dr_icm_next_higher_chunk(enum mlx5dr_icm_chunk_size chunk)
+{
+ chunk += 2;
+ if (chunk < DR_CHUNK_SIZE_MAX)
+ return chunk;
+
+ return DR_CHUNK_SIZE_MAX;
+}
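+
+/* Example (illustrative): each enum step doubles the chunk, so the "+ 2"
+ * above quadruples it; the next higher chunk after DR_CHUNK_SIZE_128 is
+ * DR_CHUNK_SIZE_512.
+ */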
+
+enum {
+ DR_STE_SIZE = 64,
+ DR_STE_SIZE_CTRL = 32,
+ DR_STE_SIZE_TAG = 16,
+ DR_STE_SIZE_MASK = 16,
+};
+
+enum {
+ DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
+};
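+
+/* i.e. DR_STE_SIZE_REDUCED == 48: a full 64B STE minus its 16B mask */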
+
+enum {
+ DR_MODIFY_ACTION_SIZE = 8,
+};
+
+enum mlx5dr_matcher_criteria {
+ DR_MATCHER_CRITERIA_EMPTY = 0,
+ DR_MATCHER_CRITERIA_OUTER = 1 << 0,
+ DR_MATCHER_CRITERIA_MISC = 1 << 1,
+ DR_MATCHER_CRITERIA_INNER = 1 << 2,
+ DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
+ DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
+ DR_MATCHER_CRITERIA_MAX = 1 << 5,
+};
+
+enum mlx5dr_action_type {
+ DR_ACTION_TYP_TNL_L2_TO_L2,
+ DR_ACTION_TYP_L2_TO_TNL_L2,
+ DR_ACTION_TYP_TNL_L3_TO_L2,
+ DR_ACTION_TYP_L2_TO_TNL_L3,
+ DR_ACTION_TYP_DROP,
+ DR_ACTION_TYP_QP,
+ DR_ACTION_TYP_FT,
+ DR_ACTION_TYP_CTR,
+ DR_ACTION_TYP_TAG,
+ DR_ACTION_TYP_MODIFY_HDR,
+ DR_ACTION_TYP_VPORT,
+ DR_ACTION_TYP_POP_VLAN,
+ DR_ACTION_TYP_PUSH_VLAN,
+ DR_ACTION_TYP_MAX,
+};
+
+struct mlx5dr_icm_pool;
+struct mlx5dr_icm_chunk;
+struct mlx5dr_icm_bucket;
+struct mlx5dr_ste_htbl;
+struct mlx5dr_match_param;
+struct mlx5dr_cmd_caps;
+struct mlx5dr_matcher_rx_tx;
+
+struct mlx5dr_ste {
+ u8 *hw_ste;
+ /* refcount: indicates the number of rules that are using this ste */
+ refcount_t refcount;
+
+ /* attached to the miss_list head at each htbl entry */
+ struct list_head miss_list_node;
+
+ /* each rule member that uses this ste is attached here */
+ struct list_head rule_list;
+
+ /* this ste is a member of htbl */
+ struct mlx5dr_ste_htbl *htbl;
+
+ struct mlx5dr_ste_htbl *next_htbl;
+
+ /* this ste is part of a rule, at this location in the rule's ste chain */
+ u8 ste_chain_location;
+};
+
+struct mlx5dr_ste_htbl_ctrl {
+ /* total number of valid entries belonging to this hash table. This
+ * includes both the non-collision and collision entries
+ */
+ unsigned int num_of_valid_entries;
+
+ /* total number of collision entries attached to this table */
+ unsigned int num_of_collisions;
+ unsigned int increase_threshold;
+ u8 may_grow:1;
+};
+
+struct mlx5dr_ste_htbl {
+ u8 lu_type;
+ u16 byte_mask;
+ refcount_t refcount;
+ struct mlx5dr_icm_chunk *chunk;
+ struct mlx5dr_ste *ste_arr;
+ u8 *hw_ste_arr;
+
+ struct list_head *miss_list;
+
+ enum mlx5dr_icm_chunk_size chunk_size;
+ struct mlx5dr_ste *pointing_ste;
+
+ struct mlx5dr_ste_htbl_ctrl ctrl;
+};
+
+struct mlx5dr_ste_send_info {
+ struct mlx5dr_ste *ste;
+ struct list_head send_list;
+ u16 size;
+ u16 offset;
+ u8 data_cont[DR_STE_SIZE];
+ u8 *data;
+};
+
+void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
+ u16 offset, u8 *data,
+ struct mlx5dr_ste_send_info *ste_info,
+ struct list_head *send_list,
+ bool copy_data);
+
+struct mlx5dr_ste_build {
+ u8 inner:1;
+ u8 rx:1;
+ struct mlx5dr_cmd_caps *caps;
+ u8 lu_type;
+ u16 byte_mask;
+ u8 bit_mask[DR_STE_SIZE_MASK];
+ int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p);
+};
+
+struct mlx5dr_ste_htbl *
+mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
+ enum mlx5dr_icm_chunk_size chunk_size,
+ u8 lu_type, u16 byte_mask);
+
+int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
+
+static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
+{
+ if (refcount_dec_and_test(&htbl->refcount))
+ mlx5dr_ste_htbl_free(htbl);
+}
+
+static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
+{
+ refcount_inc(&htbl->refcount);
+}
+
+/* STE utils */
+u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
+void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type, u16 gvmi);
+void mlx5dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
+ struct mlx5dr_ste_htbl *next_htbl);
+void mlx5dr_ste_set_miss_addr(u8 *hw_ste, u64 miss_addr);
+u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste);
+void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi);
+void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size);
+void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr);
+void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
+bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste);
+bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
+ u8 ste_location);
+void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag);
+void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id);
+void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id,
+ int size, bool encap_l3);
+void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p);
+void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan);
+void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p);
+void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_tpid_pcp_dei_vid,
+ bool go_back);
+void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type);
+u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p);
+void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
+ u32 re_write_index);
+void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p);
+u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste);
+u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
+struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);
+
+void mlx5dr_ste_free(struct mlx5dr_ste *ste,
+ struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher);
+static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
+ struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+ if (refcount_dec_and_test(&ste->refcount))
+ mlx5dr_ste_free(ste, matcher, nic_matcher);
+}
+
+/* initialized to 0; increased only when the ste appears in a new rule */
+static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
+{
+ refcount_inc(&ste->refcount);
+}
+
+void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
+ struct mlx5dr_ste_htbl *next_htbl);
+bool mlx5dr_ste_equal_tag(void *src, void *dst);
+int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste *ste,
+ u8 *cur_hw_ste,
+ enum mlx5dr_icm_chunk_size log_table_size);
+
+/* STE build functions */
+int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
+ u8 match_criteria,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_match_param *value);
+int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_match_param *value,
+ u8 *ste_arr);
+int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *builder,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
+void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
+
+/* Actions utils */
+int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_action *actions[],
+ u32 num_actions,
+ u8 *ste_arr,
+ u32 *new_hw_ste_arr_sz);
+
+struct mlx5dr_match_spec {
+ u32 smac_47_16; /* Source MAC address of incoming packet */
+ /* Incoming packet Ethertype - this is the Ethertype
+ * following the last VLAN tag of the packet
+ */
+ u32 ethertype:16;
+ u32 smac_15_0:16; /* Source MAC address of incoming packet */
+ u32 dmac_47_16; /* Destination MAC address of incoming packet */
+ /* VLAN ID of first VLAN tag in the incoming packet.
+ * Valid only when cvlan_tag==1 or svlan_tag==1
+ */
+ u32 first_vid:12;
+ /* CFI bit of first VLAN tag in the incoming packet.
+ * Valid only when cvlan_tag==1 or svlan_tag==1
+ */
+ u32 first_cfi:1;
+ /* Priority of first VLAN tag in the incoming packet.
+ * Valid only when cvlan_tag==1 or svlan_tag==1
+ */
+ u32 first_prio:3;
+ u32 dmac_15_0:16; /* Destination MAC address of incoming packet */
+ /* TCP flags. Bit 0: FIN; Bit 1: SYN; Bit 2: RST; Bit 3: PSH;
+ * Bit 4: ACK; Bit 5: URG; Bit 6: ECE; Bit 7: CWR; Bit 8: NS
+ */
+ u32 tcp_flags:9;
+ u32 ip_version:4; /* IP version */
+ u32 frag:1; /* Packet is an IP fragment */
+ /* The first vlan in the packet is s-vlan (0x88a8).
+ * cvlan_tag and svlan_tag cannot be set together
+ */
+ u32 svlan_tag:1;
+ /* The first vlan in the packet is c-vlan (0x8100).
+ * cvlan_tag and svlan_tag cannot be set together
+ */
+ u32 cvlan_tag:1;
+ /* Explicit Congestion Notification derived from
+ * Traffic Class/TOS field of IPv6/v4
+ */
+ u32 ip_ecn:2;
+ /* Differentiated Services Code Point derived from
+ * Traffic Class/TOS field of IPv6/v4
+ */
+ u32 ip_dscp:6;
+ u32 ip_protocol:8; /* IP protocol */
+ /* TCP destination port.
+ * tcp and udp sport/dport are mutually exclusive
+ */
+ u32 tcp_dport:16;
+ /* TCP source port. tcp and udp sport/dport are mutually exclusive */
+ u32 tcp_sport:16;
+ u32 ttl_hoplimit:8;
+ u32 reserved:24;
+ /* UDP destination port. tcp and udp sport/dport are mutually exclusive */
+ u32 udp_dport:16;
+ /* UDP source port. tcp and udp sport/dport are mutually exclusive */
+ u32 udp_sport:16;
+ /* IPv6 source address of incoming packets
+ * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+ * This field should be qualified by an appropriate ethertype
+ */
+ u32 src_ip_127_96;
+ /* IPv6 source address of incoming packets
+ * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+ * This field should be qualified by an appropriate ethertype
+ */
+ u32 src_ip_95_64;
+ /* IPv6 source address of incoming packets
+ * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+ * This field should be qualified by an appropriate ethertype
+ */
+ u32 src_ip_63_32;
+ /* IPv6 source address of incoming packets
+ * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+ * This field should be qualified by an appropriate ethertype
+ */
+ u32 src_ip_31_0;
+ /* IPv6 destination address of incoming packets
+ * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+ * This field should be qualified by an appropriate ethertype
+ */
+ u32 dst_ip_127_96;
+ /* IPv6 destination address of incoming packets
+ * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+ * This field should be qualified by an appropriate ethertype
+ */
+ u32 dst_ip_95_64;
+ /* IPv6 destination address of incoming packets
+ * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+ * This field should be qualified by an appropriate ethertype
+ */
+ u32 dst_ip_63_32;
+ /* IPv6 destination address of incoming packets
+ * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+ * This field should be qualified by an appropriate ethertype
+ */
+ u32 dst_ip_31_0;
+};
+
+struct mlx5dr_match_misc {
+ u32 source_sqn:24; /* Source SQN */
+ u32 source_vhca_port:4;
+ /* used with GRE, sequence number exists when gre_s_present == 1 */
+ u32 gre_s_present:1;
+ /* used with GRE, key exists when gre_k_present == 1 */
+ u32 gre_k_present:1;
+ u32 reserved_auto1:1;
+ /* used with GRE, checksum exists when gre_c_present == 1 */
+ u32 gre_c_present:1;
+ /* Source port. 0xffff indicates the wire port */
+ u32 source_port:16;
+ u32 reserved_auto2:16;
+ /* VLAN ID of the second VLAN tag in the inner header of the incoming packet.
+ * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
+ */
+ u32 inner_second_vid:12;
+ /* CFI bit of the second VLAN tag in the inner header of the incoming packet.
+ * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
+ */
+ u32 inner_second_cfi:1;
+ /* Priority of second VLAN tag in the inner header of the incoming packet.
+ * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
+ */
+ u32 inner_second_prio:3;
+ /* VLAN ID of the second VLAN tag in the outer header of the incoming packet.
+ * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
+ */
+ u32 outer_second_vid:12;
+ /* CFI bit of the second VLAN tag in the outer header of the incoming packet.
+ * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
+ */
+ u32 outer_second_cfi:1;
+ /* Priority of second VLAN tag in the outer header of the incoming packet.
+ * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
+ */
+ u32 outer_second_prio:3;
+ u32 gre_protocol:16; /* GRE Protocol (outer) */
+ u32 reserved_auto3:12;
+ /* The second vlan in the inner header of the packet is s-vlan (0x88a8).
+ * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
+ */
+ u32 inner_second_svlan_tag:1;
+ /* The second vlan in the outer header of the packet is s-vlan (0x88a8).
+ * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
+ */
+ u32 outer_second_svlan_tag:1;
+ /* The second vlan in the inner header of the packet is c-vlan (0x8100).
+ * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
+ */
+ u32 inner_second_cvlan_tag:1;
+ /* The second vlan in the outer header of the packet is c-vlan (0x8100).
+ * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
+ */
+ u32 outer_second_cvlan_tag:1;
+ u32 gre_key_l:8; /* GRE Key [7:0] (outer) */
+ u32 gre_key_h:24; /* GRE Key[31:8] (outer) */
+ u32 reserved_auto4:8;
+ u32 vxlan_vni:24; /* VXLAN VNI (outer) */
+ u32 geneve_oam:1; /* GENEVE OAM field (outer) */
+ u32 reserved_auto5:7;
+ u32 geneve_vni:24; /* GENEVE VNI field (outer) */
+ u32 outer_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (outer) */
+ u32 reserved_auto6:12;
+ u32 inner_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (inner) */
+ u32 reserved_auto7:12;
+ u32 geneve_protocol_type:16; /* GENEVE protocol type (outer) */
+ u32 geneve_opt_len:6; /* GENEVE OptLen (outer) */
+ u32 reserved_auto8:10;
+ u32 bth_dst_qp:24; /* Destination QP in BTH header */
+ u32 reserved_auto9:8;
+ u8 reserved_auto10[20];
+};
+
+struct mlx5dr_match_misc2 {
+ u32 outer_first_mpls_ttl:8; /* First MPLS TTL (outer) */
+ u32 outer_first_mpls_s_bos:1; /* First MPLS S_BOS (outer) */
+ u32 outer_first_mpls_exp:3; /* First MPLS EXP (outer) */
+ u32 outer_first_mpls_label:20; /* First MPLS LABEL (outer) */
+ u32 inner_first_mpls_ttl:8; /* First MPLS TTL (inner) */
+ u32 inner_first_mpls_s_bos:1; /* First MPLS S_BOS (inner) */
+ u32 inner_first_mpls_exp:3; /* First MPLS EXP (inner) */
+ u32 inner_first_mpls_label:20; /* First MPLS LABEL (inner) */
+ u32 outer_first_mpls_over_gre_ttl:8; /* last MPLS TTL (outer) */
+ u32 outer_first_mpls_over_gre_s_bos:1; /* last MPLS S_BOS (outer) */
+ u32 outer_first_mpls_over_gre_exp:3; /* last MPLS EXP (outer) */
+ u32 outer_first_mpls_over_gre_label:20; /* last MPLS LABEL (outer) */
+ u32 outer_first_mpls_over_udp_ttl:8; /* last MPLS TTL (outer) */
+ u32 outer_first_mpls_over_udp_s_bos:1; /* last MPLS S_BOS (outer) */
+ u32 outer_first_mpls_over_udp_exp:3; /* last MPLS EXP (outer) */
+ u32 outer_first_mpls_over_udp_label:20; /* last MPLS LABEL (outer) */
+ u32 metadata_reg_c_7; /* metadata_reg_c_7 */
+ u32 metadata_reg_c_6; /* metadata_reg_c_6 */
+ u32 metadata_reg_c_5; /* metadata_reg_c_5 */
+ u32 metadata_reg_c_4; /* metadata_reg_c_4 */
+ u32 metadata_reg_c_3; /* metadata_reg_c_3 */
+ u32 metadata_reg_c_2; /* metadata_reg_c_2 */
+ u32 metadata_reg_c_1; /* metadata_reg_c_1 */
+ u32 metadata_reg_c_0; /* metadata_reg_c_0 */
+ u32 metadata_reg_a; /* metadata_reg_a */
+ u32 metadata_reg_b; /* metadata_reg_b */
+ u8 reserved_auto2[8];
+};
+
+struct mlx5dr_match_misc3 {
+ u32 inner_tcp_seq_num;
+ u32 outer_tcp_seq_num;
+ u32 inner_tcp_ack_num;
+ u32 outer_tcp_ack_num;
+ u32 outer_vxlan_gpe_vni:24;
+ u32 reserved_auto1:8;
+ u32 reserved_auto2:16;
+ u32 outer_vxlan_gpe_flags:8;
+ u32 outer_vxlan_gpe_next_protocol:8;
+ u32 icmpv4_header_data;
+ u32 icmpv6_header_data;
+ u32 icmpv6_code:8;
+ u32 icmpv6_type:8;
+ u32 icmpv4_code:8;
+ u32 icmpv4_type:8;
+ u8 reserved_auto3[0x1c];
+};
+
+struct mlx5dr_match_param {
+ struct mlx5dr_match_spec outer;
+ struct mlx5dr_match_misc misc;
+ struct mlx5dr_match_spec inner;
+ struct mlx5dr_match_misc2 misc2;
+ struct mlx5dr_match_misc3 misc3;
+};
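+
+/* The five members above correspond to the DR_MATCHER_CRITERIA_{OUTER,
+ * MISC, INNER, MISC2, MISC3} bits a matcher uses to declare which parts
+ * of the param are valid.
+ */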
+
+#define DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
+ (_misc3)->icmpv4_code || \
+ (_misc3)->icmpv4_header_data)
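+
+/* If any ICMPv4 field is set in the mask, the flex parser is programmed
+ * for ICMPv4 (see dr_ste_build_flex_parser_1); otherwise the ICMPv6
+ * fields and parser IDs are used.
+ */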
+
+struct mlx5dr_esw_caps {
+ u64 drop_icm_address_rx;
+ u64 drop_icm_address_tx;
+ u64 uplink_icm_address_rx;
+ u64 uplink_icm_address_tx;
+ bool sw_owner;
+};
+
+struct mlx5dr_cmd_vport_cap {
+ u16 vport_gvmi;
+ u16 vhca_gvmi;
+ u64 icm_address_rx;
+ u64 icm_address_tx;
+ u32 num;
+};
+
+struct mlx5dr_cmd_caps {
+ u16 gvmi;
+ u64 nic_rx_drop_address;
+ u64 nic_tx_drop_address;
+ u64 nic_tx_allow_address;
+ u64 esw_rx_drop_address;
+ u64 esw_tx_drop_address;
+ u32 log_icm_size;
+ u64 hdr_modify_icm_addr;
+ u32 flex_protocols;
+ u8 flex_parser_id_icmp_dw0;
+ u8 flex_parser_id_icmp_dw1;
+ u8 flex_parser_id_icmpv6_dw0;
+ u8 flex_parser_id_icmpv6_dw1;
+ u8 max_ft_level;
+ u16 roce_min_src_udp;
+ u8 num_esw_ports;
+ bool eswitch_manager;
+ bool rx_sw_owner;
+ bool tx_sw_owner;
+ bool fdb_sw_owner;
+ u32 num_vports;
+ struct mlx5dr_esw_caps esw_caps;
+ struct mlx5dr_cmd_vport_cap *vports_caps;
+ bool prio_tag_required;
+};
+
+struct mlx5dr_domain_rx_tx {
+ u64 drop_icm_addr;
+ u64 default_icm_addr;
+ enum mlx5dr_ste_entry_type ste_type;
+};
+
+struct mlx5dr_domain_info {
+ bool supp_sw_steering;
+ u32 max_inline_size;
+ u32 max_send_wr;
+ u32 max_log_sw_icm_sz;
+ u32 max_log_action_icm_sz;
+ struct mlx5dr_domain_rx_tx rx;
+ struct mlx5dr_domain_rx_tx tx;
+ struct mlx5dr_cmd_caps caps;
+};
+
+struct mlx5dr_domain_cache {
+ struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft;
+};
+
+struct mlx5dr_domain {
+ struct mlx5dr_domain *peer_dmn;
+ struct mlx5_core_dev *mdev;
+ u32 pdn;
+ struct mlx5_uars_page *uar;
+ enum mlx5dr_domain_type type;
+ refcount_t refcount;
+ struct mutex mutex; /* protect domain */
+ struct mlx5dr_icm_pool *ste_icm_pool;
+ struct mlx5dr_icm_pool *action_icm_pool;
+ struct mlx5dr_send_ring *send_ring;
+ struct mlx5dr_domain_info info;
+ struct mlx5dr_domain_cache cache;
+};
+
+struct mlx5dr_table_rx_tx {
+ struct mlx5dr_ste_htbl *s_anchor;
+ struct mlx5dr_domain_rx_tx *nic_dmn;
+ u64 default_icm_addr;
+};
+
+struct mlx5dr_table {
+ struct mlx5dr_domain *dmn;
+ struct mlx5dr_table_rx_tx rx;
+ struct mlx5dr_table_rx_tx tx;
+ u32 level;
+ u32 table_type;
+ u32 table_id;
+ struct list_head matcher_list;
+ struct mlx5dr_action *miss_action;
+ refcount_t refcount;
+};
+
+struct mlx5dr_matcher_rx_tx {
+ struct mlx5dr_ste_htbl *s_htbl;
+ struct mlx5dr_ste_htbl *e_anchor;
+ struct mlx5dr_ste_build *ste_builder;
+ struct mlx5dr_ste_build ste_builder4[DR_RULE_MAX_STES];
+ struct mlx5dr_ste_build ste_builder6[DR_RULE_MAX_STES];
+ u8 num_of_builders;
+ u8 num_of_builders4;
+ u8 num_of_builders6;
+ u64 default_icm_addr;
+ struct mlx5dr_table_rx_tx *nic_tbl;
+};
+
+struct mlx5dr_matcher {
+ struct mlx5dr_table *tbl;
+ struct mlx5dr_matcher_rx_tx rx;
+ struct mlx5dr_matcher_rx_tx tx;
+ struct list_head matcher_list;
+ u16 prio;
+ struct mlx5dr_match_param mask;
+ u8 match_criteria;
+ refcount_t refcount;
+ struct mlx5dv_flow_matcher *dv_matcher;
+};
+
+struct mlx5dr_rule_member {
+ struct mlx5dr_ste *ste;
+ /* attached to mlx5dr_rule via this */
+ struct list_head list;
+ /* attached to mlx5dr_ste via this */
+ struct list_head use_ste_list;
+};
+
+struct mlx5dr_action {
+ enum mlx5dr_action_type action_type;
+ refcount_t refcount;
+ union {
+ struct {
+ struct mlx5dr_domain *dmn;
+ struct mlx5dr_icm_chunk *chunk;
+ u8 *data;
+ u32 data_size;
+ u16 num_of_actions;
+ u32 index;
+ u8 allow_rx:1;
+ u8 allow_tx:1;
+ u8 modify_ttl:1;
+ } rewrite;
+ struct {
+ struct mlx5dr_domain *dmn;
+ u32 reformat_id;
+ u32 reformat_size;
+ } reformat;
+ struct {
+ u8 is_fw_tbl:1;
+ union {
+ struct mlx5dr_table *tbl;
+ struct {
+ struct mlx5_flow_table *ft;
+ u64 rx_icm_addr;
+ u64 tx_icm_addr;
+ struct mlx5_core_dev *mdev;
+ } fw_tbl;
+ };
+ } dest_tbl;
+ struct {
+ u32 ctr_id;
+ u32 offset;
+ } ctr;
+ struct {
+ struct mlx5dr_domain *dmn;
+ struct mlx5dr_cmd_vport_cap *caps;
+ u32 num;
+ } vport;
+ struct {
+ u32 vlan_hdr; /* tpid_pcp_dei_vid */
+ } push_vlan;
+ u32 flow_tag;
+ };
+};
+
+enum mlx5dr_connect_type {
+ CONNECT_HIT = 1,
+ CONNECT_MISS = 2,
+};
+
+struct mlx5dr_htbl_connect_info {
+ enum mlx5dr_connect_type type;
+ union {
+ struct mlx5dr_ste_htbl *hit_next_htbl;
+ u64 miss_icm_addr;
+ };
+};
+
+struct mlx5dr_rule_rx_tx {
+ struct list_head rule_members_list;
+ struct mlx5dr_matcher_rx_tx *nic_matcher;
+};
+
+struct mlx5dr_rule {
+ struct mlx5dr_matcher *matcher;
+ struct mlx5dr_rule_rx_tx rx;
+ struct mlx5dr_rule_rx_tx tx;
+ struct list_head rule_actions_list;
+};
+
+void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste,
+ struct mlx5dr_ste *ste);
+
+struct mlx5dr_icm_chunk {
+ struct mlx5dr_icm_bucket *bucket;
+ struct list_head chunk_list;
+ u32 rkey;
+ u32 num_of_entries;
+ u32 byte_size;
+ u64 icm_addr;
+ u64 mr_addr;
+
+ /* Memory optimisation */
+ struct mlx5dr_ste *ste_arr;
+ u8 *hw_ste_arr;
+ struct list_head *miss_list;
+};
+
+static inline int
+mlx5dr_matcher_supp_flex_parser_icmp_v4(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED;
+}
+
+static inline int
+mlx5dr_matcher_supp_flex_parser_icmp_v6(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED;
+}
+
+int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ bool ipv6);
+
+static inline u32
+mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size)
+{
+ return 1 << chunk_size;
+}
+
+static inline int
+mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size,
+ enum mlx5dr_icm_type icm_type)
+{
+ int num_of_entries;
+ int entry_size;
+
+ if (icm_type == DR_ICM_TYPE_STE)
+ entry_size = DR_STE_SIZE;
+ else
+ entry_size = DR_MODIFY_ACTION_SIZE;
+
+ num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
+
+ return entry_size * num_of_entries;
+}
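+
+/* For example, a chunk of size 4 holding STEs occupies
+ * (1 << 4) * DR_STE_SIZE bytes of ICM memory.
+ */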
+
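+/* The uplink (WIRE_PORT) capability entry is stored at index
+ * caps->num_vports, one past the last regular vport.
+ */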
+static inline struct mlx5dr_cmd_vport_cap *
+mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
+{
+ if (!caps->vports_caps ||
+ (vport >= caps->num_vports && vport != WIRE_PORT))
+ return NULL;
+
+ if (vport == WIRE_PORT)
+ vport = caps->num_vports;
+
+ return &caps->vports_caps[vport];
+}
+
+struct mlx5dr_cmd_query_flow_table_details {
+ u8 status;
+ u8 level;
+ u64 sw_owner_icm_root_1;
+ u64 sw_owner_icm_root_0;
+};
+
+/* internal API functions */
+int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
+ struct mlx5dr_cmd_caps *caps);
+int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
+ bool other_vport, u16 vport_number,
+ u64 *icm_address_rx,
+ u64 *icm_address_tx);
+int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev,
+ bool other_vport, u16 vport_number, u16 *gvmi);
+int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
+ struct mlx5dr_esw_caps *caps);
+int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev);
+int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id,
+ u32 modify_header_id,
+ u32 vport_id);
+int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id);
+int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u8 num_of_actions,
+ u64 *actions,
+ u32 *modify_header_id);
+int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
+ u32 modify_header_id);
+int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 *group_id);
+int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id);
+int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u64 icm_addr_rx,
+ u64 icm_addr_tx,
+ u8 level,
+ bool sw_owner,
+ bool term_tbl,
+ u64 *fdb_rx_icm_addr,
+ u32 *table_id);
+int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
+ u32 table_id,
+ u32 table_type);
+int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
+ enum fs_flow_table_type type,
+ u32 table_id,
+ struct mlx5dr_cmd_query_flow_table_details *output);
+int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
+ enum mlx5_reformat_ctx_type rt,
+ size_t reformat_size,
+ void *reformat_data,
+ u32 *reformat_id);
+void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
+ u32 reformat_id);
+
+struct mlx5dr_cmd_gid_attr {
+ u8 gid[16];
+ u8 mac[6];
+ u32 roce_ver;
+};
+
+struct mlx5dr_cmd_qp_create_attr {
+ u32 page_id;
+ u32 pdn;
+ u32 cqn;
+ u32 pm_state;
+ u32 service_type;
+ u32 buff_umem_id;
+ u32 db_umem_id;
+ u32 sq_wqe_cnt;
+ u32 rq_wqe_cnt;
+ u32 rq_wqe_shift;
+};
+
+int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
+ u16 index, struct mlx5dr_cmd_gid_attr *attr);
+
+struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
+ enum mlx5dr_icm_type icm_type);
+void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);
+
+struct mlx5dr_icm_chunk *
+mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
+ enum mlx5dr_icm_chunk_size chunk_size);
+void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);
+bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste);
+int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
+ struct mlx5dr_domain_rx_tx *nic_dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ struct mlx5dr_htbl_connect_info *connect_info,
+ bool update_hw_ste);
+void mlx5dr_ste_set_formatted_ste(u16 gvmi,
+ struct mlx5dr_domain_rx_tx *nic_dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ u8 *formatted_ste,
+ struct mlx5dr_htbl_connect_info *connect_info);
+void mlx5dr_ste_copy_param(u8 match_criteria,
+ struct mlx5dr_match_param *set_param,
+ struct mlx5dr_match_parameters *mask);
+
+void mlx5dr_crc32_init_table(void);
+u32 mlx5dr_crc32_slice8_calc(const void *input_data, size_t length);
+
+struct mlx5dr_qp {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_wq_qp wq;
+ struct mlx5_uars_page *uar;
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5_core_qp mqp;
+ struct {
+ unsigned int pc;
+ unsigned int cc;
+ unsigned int size;
+ unsigned int *wqe_head;
+ unsigned int wqe_cnt;
+ } sq;
+ struct {
+ unsigned int pc;
+ unsigned int cc;
+ unsigned int size;
+ unsigned int wqe_cnt;
+ } rq;
+ int max_inline_data;
+};
+
+struct mlx5dr_cq {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_cqwq wq;
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5_core_cq mcq;
+ struct mlx5dr_qp *qp;
+};
+
+struct mlx5dr_mr {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_core_mkey mkey;
+ dma_addr_t dma_addr;
+ void *addr;
+ size_t size;
+};
+
+#define MAX_SEND_CQE 64
+#define MIN_READ_SYNC 64
+
+struct mlx5dr_send_ring {
+ struct mlx5dr_cq *cq;
+ struct mlx5dr_qp *qp;
+ struct mlx5dr_mr *mr;
+ /* How many WQEs are waiting for completion */
+ u32 pending_wqe;
+ /* Request a completion signal once per this threshold of posted WQEs */
+ u16 signal_th;
+ /* Each post_send is limited to max_post_send_size bytes */
+ u32 max_post_send_size;
+ /* manage the send queue */
+ u32 tx_head;
+ void *buf;
+ u32 buf_size;
+ struct ib_wc wc[MAX_SEND_CQE];
+ u8 sync_buff[MIN_READ_SYNC];
+ struct mlx5dr_mr *sync_mr;
+};
+
+int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
+void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
+ struct mlx5dr_send_ring *send_ring);
+int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn);
+int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn,
+ struct mlx5dr_ste *ste,
+ u8 *data,
+ u16 size,
+ u16 offset);
+int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ u8 *formatted_ste, u8 *mask);
+int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ u8 *ste_init_data,
+ bool update_hw_ste);
+int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action *action);
+
+struct mlx5dr_fw_recalc_cs_ft {
+ u64 rx_icm_addr;
+ u32 table_id;
+ u32 group_id;
+ u32 modify_hdr_id;
+};
+
+struct mlx5dr_fw_recalc_cs_ft *
+mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num);
+void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
+ struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
+int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+ u32 vport_num,
+ u64 *rx_icm_addr);
+#endif /* _DR_TYPES_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
new file mode 100644
index 000000000000..3d587d0bdbbe
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies */
+
+#include "mlx5_core.h"
+#include "fs_core.h"
+#include "fs_cmd.h"
+#include "mlx5dr.h"
+#include "fs_dr.h"
+
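+/* Tables flagged for termination stay on the FW command path; all other
+ * tables are handled by SW steering.
+ */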
+static bool mlx5_dr_is_fw_table(u32 flags)
+{
+ if (flags & MLX5_FLOW_TABLE_TERMINATION)
+ return true;
+
+ return false;
+}
+
+static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ u32 underlay_qpn,
+ bool disconnect)
+{
+ return mlx5_fs_cmd_get_fw_cmds()->update_root_ft(ns, ft, underlay_qpn,
+ disconnect);
+}
+
+static int set_miss_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5dr_action *old_miss_action;
+ struct mlx5dr_action *action = NULL;
+ struct mlx5dr_table *next_tbl;
+ int err;
+
+ next_tbl = next_ft ? next_ft->fs_dr_table.dr_table : NULL;
+ if (next_tbl) {
+ action = mlx5dr_action_create_dest_table(next_tbl);
+ if (!action)
+ return -EINVAL;
+ }
+ old_miss_action = ft->fs_dr_table.miss_action;
+ err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action);
+ if (err && action) {
+ err = mlx5dr_action_destroy(action);
+ if (err) {
+ action = NULL;
+ mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
+ err);
+ }
+ }
+ ft->fs_dr_table.miss_action = action;
+ if (old_miss_action) {
+ err = mlx5dr_action_destroy(old_miss_action);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
+ err);
+ }
+
+ return err;
+}
+
+static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ unsigned int log_size,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5dr_table *tbl;
+ int err;
+
+ if (mlx5_dr_is_fw_table(ft->flags))
+ return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
+ log_size,
+ next_ft);
+
+ tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain,
+ ft->level);
+ if (!tbl) {
+ mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
+ return -EINVAL;
+ }
+
+ ft->fs_dr_table.dr_table = tbl;
+ ft->id = mlx5dr_table_get_id(tbl);
+
+ if (next_ft) {
+ err = set_miss_action(ns, ft, next_ft);
+ if (err) {
+ mlx5dr_table_destroy(tbl);
+ ft->fs_dr_table.dr_table = NULL;
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
+ int err;
+
+ if (mlx5_dr_is_fw_table(ft->flags))
+ return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
+
+ err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
+ if (err) {
+ mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n",
+ err);
+ return err;
+ }
+ if (action) {
+ err = mlx5dr_action_destroy(action);
+ if (err) {
+ mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
+ err);
+ return err;
+ }
+ }
+
+ return err;
+}
+
+static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ return set_miss_action(ns, ft, next_ft);
+}
+
+static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ u32 *in,
+ struct mlx5_flow_group *fg)
+{
+ struct mlx5dr_matcher *matcher;
+ u16 priority = MLX5_GET(create_flow_group_in, in,
+ start_flow_index);
+ u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
+ in,
+ match_criteria_enable);
+ struct mlx5dr_match_parameters mask;
+
+ if (mlx5_dr_is_fw_table(ft->flags))
+ return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
+ fg);
+
+ mask.match_buf = MLX5_ADDR_OF(create_flow_group_in,
+ in, match_criteria);
+ mask.match_sz = sizeof(fg->mask.match_criteria);
+
+ matcher = mlx5dr_matcher_create(ft->fs_dr_table.dr_table,
+ priority,
+ match_criteria_enable,
+ &mask);
+ if (!matcher) {
+ mlx5_core_err(ns->dev, "Failed creating matcher\n");
+ return -EINVAL;
+ }
+
+ fg->fs_dr_matcher.dr_matcher = matcher;
+ return 0;
+}
+
+static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg)
+{
+ if (mlx5_dr_is_fw_table(ft->flags))
+ return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
+
+ return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
+}
+
+static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
+ struct mlx5_flow_rule *dst)
+{
+ struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+
+ return mlx5dr_action_create_dest_vport(domain, dest_attr->vport.num,
+ dest_attr->vport.flags &
+ MLX5_FLOW_DEST_VPORT_VHCA_ID,
+ dest_attr->vport.vhca_id);
+}
+
+static struct mlx5dr_action *create_ft_action(struct mlx5_core_dev *dev,
+ struct mlx5_flow_rule *dst)
+{
+ struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
+
+ if (mlx5_dr_is_fw_table(dest_ft->flags))
+ return mlx5dr_create_action_dest_flow_fw_table(dest_ft, dev);
+ return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
+}
+
+static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domain,
+ struct mlx5_fs_vlan *vlan)
+{
+ u16 n_ethtype = vlan->ethtype;
+ u8 prio = vlan->prio;
+ u16 vid = vlan->vid;
+ u32 vlan_hdr;
+
+ vlan_hdr = (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
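+ /* e.g. ethtype 0x8100, prio 5, vid 100 -> vlan_hdr 0x81005064 (before htonl) */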
+ return mlx5dr_action_create_push_vlan(domain, htonl(vlan_hdr));
+}
+
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 20
+static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte)
+{
+ struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain;
+ struct mlx5dr_action *term_action = NULL;
+ struct mlx5dr_match_parameters params;
+ struct mlx5_core_dev *dev = ns->dev;
+ struct mlx5dr_action **fs_dr_actions;
+ struct mlx5dr_action *tmp_action;
+ struct mlx5dr_action **actions;
+ bool delay_encap_set = false;
+ struct mlx5dr_rule *rule;
+ struct mlx5_flow_rule *dst;
+ int fs_dr_num_actions = 0;
+ int num_actions = 0;
+ size_t match_sz;
+ int err = 0;
+ int i;
+
+ if (mlx5_dr_is_fw_table(ft->flags))
+ return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
+
+ actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
+ GFP_KERNEL);
+ if (!actions)
+ return -ENOMEM;
+
+ fs_dr_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+ sizeof(*fs_dr_actions), GFP_KERNEL);
+ if (!fs_dr_actions) {
+ kfree(actions);
+ return -ENOMEM;
+ }
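+ /* "actions" collects everything handed to mlx5dr_rule_create();
+ * fs_dr_actions tracks only the actions created in this function,
+ * so they can be destroyed on error or on FTE deletion.
+ */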
+
+ match_sz = sizeof(fte->val);
+
+ /* The order of the actions must be kept; only the following
+ * order is supported by SW steering:
+ * TX: push vlan -> modify header -> encap
+ * RX: decap -> pop vlan -> modify header
+ */
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+ tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ enum mlx5dr_action_reformat_type decap_type =
+ DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
+
+ tmp_action = mlx5dr_action_create_packet_reformat(domain,
+ decap_type, 0,
+ NULL);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ bool is_decap = fte->action.pkt_reformat->reformat_type ==
+ MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
+
+ if (is_decap)
+ actions[num_actions++] =
+ fte->action.pkt_reformat->action.dr_action;
+ else
+ delay_encap_set = true;
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+ tmp_action =
+ mlx5dr_action_create_pop_vlan();
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
+ tmp_action =
+ mlx5dr_action_create_pop_vlan();
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ actions[num_actions++] =
+ fte->action.modify_hdr->action.dr_action;
+
+ if (delay_encap_set)
+ actions[num_actions++] =
+ fte->action.pkt_reformat->action.dr_action;
+
+ /* The order of the actions below is not important */
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ tmp_action = mlx5dr_action_create_drop();
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ term_action = tmp_action;
+ }
+
+ if (fte->flow_context.flow_tag) {
+ tmp_action =
+ mlx5dr_action_create_tag(fte->flow_context.flow_tag);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ enum mlx5_flow_destination_type type = dst->dest_attr.type;
+ u32 id;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -ENOSPC;
+ goto free_actions;
+ }
+
+ switch (type) {
+ case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
+ id = dst->dest_attr.counter_id;
+
+ tmp_action =
+ mlx5dr_action_create_flow_counter(id);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ tmp_action = create_ft_action(dev, dst);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ term_action = tmp_action;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ tmp_action = create_vport_action(domain, dst);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ term_action = tmp_action;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ }
+ }
+
+ params.match_sz = match_sz;
+ params.match_buf = (u64 *)fte->val;
+
+ if (term_action)
+ actions[num_actions++] = term_action;
+
+ rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher,
+ &params,
+ num_actions,
+ actions);
+ if (!rule) {
+ err = -EINVAL;
+ goto free_actions;
+ }
+
+ kfree(actions);
+ fte->fs_dr_rule.dr_rule = rule;
+ fte->fs_dr_rule.num_actions = fs_dr_num_actions;
+ fte->fs_dr_rule.dr_actions = fs_dr_actions;
+
+ return 0;
+
+free_actions:
+ for (i = 0; i < fs_dr_num_actions; i++)
+ if (!IS_ERR_OR_NULL(fs_dr_actions[i]))
+ mlx5dr_action_destroy(fs_dr_actions[i]);
+
+ mlx5_core_err(dev, "Failed to create dr rule (%d)\n", err);
+ kfree(actions);
+ kfree(fs_dr_actions);
+ return err;
+}
+
+static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
+ int reformat_type,
+ size_t size,
+ void *reformat_data,
+ enum mlx5_flow_namespace_type namespace,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
+ struct mlx5dr_action *action;
+ int dr_reformat;
+
+ switch (reformat_type) {
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+ case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2;
+ break;
+ case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ dr_reformat = DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2;
+ break;
+ case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3;
+ break;
+ default:
+ mlx5_core_err(ns->dev, "Packet-reformat not supported (%d)\n",
+ reformat_type);
+ return -EOPNOTSUPP;
+ }
+
+ action = mlx5dr_action_create_packet_reformat(dr_domain,
+ dr_reformat,
+ size,
+ reformat_data);
+ if (!action) {
+ mlx5_core_err(ns->dev, "Failed allocating packet-reformat action\n");
+ return -EINVAL;
+ }
+
+ pkt_reformat->action.dr_action = action;
+
+ return 0;
+}
+
+static void mlx5_cmd_dr_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ mlx5dr_action_destroy(pkt_reformat->action.dr_action);
+}
+
+static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
+ u8 namespace, u8 num_actions,
+ void *modify_actions,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
+ struct mlx5dr_action *action;
+ size_t actions_sz;
+
+ actions_sz = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) *
+ num_actions;
+ action = mlx5dr_action_create_modify_header(dr_domain, 0,
+ actions_sz,
+ modify_actions);
+ if (!action) {
+ mlx5_core_err(ns->dev, "Failed allocating modify-header action\n");
+ return -EINVAL;
+ }
+
+ modify_hdr->action.dr_action = action;
+
+ return 0;
+}
+
+static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ mlx5dr_action_destroy(modify_hdr->action.dr_action);
+}
+
+static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ int modify_mask,
+ struct fs_fte *fte)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte)
+{
+ struct mlx5_fs_dr_rule *rule = &fte->fs_dr_rule;
+ int err;
+ int i;
+
+ if (mlx5_dr_is_fw_table(ft->flags))
+ return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
+
+ err = mlx5dr_rule_destroy(rule->dr_rule);
+ if (err)
+ return err;
+
+ for (i = 0; i < rule->num_actions; i++)
+ if (!IS_ERR_OR_NULL(rule->dr_actions[i]))
+ mlx5dr_action_destroy(rule->dr_actions[i]);
+
+ kfree(rule->dr_actions);
+ return 0;
+}
+
+static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns)
+{
+ struct mlx5dr_domain *peer_domain = NULL;
+
+ if (peer_ns)
+ peer_domain = peer_ns->fs_dr_domain.dr_domain;
+ mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain,
+ peer_domain);
+ return 0;
+}
+
+static int mlx5_cmd_dr_create_ns(struct mlx5_flow_root_namespace *ns)
+{
+ ns->fs_dr_domain.dr_domain =
+ mlx5dr_domain_create(ns->dev,
+ MLX5DR_DOMAIN_TYPE_FDB);
+ if (!ns->fs_dr_domain.dr_domain) {
+ mlx5_core_err(ns->dev, "Failed to create dr flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
+{
+ return mlx5dr_domain_destroy(ns->fs_dr_domain.dr_domain);
+}
+
+bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
+{
+ return mlx5dr_is_supported(dev);
+}
+
+static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
+ .create_flow_table = mlx5_cmd_dr_create_flow_table,
+ .destroy_flow_table = mlx5_cmd_dr_destroy_flow_table,
+ .modify_flow_table = mlx5_cmd_dr_modify_flow_table,
+ .create_flow_group = mlx5_cmd_dr_create_flow_group,
+ .destroy_flow_group = mlx5_cmd_dr_destroy_flow_group,
+ .create_fte = mlx5_cmd_dr_create_fte,
+ .update_fte = mlx5_cmd_dr_update_fte,
+ .delete_fte = mlx5_cmd_dr_delete_fte,
+ .update_root_ft = mlx5_cmd_dr_update_root_ft,
+ .packet_reformat_alloc = mlx5_cmd_dr_packet_reformat_alloc,
+ .packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc,
+ .modify_header_alloc = mlx5_cmd_dr_modify_header_alloc,
+ .modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc,
+ .set_peer = mlx5_cmd_dr_set_peer,
+ .create_ns = mlx5_cmd_dr_create_ns,
+ .destroy_ns = mlx5_cmd_dr_destroy_ns,
+};
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
+{
+ return &mlx5_flow_cmds_dr;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
new file mode 100644
index 000000000000..1fb185d6ac7f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+ * Copyright (c) 2019 Mellanox Technologies
+ */
+
+#ifndef _MLX5_FS_DR_
+#define _MLX5_FS_DR_
+
+#include "mlx5dr.h"
+
+struct mlx5_flow_root_namespace;
+struct fs_fte;
+
+struct mlx5_fs_dr_action {
+ struct mlx5dr_action *dr_action;
+};
+
+struct mlx5_fs_dr_ns {
+ struct mlx5_dr_ns *dr_ns;
+};
+
+struct mlx5_fs_dr_rule {
+ struct mlx5dr_rule *dr_rule;
+ /* Only actions created by fs_dr */
+ struct mlx5dr_action **dr_actions;
+ int num_actions;
+};
+
+struct mlx5_fs_dr_domain {
+ struct mlx5dr_domain *dr_domain;
+};
+
+struct mlx5_fs_dr_matcher {
+ struct mlx5dr_matcher *dr_matcher;
+};
+
+struct mlx5_fs_dr_table {
+ struct mlx5dr_table *dr_table;
+ struct mlx5dr_action *miss_action;
+};
+
+#ifdef CONFIG_MLX5_SW_STEERING
+
+bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev);
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void);
+
+#else
+
+static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
+{
+ return NULL;
+}
+
+static inline bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
+{
+ return false;
+}
+
+#endif /* CONFIG_MLX5_SW_STEERING */
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
new file mode 100644
index 000000000000..596c927220d9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -0,0 +1,604 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019, Mellanox Technologies */
+
+#ifndef MLX5_IFC_DR_H
+#define MLX5_IFC_DR_H
+
+enum {
+ MLX5DR_ACTION_MDFY_HW_FLD_L2_0 = 0,
+ MLX5DR_ACTION_MDFY_HW_FLD_L2_1 = 1,
+ MLX5DR_ACTION_MDFY_HW_FLD_L2_2 = 2,
+ MLX5DR_ACTION_MDFY_HW_FLD_L3_0 = 3,
+ MLX5DR_ACTION_MDFY_HW_FLD_L3_1 = 4,
+ MLX5DR_ACTION_MDFY_HW_FLD_L3_2 = 5,
+ MLX5DR_ACTION_MDFY_HW_FLD_L3_3 = 6,
+ MLX5DR_ACTION_MDFY_HW_FLD_L3_4 = 7,
+ MLX5DR_ACTION_MDFY_HW_FLD_L4_0 = 8,
+ MLX5DR_ACTION_MDFY_HW_FLD_L4_1 = 9,
+ MLX5DR_ACTION_MDFY_HW_FLD_MPLS = 10,
+ MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_0 = 11,
+ MLX5DR_ACTION_MDFY_HW_FLD_REG_0 = 12,
+ MLX5DR_ACTION_MDFY_HW_FLD_REG_1 = 13,
+ MLX5DR_ACTION_MDFY_HW_FLD_REG_2 = 14,
+ MLX5DR_ACTION_MDFY_HW_FLD_REG_3 = 15,
+ MLX5DR_ACTION_MDFY_HW_FLD_L4_2 = 16,
+ MLX5DR_ACTION_MDFY_HW_FLD_FLEX_0 = 17,
+ MLX5DR_ACTION_MDFY_HW_FLD_FLEX_1 = 18,
+ MLX5DR_ACTION_MDFY_HW_FLD_FLEX_2 = 19,
+ MLX5DR_ACTION_MDFY_HW_FLD_FLEX_3 = 20,
+ MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_1 = 21,
+ MLX5DR_ACTION_MDFY_HW_FLD_METADATA = 22,
+ MLX5DR_ACTION_MDFY_HW_FLD_RESERVED = 23,
+};
+
+enum {
+ MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2,
+ MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3,
+};
+
+enum {
+ MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE = 0x0,
+ MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4 = 0x1,
+ MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6 = 0x2,
+};
+
+enum {
+ MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE = 0x0,
+ MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP = 0x1,
+ MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP = 0x2,
+};
+
+enum {
+ MLX5DR_STE_LU_TYPE_NOP = 0x00,
+ MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP = 0x05,
+ MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I = 0x0a,
+ MLX5DR_STE_LU_TYPE_ETHL2_DST_O = 0x06,
+ MLX5DR_STE_LU_TYPE_ETHL2_DST_I = 0x07,
+ MLX5DR_STE_LU_TYPE_ETHL2_DST_D = 0x1b,
+ MLX5DR_STE_LU_TYPE_ETHL2_SRC_O = 0x08,
+ MLX5DR_STE_LU_TYPE_ETHL2_SRC_I = 0x09,
+ MLX5DR_STE_LU_TYPE_ETHL2_SRC_D = 0x1c,
+ MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_O = 0x36,
+ MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_I = 0x37,
+ MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_D = 0x38,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_O = 0x0d,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_I = 0x0e,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_D = 0x1e,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_O = 0x0f,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_I = 0x10,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_D = 0x1f,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x11,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x12,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_D = 0x20,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_O = 0x29,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_I = 0x2a,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_D = 0x2b,
+ MLX5DR_STE_LU_TYPE_ETHL4_O = 0x13,
+ MLX5DR_STE_LU_TYPE_ETHL4_I = 0x14,
+ MLX5DR_STE_LU_TYPE_ETHL4_D = 0x21,
+ MLX5DR_STE_LU_TYPE_ETHL4_MISC_O = 0x2c,
+ MLX5DR_STE_LU_TYPE_ETHL4_MISC_I = 0x2d,
+ MLX5DR_STE_LU_TYPE_ETHL4_MISC_D = 0x2e,
+ MLX5DR_STE_LU_TYPE_MPLS_FIRST_O = 0x15,
+ MLX5DR_STE_LU_TYPE_MPLS_FIRST_I = 0x24,
+ MLX5DR_STE_LU_TYPE_MPLS_FIRST_D = 0x25,
+ MLX5DR_STE_LU_TYPE_GRE = 0x16,
+ MLX5DR_STE_LU_TYPE_FLEX_PARSER_0 = 0x22,
+ MLX5DR_STE_LU_TYPE_FLEX_PARSER_1 = 0x23,
+ MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x19,
+ MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE = 0x18,
+ MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
+ MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
+ MLX5DR_STE_LU_TYPE_DONT_CARE = 0x0f,
+};
+
+enum mlx5dr_ste_entry_type {
+ MLX5DR_STE_TYPE_TX = 1,
+ MLX5DR_STE_TYPE_RX = 2,
+ MLX5DR_STE_TYPE_MODIFY_PKT = 6,
+};
+
+struct mlx5_ifc_ste_general_bits {
+ u8 entry_type[0x4];
+ u8 reserved_at_4[0x4];
+ u8 entry_sub_type[0x8];
+ u8 byte_mask[0x10];
+
+ u8 next_table_base_63_48[0x10];
+ u8 next_lu_type[0x8];
+ u8 next_table_base_39_32_size[0x8];
+
+ u8 next_table_base_31_5_size[0x1b];
+ u8 linear_hash_enable[0x1];
+ u8 reserved_at_5c[0x2];
+ u8 next_table_rank[0x2];
+
+ u8 reserved_at_60[0xa0];
+ u8 tag_value[0x60];
+ u8 bit_mask[0x60];
+};
+
+struct mlx5_ifc_ste_sx_transmit_bits {
+ u8 entry_type[0x4];
+ u8 reserved_at_4[0x4];
+ u8 entry_sub_type[0x8];
+ u8 byte_mask[0x10];
+
+ u8 next_table_base_63_48[0x10];
+ u8 next_lu_type[0x8];
+ u8 next_table_base_39_32_size[0x8];
+
+ u8 next_table_base_31_5_size[0x1b];
+ u8 linear_hash_enable[0x1];
+ u8 reserved_at_5c[0x2];
+ u8 next_table_rank[0x2];
+
+ u8 sx_wire[0x1];
+ u8 sx_func_lb[0x1];
+ u8 sx_sniffer[0x1];
+ u8 sx_wire_enable[0x1];
+ u8 sx_func_lb_enable[0x1];
+ u8 sx_sniffer_enable[0x1];
+ u8 action_type[0x3];
+ u8 reserved_at_69[0x1];
+ u8 action_description[0x6];
+ u8 gvmi[0x10];
+
+ u8 encap_pointer_vlan_data[0x20];
+
+ u8 loopback_syndrome_en[0x8];
+ u8 loopback_syndrome[0x8];
+ u8 counter_trigger[0x10];
+
+ u8 miss_address_63_48[0x10];
+ u8 counter_trigger_23_16[0x8];
+ u8 miss_address_39_32[0x8];
+
+ u8 miss_address_31_6[0x1a];
+ u8 learning_point[0x1];
+ u8 go_back[0x1];
+ u8 match_polarity[0x1];
+ u8 mask_mode[0x1];
+ u8 miss_rank[0x2];
+};
+
+struct mlx5_ifc_ste_rx_steering_mult_bits {
+ u8 entry_type[0x4];
+ u8 reserved_at_4[0x4];
+ u8 entry_sub_type[0x8];
+ u8 byte_mask[0x10];
+
+ u8 next_table_base_63_48[0x10];
+ u8 next_lu_type[0x8];
+ u8 next_table_base_39_32_size[0x8];
+
+ u8 next_table_base_31_5_size[0x1b];
+ u8 linear_hash_enable[0x1];
+ u8 reserved_at_5c[0x2];
+ u8 next_table_rank[0x2];
+
+ u8 member_count[0x10];
+ u8 gvmi[0x10];
+
+ u8 qp_list_pointer[0x20];
+
+ u8 reserved_at_a0[0x1];
+ u8 tunneling_action[0x3];
+ u8 action_description[0x4];
+ u8 reserved_at_a8[0x8];
+ u8 counter_trigger_15_0[0x10];
+
+ u8 miss_address_63_48[0x10];
+ u8 counter_trigger_23_16[0x08];
+ u8 miss_address_39_32[0x8];
+
+ u8 miss_address_31_6[0x1a];
+ u8 learning_point[0x1];
+ u8 fail_on_error[0x1];
+ u8 match_polarity[0x1];
+ u8 mask_mode[0x1];
+ u8 miss_rank[0x2];
+};
+
+struct mlx5_ifc_ste_modify_packet_bits {
+ u8 entry_type[0x4];
+ u8 reserved_at_4[0x4];
+ u8 entry_sub_type[0x8];
+ u8 byte_mask[0x10];
+
+ u8 next_table_base_63_48[0x10];
+ u8 next_lu_type[0x8];
+ u8 next_table_base_39_32_size[0x8];
+
+ u8 next_table_base_31_5_size[0x1b];
+ u8 linear_hash_enable[0x1];
+ u8 reserved_at_5c[0x2];
+ u8 next_table_rank[0x2];
+
+ u8 number_of_re_write_actions[0x10];
+ u8 gvmi[0x10];
+
+ u8 header_re_write_actions_pointer[0x20];
+
+ u8 reserved_at_a0[0x1];
+ u8 tunneling_action[0x3];
+ u8 action_description[0x4];
+ u8 reserved_at_a8[0x8];
+ u8 counter_trigger_15_0[0x10];
+
+ u8 miss_address_63_48[0x10];
+ u8 counter_trigger_23_16[0x08];
+ u8 miss_address_39_32[0x8];
+
+ u8 miss_address_31_6[0x1a];
+ u8 learning_point[0x1];
+ u8 fail_on_error[0x1];
+ u8 match_polarity[0x1];
+ u8 mask_mode[0x1];
+ u8 miss_rank[0x2];
+};
+
+struct mlx5_ifc_ste_eth_l2_src_bits {
+ u8 smac_47_16[0x20];
+
+ u8 smac_15_0[0x10];
+ u8 l3_ethertype[0x10];
+
+ u8 qp_type[0x2];
+ u8 ethertype_filter[0x1];
+ u8 reserved_at_43[0x1];
+ u8 sx_sniffer[0x1];
+ u8 force_lb[0x1];
+ u8 functional_lb[0x1];
+ u8 port[0x1];
+ u8 reserved_at_48[0x4];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_qualifier[0x2];
+ u8 reserved_at_52[0x2];
+ u8 first_vlan_id[0xc];
+
+ u8 ip_fragmented[0x1];
+ u8 tcp_syn[0x1];
+ u8 encp_type[0x2];
+ u8 l3_type[0x2];
+ u8 l4_type[0x2];
+ u8 reserved_at_68[0x4];
+ u8 second_priority[0x3];
+ u8 second_cfi[0x1];
+ u8 second_vlan_qualifier[0x2];
+ u8 reserved_at_72[0x2];
+ u8 second_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l2_dst_bits {
+ u8 dmac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 l3_ethertype[0x10];
+
+ u8 qp_type[0x2];
+ u8 ethertype_filter[0x1];
+ u8 reserved_at_43[0x1];
+ u8 sx_sniffer[0x1];
+ u8 force_lb[0x1];
+ u8 functional_lb[0x1];
+ u8 port[0x1];
+ u8 reserved_at_48[0x4];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_qualifier[0x2];
+ u8 reserved_at_52[0x2];
+ u8 first_vlan_id[0xc];
+
+ u8 ip_fragmented[0x1];
+ u8 tcp_syn[0x1];
+ u8 encp_type[0x2];
+ u8 l3_type[0x2];
+ u8 l4_type[0x2];
+ u8 reserved_at_68[0x4];
+ u8 second_priority[0x3];
+ u8 second_cfi[0x1];
+ u8 second_vlan_qualifier[0x2];
+ u8 reserved_at_72[0x2];
+ u8 second_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l2_src_dst_bits {
+ u8 dmac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 smac_47_32[0x10];
+
+ u8 smac_31_0[0x20];
+
+ u8 sx_sniffer[0x1];
+ u8 force_lb[0x1];
+ u8 functional_lb[0x1];
+ u8 port[0x1];
+ u8 l3_type[0x2];
+ u8 reserved_at_66[0x6];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_qualifier[0x2];
+ u8 reserved_at_72[0x2];
+ u8 first_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv4_5_tuple_bits {
+ u8 destination_address[0x20];
+
+ u8 source_address[0x20];
+
+ u8 source_port[0x10];
+ u8 destination_port[0x10];
+
+ u8 fragmented[0x1];
+ u8 first_fragment[0x1];
+ u8 reserved_at_62[0x2];
+ u8 reserved_at_64[0x1];
+ u8 ecn[0x2];
+ u8 tcp_ns[0x1];
+ u8 tcp_cwr[0x1];
+ u8 tcp_ece[0x1];
+ u8 tcp_urg[0x1];
+ u8 tcp_ack[0x1];
+ u8 tcp_psh[0x1];
+ u8 tcp_rst[0x1];
+ u8 tcp_syn[0x1];
+ u8 tcp_fin[0x1];
+ u8 dscp[0x6];
+ u8 reserved_at_76[0x2];
+ u8 protocol[0x8];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv6_dst_bits {
+ u8 dst_ip_127_96[0x20];
+
+ u8 dst_ip_95_64[0x20];
+
+ u8 dst_ip_63_32[0x20];
+
+ u8 dst_ip_31_0[0x20];
+};
+
+struct mlx5_ifc_ste_eth_l2_tnl_bits {
+ u8 dmac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 l3_ethertype[0x10];
+
+ u8 l2_tunneling_network_id[0x20];
+
+ u8 ip_fragmented[0x1];
+ u8 tcp_syn[0x1];
+ u8 encp_type[0x2];
+ u8 l3_type[0x2];
+ u8 l4_type[0x2];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 reserved_at_6c[0x3];
+ u8 gre_key_flag[0x1];
+ u8 first_vlan_qualifier[0x2];
+ u8 reserved_at_72[0x2];
+ u8 first_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv6_src_bits {
+ u8 src_ip_127_96[0x20];
+
+ u8 src_ip_95_64[0x20];
+
+ u8 src_ip_63_32[0x20];
+
+ u8 src_ip_31_0[0x20];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv4_misc_bits {
+ u8 version[0x4];
+ u8 ihl[0x4];
+ u8 reserved_at_8[0x8];
+ u8 total_length[0x10];
+
+ u8 identification[0x10];
+ u8 flags[0x3];
+ u8 fragment_offset[0xd];
+
+ u8 time_to_live[0x8];
+ u8 reserved_at_48[0x8];
+ u8 checksum[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_ste_eth_l4_bits {
+ u8 fragmented[0x1];
+ u8 first_fragment[0x1];
+ u8 reserved_at_2[0x6];
+ u8 protocol[0x8];
+ u8 dst_port[0x10];
+
+ u8 ipv6_version[0x4];
+ u8 reserved_at_24[0x1];
+ u8 ecn[0x2];
+ u8 tcp_ns[0x1];
+ u8 tcp_cwr[0x1];
+ u8 tcp_ece[0x1];
+ u8 tcp_urg[0x1];
+ u8 tcp_ack[0x1];
+ u8 tcp_psh[0x1];
+ u8 tcp_rst[0x1];
+ u8 tcp_syn[0x1];
+ u8 tcp_fin[0x1];
+ u8 src_port[0x10];
+
+ u8 ipv6_payload_length[0x10];
+ u8 ipv6_hop_limit[0x8];
+ u8 dscp[0x6];
+ u8 reserved_at_5e[0x2];
+
+ u8 tcp_data_offset[0x4];
+ u8 reserved_at_64[0x8];
+ u8 flow_label[0x14];
+};
+
+struct mlx5_ifc_ste_eth_l4_misc_bits {
+ u8 checksum[0x10];
+ u8 length[0x10];
+
+ u8 seq_num[0x20];
+
+ u8 ack_num[0x20];
+
+ u8 urgent_pointer[0x10];
+ u8 window_size[0x10];
+};
+
+struct mlx5_ifc_ste_mpls_bits {
+ u8 mpls0_label[0x14];
+ u8 mpls0_exp[0x3];
+ u8 mpls0_s_bos[0x1];
+ u8 mpls0_ttl[0x8];
+
+ u8 mpls1_label[0x20];
+
+ u8 mpls2_label[0x20];
+
+ u8 reserved_at_60[0x16];
+ u8 mpls4_s_bit[0x1];
+ u8 mpls4_qualifier[0x1];
+ u8 mpls3_s_bit[0x1];
+ u8 mpls3_qualifier[0x1];
+ u8 mpls2_s_bit[0x1];
+ u8 mpls2_qualifier[0x1];
+ u8 mpls1_s_bit[0x1];
+ u8 mpls1_qualifier[0x1];
+ u8 mpls0_s_bit[0x1];
+ u8 mpls0_qualifier[0x1];
+};
+
+struct mlx5_ifc_ste_register_0_bits {
+ u8 register_0_h[0x20];
+
+ u8 register_0_l[0x20];
+
+ u8 register_1_h[0x20];
+
+ u8 register_1_l[0x20];
+};
+
+struct mlx5_ifc_ste_register_1_bits {
+ u8 register_2_h[0x20];
+
+ u8 register_2_l[0x20];
+
+ u8 register_3_h[0x20];
+
+ u8 register_3_l[0x20];
+};
+
+struct mlx5_ifc_ste_gre_bits {
+ u8 gre_c_present[0x1];
+ u8 reserved_at_1[0x1];
+ u8 gre_k_present[0x1];
+ u8 gre_s_present[0x1];
+ u8 strict_src_route[0x1];
+ u8 recur[0x3];
+ u8 flags[0x5];
+ u8 version[0x3];
+ u8 gre_protocol[0x10];
+
+ u8 checksum[0x10];
+ u8 offset[0x10];
+
+ u8 gre_key_h[0x18];
+ u8 gre_key_l[0x8];
+
+ u8 seq_num[0x20];
+};
+
+struct mlx5_ifc_ste_flex_parser_0_bits {
+ u8 parser_3_label[0x14];
+ u8 parser_3_exp[0x3];
+ u8 parser_3_s_bos[0x1];
+ u8 parser_3_ttl[0x8];
+
+ u8 flex_parser_2[0x20];
+
+ u8 flex_parser_1[0x20];
+
+ u8 flex_parser_0[0x20];
+};
+
+struct mlx5_ifc_ste_flex_parser_1_bits {
+ u8 flex_parser_7[0x20];
+
+ u8 flex_parser_6[0x20];
+
+ u8 flex_parser_5[0x20];
+
+ u8 flex_parser_4[0x20];
+};
+
+struct mlx5_ifc_ste_flex_parser_tnl_bits {
+ u8 flex_parser_tunneling_header_63_32[0x20];
+
+ u8 flex_parser_tunneling_header_31_0[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_ste_general_purpose_bits {
+ u8 general_purpose_lookup_field[0x20];
+
+ u8 reserved_at_20[0x20];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_ste_src_gvmi_qp_bits {
+ u8 loopback_syndrome[0x8];
+ u8 reserved_at_8[0x8];
+ u8 source_gvmi[0x10];
+
+ u8 reserved_at_20[0x5];
+ u8 force_lb[0x1];
+ u8 functional_lb[0x1];
+ u8 source_is_requestor[0x1];
+ u8 source_qp[0x18];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_l2_hdr_bits {
+ u8 dmac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 smac_47_32[0x10];
+
+ u8 smac_31_0[0x20];
+
+ u8 ethertype[0x10];
+ u8 vlan_type[0x10];
+
+ u8 vlan[0x10];
+ u8 reserved_at_90[0x10];
+};
+
+/* Both HW set and HW add share the same HW format with different opcodes */
+struct mlx5_ifc_dr_action_hw_set_bits {
+ u8 opcode[0x8];
+ u8 destination_field_code[0x8];
+ u8 reserved_at_10[0x2];
+ u8 destination_left_shifter[0x6];
+ u8 reserved_at_18[0x3];
+ u8 destination_length[0x5];
+
+ u8 inline_data[0x20];
+};
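+
+/* Illustrative sketch, assuming the standard MLX5_SET accessors: encoding
+ * a SET action that writes 0x1234 into 16 bits at offset 0 of the
+ * metadata field:
+ *
+ *	MLX5_SET(dr_action_hw_set, action, opcode,
+ *		 MLX5DR_ACTION_MDFY_HW_OP_SET);
+ *	MLX5_SET(dr_action_hw_set, action, destination_field_code,
+ *		 MLX5DR_ACTION_MDFY_HW_FLD_METADATA);
+ *	MLX5_SET(dr_action_hw_set, action, destination_left_shifter, 0);
+ *	MLX5_SET(dr_action_hw_set, action, destination_length, 16);
+ *	MLX5_SET(dr_action_hw_set, action, inline_data, 0x1234);
+ */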
+
+#endif /* MLX5_IFC_DR_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
new file mode 100644
index 000000000000..adda9cbfba45
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019, Mellanox Technologies */
+
+#ifndef _MLX5DR_H_
+#define _MLX5DR_H_
+
+struct mlx5dr_domain;
+struct mlx5dr_table;
+struct mlx5dr_matcher;
+struct mlx5dr_rule;
+struct mlx5dr_action;
+
+enum mlx5dr_domain_type {
+ MLX5DR_DOMAIN_TYPE_NIC_RX,
+ MLX5DR_DOMAIN_TYPE_NIC_TX,
+ MLX5DR_DOMAIN_TYPE_FDB,
+};
+
+enum mlx5dr_domain_sync_flags {
+ MLX5DR_DOMAIN_SYNC_FLAGS_SW = 1 << 0,
+ MLX5DR_DOMAIN_SYNC_FLAGS_HW = 1 << 1,
+};
+
+enum mlx5dr_action_reformat_type {
+ DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2,
+ DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2,
+ DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2,
+ DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3,
+};
+
+struct mlx5dr_match_parameters {
+ size_t match_sz;
+ u64 *match_buf; /* Device spec format */
+};
+
+#ifdef CONFIG_MLX5_SW_STEERING
+
+struct mlx5dr_domain *
+mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type);
+
+int mlx5dr_domain_destroy(struct mlx5dr_domain *domain);
+
+int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags);
+
+void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
+ struct mlx5dr_domain *peer_dmn);
+
+struct mlx5dr_table *
+mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level);
+
+int mlx5dr_table_destroy(struct mlx5dr_table *table);
+
+u32 mlx5dr_table_get_id(struct mlx5dr_table *table);
+
+struct mlx5dr_matcher *
+mlx5dr_matcher_create(struct mlx5dr_table *table,
+ u16 priority,
+ u8 match_criteria_enable,
+ struct mlx5dr_match_parameters *mask);
+
+int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher);
+
+struct mlx5dr_rule *
+mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_match_parameters *value,
+ size_t num_actions,
+ struct mlx5dr_action *actions[]);
+
+int mlx5dr_rule_destroy(struct mlx5dr_rule *rule);
+
+int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
+ struct mlx5dr_action *action);
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_table(struct mlx5dr_table *table);
+
+struct mlx5dr_action *
+mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
+ struct mlx5_core_dev *mdev);
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
+ u32 vport, u8 vhca_id_valid,
+ u16 vhca_id);
+
+struct mlx5dr_action *mlx5dr_action_create_drop(void);
+
+struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value);
+
+struct mlx5dr_action *
+mlx5dr_action_create_flow_counter(u32 counter_id);
+
+struct mlx5dr_action *
+mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
+ enum mlx5dr_action_reformat_type reformat_type,
+ size_t data_sz,
+ void *data);
+
+struct mlx5dr_action *
+mlx5dr_action_create_modify_header(struct mlx5dr_domain *domain,
+ u32 flags,
+ size_t actions_sz,
+ __be64 actions[]);
+
+struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void);
+
+struct mlx5dr_action *
+mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain, __be32 vlan_hdr);
+
+int mlx5dr_action_destroy(struct mlx5dr_action *action);
+
+static inline bool
+mlx5dr_is_supported(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner);
+}
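+
+/* Typical usage (sketch): create a domain, a table within it, a matcher
+ * on the table, and then rules on the matcher; teardown is the reverse.
+ *
+ *	dmn = mlx5dr_domain_create(mdev, MLX5DR_DOMAIN_TYPE_FDB);
+ *	tbl = mlx5dr_table_create(dmn, level);
+ *	matcher = mlx5dr_matcher_create(tbl, prio, criteria_enable, &mask);
+ *	rule = mlx5dr_rule_create(matcher, &value, num_actions, actions);
+ *	...
+ *	mlx5dr_rule_destroy(rule);
+ *	mlx5dr_matcher_destroy(matcher);
+ *	mlx5dr_table_destroy(tbl);
+ *	mlx5dr_domain_destroy(dmn);
+ */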
+
+#else /* CONFIG_MLX5_SW_STEERING */
+
+static inline struct mlx5dr_domain *
+mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) { return NULL; }
+
+static inline int
+mlx5dr_domain_destroy(struct mlx5dr_domain *domain) { return 0; }
+
+static inline int
+mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags) { return 0; }
+
+static inline void
+mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
+ struct mlx5dr_domain *peer_dmn) { }
+
+static inline struct mlx5dr_table *
+mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level) { return NULL; }
+
+static inline int
+mlx5dr_table_destroy(struct mlx5dr_table *table) { return 0; }
+
+static inline u32
+mlx5dr_table_get_id(struct mlx5dr_table *table) { return 0; }
+
+static inline struct mlx5dr_matcher *
+mlx5dr_matcher_create(struct mlx5dr_table *table,
+ u16 priority,
+ u8 match_criteria_enable,
+ struct mlx5dr_match_parameters *mask) { return NULL; }
+
+static inline int
+mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher) { return 0; }
+
+static inline struct mlx5dr_rule *
+mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_match_parameters *value,
+ size_t num_actions,
+ struct mlx5dr_action *actions[]) { return NULL; }
+
+static inline int
+mlx5dr_rule_destroy(struct mlx5dr_rule *rule) { return 0; }
+
+static inline int
+mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
+ struct mlx5dr_action *action) { return 0; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_dest_table(struct mlx5dr_table *table) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
+ struct mlx5_core_dev *mdev) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
+ u32 vport, u8 vhca_id_valid,
+ u16 vhca_id) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_drop(void) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_tag(u32 tag_value) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_flow_counter(u32 counter_id) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
+ enum mlx5dr_action_reformat_type reformat_type,
+ size_t data_sz,
+ void *data) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_modify_header(struct mlx5dr_domain *domain,
+ u32 flags,
+ size_t actions_sz,
+ __be64 actions[]) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_pop_vlan(void) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain,
+ __be32 vlan_hdr) { return NULL; }
+
+static inline int
+mlx5dr_action_destroy(struct mlx5dr_action *action) { return 0; }
+
+static inline bool
+mlx5dr_is_supported(struct mlx5_core_dev *dev) { return false; }
+
+#endif /* CONFIG_MLX5_SW_STEERING */
+
+#endif /* _MLX5DR_H_ */
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
new file mode 100644
index 000000000000..5ea570be8379
--- /dev/null
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2019 Pensando Systems, Inc
+#
+# Pensando device configuration
+#
+
+config NET_VENDOR_PENSANDO
+ bool "Pensando devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Pensando cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_PENSANDO
+
+config IONIC
+ tristate "Pensando Ethernet IONIC Support"
+ depends on 64BIT && PCI
+ help
+ This enables the support for the Pensando family of Ethernet
+ adapters. More specific information on this driver can be
+ found in
+ <file:Documentation/networking/device_drivers/pensando/ionic.rst>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ionic.
+
+endif # NET_VENDOR_PENSANDO
diff --git a/drivers/net/ethernet/pensando/Makefile b/drivers/net/ethernet/pensando/Makefile
new file mode 100644
index 000000000000..21ce7499c122
--- /dev/null
+++ b/drivers/net/ethernet/pensando/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Pensando network device drivers.
+#
+
+obj-$(CONFIG_IONIC) += ionic/
diff --git a/drivers/net/ethernet/pensando/ionic/Makefile b/drivers/net/ethernet/pensando/ionic/Makefile
new file mode 100644
index 000000000000..29f304d75261
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2017 - 2019 Pensando Systems, Inc
+
+obj-$(CONFIG_IONIC) := ionic.o
+
+ionic-y := ionic_main.o ionic_bus_pci.o ionic_devlink.o ionic_dev.o \
+ ionic_debugfs.o ionic_lif.o ionic_rx_filter.o ionic_ethtool.o \
+ ionic_txrx.o ionic_stats.o
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
new file mode 100644
index 000000000000..7a7060677f15
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#ifndef _IONIC_H_
+#define _IONIC_H_
+
+struct ionic_lif;
+
+#include "ionic_if.h"
+#include "ionic_dev.h"
+#include "ionic_devlink.h"
+
+#define IONIC_DRV_NAME "ionic"
+#define IONIC_DRV_DESCRIPTION "Pensando Ethernet NIC Driver"
+#define IONIC_DRV_VERSION "0.15.0-k"
+
+#define PCI_VENDOR_ID_PENSANDO 0x1dd8
+
+#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
+#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
+
+#define IONIC_SUBDEV_ID_NAPLES_25 0x4000
+#define IONIC_SUBDEV_ID_NAPLES_100_4 0x4001
+#define IONIC_SUBDEV_ID_NAPLES_100_8 0x4002
+
+#define DEVCMD_TIMEOUT 10
+
+struct ionic {
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct devlink_port dl_port;
+ struct ionic_dev idev;
+ struct mutex dev_cmd_lock; /* lock for dev_cmd operations */
+ struct dentry *dentry;
+ struct ionic_dev_bar bars[IONIC_BARS_MAX];
+ unsigned int num_bars;
+ struct ionic_identity ident;
+ struct list_head lifs;
+ struct ionic_lif *master_lif;
+ unsigned int nnqs_per_lif;
+ unsigned int neqs_per_lif;
+ unsigned int ntxqs_per_lif;
+ unsigned int nrxqs_per_lif;
+ DECLARE_BITMAP(lifbits, IONIC_LIFS_MAX);
+ unsigned int nintrs;
+ DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
+ struct work_struct nb_work;
+ struct notifier_block nb;
+};
+
+struct ionic_admin_ctx {
+ struct completion work;
+ union ionic_adminq_cmd cmd;
+ union ionic_adminq_comp comp;
+};
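+
+/* An admin command is filled into ctx->cmd and posted with
+ * ionic_adminq_post_wait(), which waits on ctx->work until the device's
+ * completion has been copied into ctx->comp.
+ */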
+
+int ionic_napi(struct napi_struct *napi, int budget, ionic_cq_cb cb,
+ ionic_cq_done_cb done_cb, void *done_arg);
+
+int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
+int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
+int ionic_set_dma_mask(struct ionic *ionic);
+int ionic_setup(struct ionic *ionic);
+
+int ionic_identify(struct ionic *ionic);
+int ionic_init(struct ionic *ionic);
+int ionic_reset(struct ionic *ionic);
+
+int ionic_port_identify(struct ionic *ionic);
+int ionic_port_init(struct ionic *ionic);
+int ionic_port_reset(struct ionic *ionic);
+
+#endif /* _IONIC_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus.h b/drivers/net/ethernet/pensando/ionic/ionic_bus.h
new file mode 100644
index 000000000000..2f4d08c64910
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#ifndef _IONIC_BUS_H_
+#define _IONIC_BUS_H_
+
+int ionic_bus_get_irq(struct ionic *ionic, unsigned int num);
+const char *ionic_bus_info(struct ionic *ionic);
+int ionic_bus_alloc_irq_vectors(struct ionic *ionic, unsigned int nintrs);
+void ionic_bus_free_irq_vectors(struct ionic *ionic);
+int ionic_bus_register_driver(void);
+void ionic_bus_unregister_driver(void);
+void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num);
+void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page);
+
+#endif /* _IONIC_BUS_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
new file mode 100644
index 000000000000..9a9ab8cb2cb3
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "ionic.h"
+#include "ionic_bus.h"
+#include "ionic_lif.h"
+#include "ionic_debugfs.h"
+
+/* Supported devices */
+static const struct pci_device_id ionic_id_table[] = {
+ { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF) },
+ { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF) },
+ { 0, } /* end of table */
+};
+MODULE_DEVICE_TABLE(pci, ionic_id_table);
+
+int ionic_bus_get_irq(struct ionic *ionic, unsigned int num)
+{
+ return pci_irq_vector(ionic->pdev, num);
+}
+
+const char *ionic_bus_info(struct ionic *ionic)
+{
+ return pci_name(ionic->pdev);
+}
+
+int ionic_bus_alloc_irq_vectors(struct ionic *ionic, unsigned int nintrs)
+{
+ return pci_alloc_irq_vectors(ionic->pdev, nintrs, nintrs,
+ PCI_IRQ_MSIX);
+}
+
+void ionic_bus_free_irq_vectors(struct ionic *ionic)
+{
+ pci_free_irq_vectors(ionic->pdev);
+}
+
+static int ionic_map_bars(struct ionic *ionic)
+{
+ struct pci_dev *pdev = ionic->pdev;
+ struct device *dev = ionic->dev;
+ struct ionic_dev_bar *bars;
+ unsigned int i, j;
+
+ bars = ionic->bars;
+ ionic->num_bars = 0;
+
+ for (i = 0, j = 0; i < IONIC_BARS_MAX; i++) {
+ if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
+ continue;
+ bars[j].len = pci_resource_len(pdev, i);
+
+ /* map only the first memory BAR in full; record the rest unmapped */
+ if (j > 0) {
+ bars[j].vaddr = NULL;
+ } else {
+ bars[j].vaddr = pci_iomap(pdev, i, bars[j].len);
+ if (!bars[j].vaddr) {
+ dev_err(dev,
+ "Cannot memory-map BAR %d, aborting\n",
+ i);
+ return -ENODEV;
+ }
+ }
+
+ bars[j].bus_addr = pci_resource_start(pdev, i);
+ bars[j].res_index = i;
+ ionic->num_bars++;
+ j++;
+ }
+
+ return 0;
+}
+
+static void ionic_unmap_bars(struct ionic *ionic)
+{
+ struct ionic_dev_bar *bars = ionic->bars;
+ unsigned int i;
+
+ for (i = 0; i < IONIC_BARS_MAX; i++) {
+ if (bars[i].vaddr) {
+ iounmap(bars[i].vaddr);
+ bars[i].bus_addr = 0;
+ bars[i].vaddr = NULL;
+ bars[i].len = 0;
+ }
+ }
+}
+
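+/* Map one doorbell page: page_num indexes PAGE_SIZE pages within the
+ * doorbell BAR.
+ */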
+void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num)
+{
+ return pci_iomap_range(ionic->pdev,
+ ionic->bars[IONIC_PCI_BAR_DBELL].res_index,
+ (u64)page_num << PAGE_SHIFT, PAGE_SIZE);
+}
+
+void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page)
+{
+ iounmap(page);
+}
+
+static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct ionic *ionic;
+ int err;
+
+ ionic = ionic_devlink_alloc(dev);
+ if (!ionic)
+ return -ENOMEM;
+
+ ionic->pdev = pdev;
+ ionic->dev = dev;
+ pci_set_drvdata(pdev, ionic);
+ mutex_init(&ionic->dev_cmd_lock);
+
+ /* Query the system for the device's DMA addressing limitation. */
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(IONIC_ADDR_LEN));
+ if (err) {
+ dev_err(dev, "Unable to obtain 64-bit DMA for consistent allocations, aborting. err=%d\n",
+ err);
+ goto err_out_clear_drvdata;
+ }
+
+ ionic_debugfs_add_dev(ionic);
+
+ /* Setup PCI device */
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(dev, "Cannot enable PCI device: %d, aborting\n", err);
+ goto err_out_debugfs_del_dev;
+ }
+
+ err = pci_request_regions(pdev, IONIC_DRV_NAME);
+ if (err) {
+ dev_err(dev, "Cannot request PCI regions: %d, aborting\n", err);
+ goto err_out_pci_disable_device;
+ }
+
+ pci_set_master(pdev);
+
+ err = ionic_map_bars(ionic);
+ if (err)
+ goto err_out_pci_clear_master;
+
+ /* Configure the device */
+ err = ionic_setup(ionic);
+ if (err) {
+ dev_err(dev, "Cannot setup device: %d, aborting\n", err);
+ goto err_out_unmap_bars;
+ }
+
+ err = ionic_identify(ionic);
+ if (err) {
+ dev_err(dev, "Cannot identify device: %d, aborting\n", err);
+ goto err_out_teardown;
+ }
+
+ err = ionic_init(ionic);
+ if (err) {
+ dev_err(dev, "Cannot init device: %d, aborting\n", err);
+ goto err_out_teardown;
+ }
+
+ /* Configure the ports */
+ err = ionic_port_identify(ionic);
+ if (err) {
+ dev_err(dev, "Cannot identify port: %d, aborting\n", err);
+ goto err_out_reset;
+ }
+
+ err = ionic_port_init(ionic);
+ if (err) {
+ dev_err(dev, "Cannot init port: %d, aborting\n", err);
+ goto err_out_reset;
+ }
+
+ /* Configure LIFs */
+ err = ionic_lif_identify(ionic, IONIC_LIF_TYPE_CLASSIC,
+ &ionic->ident.lif);
+ if (err) {
+ dev_err(dev, "Cannot identify LIFs: %d, aborting\n", err);
+ goto err_out_port_reset;
+ }
+
+ err = ionic_lifs_size(ionic);
+ if (err) {
+ dev_err(dev, "Cannot size LIFs: %d, aborting\n", err);
+ goto err_out_port_reset;
+ }
+
+ err = ionic_lifs_alloc(ionic);
+ if (err) {
+ dev_err(dev, "Cannot allocate LIFs: %d, aborting\n", err);
+ goto err_out_free_irqs;
+ }
+
+ err = ionic_lifs_init(ionic);
+ if (err) {
+ dev_err(dev, "Cannot init LIFs: %d, aborting\n", err);
+ goto err_out_free_lifs;
+ }
+
+ err = ionic_lifs_register(ionic);
+ if (err) {
+ dev_err(dev, "Cannot register LIFs: %d, aborting\n", err);
+ goto err_out_deinit_lifs;
+ }
+
+ err = ionic_devlink_register(ionic);
+ if (err) {
+ dev_err(dev, "Cannot register devlink: %d\n", err);
+ goto err_out_deregister_lifs;
+ }
+
+ return 0;
+
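+ /* Unwind in reverse order of setup; each label below tears down
+ * one stage and falls through to the next.
+ */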
+err_out_deregister_lifs:
+ ionic_lifs_unregister(ionic);
+err_out_deinit_lifs:
+ ionic_lifs_deinit(ionic);
+err_out_free_lifs:
+ ionic_lifs_free(ionic);
+err_out_free_irqs:
+ ionic_bus_free_irq_vectors(ionic);
+err_out_port_reset:
+ ionic_port_reset(ionic);
+err_out_reset:
+ ionic_reset(ionic);
+err_out_teardown:
+ ionic_dev_teardown(ionic);
+err_out_unmap_bars:
+ ionic_unmap_bars(ionic);
+ pci_release_regions(pdev);
+err_out_pci_clear_master:
+ pci_clear_master(pdev);
+err_out_pci_disable_device:
+ pci_disable_device(pdev);
+err_out_debugfs_del_dev:
+ ionic_debugfs_del_dev(ionic);
+err_out_clear_drvdata:
+ mutex_destroy(&ionic->dev_cmd_lock);
+ ionic_devlink_free(ionic);
+
+ return err;
+}
+
+static void ionic_remove(struct pci_dev *pdev)
+{
+ struct ionic *ionic = pci_get_drvdata(pdev);
+
+ if (!ionic)
+ return;
+
+ ionic_devlink_unregister(ionic);
+ ionic_lifs_unregister(ionic);
+ ionic_lifs_deinit(ionic);
+ ionic_lifs_free(ionic);
+ ionic_bus_free_irq_vectors(ionic);
+ ionic_port_reset(ionic);
+ ionic_reset(ionic);
+ ionic_dev_teardown(ionic);
+ ionic_unmap_bars(ionic);
+ pci_release_regions(pdev);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+ ionic_debugfs_del_dev(ionic);
+ mutex_destroy(&ionic->dev_cmd_lock);
+ ionic_devlink_free(ionic);
+}
+
+static struct pci_driver ionic_driver = {
+ .name = IONIC_DRV_NAME,
+ .id_table = ionic_id_table,
+ .probe = ionic_probe,
+ .remove = ionic_remove,
+};
+
+int ionic_bus_register_driver(void)
+{
+ return pci_register_driver(&ionic_driver);
+}
+
+void ionic_bus_unregister_driver(void)
+{
+ pci_unregister_driver(&ionic_driver);
+}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
new file mode 100644
index 000000000000..7afc4a365b75
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+
+#include "ionic.h"
+#include "ionic_bus.h"
+#include "ionic_lif.h"
+#include "ionic_debugfs.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct dentry *ionic_dir;
+
+void ionic_debugfs_create(void)
+{
+ ionic_dir = debugfs_create_dir(IONIC_DRV_NAME, NULL);
+}
+
+void ionic_debugfs_destroy(void)
+{
+ debugfs_remove_recursive(ionic_dir);
+}
+
+void ionic_debugfs_add_dev(struct ionic *ionic)
+{
+ ionic->dentry = debugfs_create_dir(ionic_bus_info(ionic), ionic_dir);
+}
+
+void ionic_debugfs_del_dev(struct ionic *ionic)
+{
+ debugfs_remove_recursive(ionic->dentry);
+ ionic->dentry = NULL;
+}
+
+static int identity_show(struct seq_file *seq, void *v)
+{
+ struct ionic *ionic = seq->private;
+ struct ionic_identity *ident;
+
+ ident = &ionic->ident;
+
+ seq_printf(seq, "nlifs: %d\n", ident->dev.nlifs);
+ seq_printf(seq, "nintrs: %d\n", ident->dev.nintrs);
+ seq_printf(seq, "ndbpgs_per_lif: %d\n", ident->dev.ndbpgs_per_lif);
+ seq_printf(seq, "intr_coal_mult: %d\n", ident->dev.intr_coal_mult);
+ seq_printf(seq, "intr_coal_div: %d\n", ident->dev.intr_coal_div);
+
+ seq_printf(seq, "max_ucast_filters: %d\n", ident->lif.eth.max_ucast_filters);
+ seq_printf(seq, "max_mcast_filters: %d\n", ident->lif.eth.max_mcast_filters);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(identity);
+
+void ionic_debugfs_add_ident(struct ionic *ionic)
+{
+ debugfs_create_file("identity", 0400, ionic->dentry,
+ ionic, &identity_fops);
+}
+
+void ionic_debugfs_add_sizes(struct ionic *ionic)
+{
+ debugfs_create_u32("nlifs", 0400, ionic->dentry,
+ (u32 *)&ionic->ident.dev.nlifs);
+ debugfs_create_u32("nintrs", 0400, ionic->dentry, &ionic->nintrs);
+
+ debugfs_create_u32("ntxqs_per_lif", 0400, ionic->dentry,
+ (u32 *)&ionic->ident.lif.eth.config.queue_count[IONIC_QTYPE_TXQ]);
+ debugfs_create_u32("nrxqs_per_lif", 0400, ionic->dentry,
+ (u32 *)&ionic->ident.lif.eth.config.queue_count[IONIC_QTYPE_RXQ]);
+}
+
+static int q_tail_show(struct seq_file *seq, void *v)
+{
+ struct ionic_queue *q = seq->private;
+
+ seq_printf(seq, "%d\n", q->tail->index);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(q_tail);
+
+static int q_head_show(struct seq_file *seq, void *v)
+{
+ struct ionic_queue *q = seq->private;
+
+ seq_printf(seq, "%d\n", q->head->index);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(q_head);
+
+static int cq_tail_show(struct seq_file *seq, void *v)
+{
+ struct ionic_cq *cq = seq->private;
+
+ seq_printf(seq, "%d\n", cq->tail->index);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(cq_tail);
+
+static const struct debugfs_reg32 intr_ctrl_regs[] = {
+ { .name = "coal_init", .offset = 0, },
+ { .name = "mask", .offset = 4, },
+ { .name = "credits", .offset = 8, },
+ { .name = "mask_on_assert", .offset = 12, },
+ { .name = "coal_timer", .offset = 16, },
+};
+
+void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
+{
+ struct dentry *q_dentry, *cq_dentry, *intr_dentry, *stats_dentry;
+ struct ionic_dev *idev = &lif->ionic->idev;
+ struct debugfs_regset32 *intr_ctrl_regset;
+ struct ionic_intr_info *intr = &qcq->intr;
+ struct debugfs_blob_wrapper *desc_blob;
+ struct device *dev = lif->ionic->dev;
+ struct ionic_queue *q = &qcq->q;
+ struct ionic_cq *cq = &qcq->cq;
+
+ qcq->dentry = debugfs_create_dir(q->name, lif->dentry);
+
+ debugfs_create_x32("total_size", 0400, qcq->dentry, &qcq->total_size);
+ debugfs_create_x64("base_pa", 0400, qcq->dentry, &qcq->base_pa);
+
+ q_dentry = debugfs_create_dir("q", qcq->dentry);
+
+ debugfs_create_u32("index", 0400, q_dentry, &q->index);
+ debugfs_create_x64("base_pa", 0400, q_dentry, &q->base_pa);
+ if (qcq->flags & IONIC_QCQ_F_SG) {
+ debugfs_create_x64("sg_base_pa", 0400, q_dentry,
+ &q->sg_base_pa);
+ debugfs_create_u32("sg_desc_size", 0400, q_dentry,
+ &q->sg_desc_size);
+ }
+ debugfs_create_u32("num_descs", 0400, q_dentry, &q->num_descs);
+ debugfs_create_u32("desc_size", 0400, q_dentry, &q->desc_size);
+ debugfs_create_u32("pid", 0400, q_dentry, &q->pid);
+ debugfs_create_u32("qid", 0400, q_dentry, &q->hw_index);
+ debugfs_create_u32("qtype", 0400, q_dentry, &q->hw_type);
+ debugfs_create_u64("drop", 0400, q_dentry, &q->drop);
+ debugfs_create_u64("stop", 0400, q_dentry, &q->stop);
+ debugfs_create_u64("wake", 0400, q_dentry, &q->wake);
+
+ debugfs_create_file("tail", 0400, q_dentry, q, &q_tail_fops);
+ debugfs_create_file("head", 0400, q_dentry, q, &q_head_fops);
+
+ desc_blob = devm_kzalloc(dev, sizeof(*desc_blob), GFP_KERNEL);
+ if (!desc_blob)
+ return;
+ desc_blob->data = q->base;
+ desc_blob->size = (unsigned long)q->num_descs * q->desc_size;
+ debugfs_create_blob("desc_blob", 0400, q_dentry, desc_blob);
+
+ if (qcq->flags & IONIC_QCQ_F_SG) {
+ desc_blob = devm_kzalloc(dev, sizeof(*desc_blob), GFP_KERNEL);
+ if (!desc_blob)
+ return;
+ desc_blob->data = q->sg_base;
+ desc_blob->size = (unsigned long)q->num_descs * q->sg_desc_size;
+ debugfs_create_blob("sg_desc_blob", 0400, q_dentry,
+ desc_blob);
+ }
+
+ cq_dentry = debugfs_create_dir("cq", qcq->dentry);
+
+ debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa);
+ debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs);
+ debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size);
+ debugfs_create_u8("done_color", 0400, cq_dentry,
+ (u8 *)&cq->done_color);
+
+ debugfs_create_file("tail", 0400, cq_dentry, cq, &cq_tail_fops);
+
+ desc_blob = devm_kzalloc(dev, sizeof(*desc_blob), GFP_KERNEL);
+ if (!desc_blob)
+ return;
+ desc_blob->data = cq->base;
+ desc_blob->size = (unsigned long)cq->num_descs * cq->desc_size;
+ debugfs_create_blob("desc_blob", 0400, cq_dentry, desc_blob);
+
+ if (qcq->flags & IONIC_QCQ_F_INTR) {
+ intr_dentry = debugfs_create_dir("intr", qcq->dentry);
+
+ debugfs_create_u32("index", 0400, intr_dentry,
+ &intr->index);
+ debugfs_create_u32("vector", 0400, intr_dentry,
+ &intr->vector);
+
+ intr_ctrl_regset = devm_kzalloc(dev, sizeof(*intr_ctrl_regset),
+ GFP_KERNEL);
+ if (!intr_ctrl_regset)
+ return;
+ intr_ctrl_regset->regs = intr_ctrl_regs;
+ intr_ctrl_regset->nregs = ARRAY_SIZE(intr_ctrl_regs);
+ intr_ctrl_regset->base = &idev->intr_ctrl[intr->index];
+
+ debugfs_create_regset32("intr_ctrl", 0400, intr_dentry,
+ intr_ctrl_regset);
+ }
+
+ if (qcq->flags & IONIC_QCQ_F_NOTIFYQ) {
+ stats_dentry = debugfs_create_dir("notifyblock", qcq->dentry);
+
+ debugfs_create_u64("eid", 0400, stats_dentry,
+ (u64 *)&lif->info->status.eid);
+ debugfs_create_u16("link_status", 0400, stats_dentry,
+ (u16 *)&lif->info->status.link_status);
+ debugfs_create_u32("link_speed", 0400, stats_dentry,
+ (u32 *)&lif->info->status.link_speed);
+ debugfs_create_u16("link_down_count", 0400, stats_dentry,
+ (u16 *)&lif->info->status.link_down_count);
+ }
+}
+
+static int netdev_show(struct seq_file *seq, void *v)
+{
+ struct net_device *netdev = seq->private;
+
+ seq_printf(seq, "%s\n", netdev->name);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(netdev);
+
+void ionic_debugfs_add_lif(struct ionic_lif *lif)
+{
+ lif->dentry = debugfs_create_dir(lif->name, lif->ionic->dentry);
+ debugfs_create_file("netdev", 0400, lif->dentry,
+ lif->netdev, &netdev_fops);
+}
+
+void ionic_debugfs_del_lif(struct ionic_lif *lif)
+{
+ debugfs_remove_recursive(lif->dentry);
+ lif->dentry = NULL;
+}
+
+void ionic_debugfs_del_qcq(struct ionic_qcq *qcq)
+{
+ debugfs_remove_recursive(qcq->dentry);
+ qcq->dentry = NULL;
+}
+
+#endif
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.h b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.h
new file mode 100644
index 000000000000..c44ebde170b6
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#ifndef _IONIC_DEBUGFS_H_
+#define _IONIC_DEBUGFS_H_
+
+#include <linux/debugfs.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+void ionic_debugfs_create(void);
+void ionic_debugfs_destroy(void);
+void ionic_debugfs_add_dev(struct ionic *ionic);
+void ionic_debugfs_del_dev(struct ionic *ionic);
+void ionic_debugfs_add_ident(struct ionic *ionic);
+void ionic_debugfs_add_sizes(struct ionic *ionic);
+void ionic_debugfs_add_lif(struct ionic_lif *lif);
+void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq);
+void ionic_debugfs_del_lif(struct ionic_lif *lif);
+void ionic_debugfs_del_qcq(struct ionic_qcq *qcq);
+#else
+static inline void ionic_debugfs_create(void) { }
+static inline void ionic_debugfs_destroy(void) { }
+static inline void ionic_debugfs_add_dev(struct ionic *ionic) { }
+static inline void ionic_debugfs_del_dev(struct ionic *ionic) { }
+static inline void ionic_debugfs_add_ident(struct ionic *ionic) { }
+static inline void ionic_debugfs_add_sizes(struct ionic *ionic) { }
+static inline void ionic_debugfs_add_lif(struct ionic_lif *lif) { }
+static inline void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq) { }
+static inline void ionic_debugfs_del_lif(struct ionic_lif *lif) { }
+static inline void ionic_debugfs_del_qcq(struct ionic_qcq *qcq) { }
+#endif
+
+#endif /* _IONIC_DEBUGFS_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
new file mode 100644
index 000000000000..d168a6435322
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/etherdevice.h>
+#include "ionic.h"
+#include "ionic_dev.h"
+#include "ionic_lif.h"
+
+void ionic_init_devinfo(struct ionic *ionic)
+{
+ struct ionic_dev *idev = &ionic->idev;
+
+ idev->dev_info.asic_type = ioread8(&idev->dev_info_regs->asic_type);
+ idev->dev_info.asic_rev = ioread8(&idev->dev_info_regs->asic_rev);
+
+ memcpy_fromio(idev->dev_info.fw_version,
+ idev->dev_info_regs->fw_version,
+ IONIC_DEVINFO_FWVERS_BUFLEN);
+
+ memcpy_fromio(idev->dev_info.serial_num,
+ idev->dev_info_regs->serial_num,
+ IONIC_DEVINFO_SERIAL_BUFLEN);
+
+ idev->dev_info.fw_version[IONIC_DEVINFO_FWVERS_BUFLEN] = 0;
+ idev->dev_info.serial_num[IONIC_DEVINFO_SERIAL_BUFLEN] = 0;
+
+ dev_dbg(ionic->dev, "fw_version %s\n", idev->dev_info.fw_version);
+}
+
+int ionic_dev_setup(struct ionic *ionic)
+{
+ struct ionic_dev_bar *bar = ionic->bars;
+ unsigned int num_bars = ionic->num_bars;
+ struct ionic_dev *idev = &ionic->idev;
+ struct device *dev = ionic->dev;
+ u32 sig;
+
+ /* BAR0: dev_cmd and interrupts */
+ if (num_bars < 1) {
+ dev_err(dev, "No bars found, aborting\n");
+ return -EFAULT;
+ }
+
+ if (bar->len < IONIC_BAR0_SIZE) {
+ dev_err(dev, "Resource bar size %lu too small, aborting\n",
+ bar->len);
+ return -EFAULT;
+ }
+
+ idev->dev_info_regs = bar->vaddr + IONIC_BAR0_DEV_INFO_REGS_OFFSET;
+ idev->dev_cmd_regs = bar->vaddr + IONIC_BAR0_DEV_CMD_REGS_OFFSET;
+ idev->intr_status = bar->vaddr + IONIC_BAR0_INTR_STATUS_OFFSET;
+ idev->intr_ctrl = bar->vaddr + IONIC_BAR0_INTR_CTRL_OFFSET;
+
+ sig = ioread32(&idev->dev_info_regs->signature);
+ if (sig != IONIC_DEV_INFO_SIGNATURE) {
+ dev_err(dev, "Incompatible firmware signature %x", sig);
+ return -EFAULT;
+ }
+
+ ionic_init_devinfo(ionic);
+
+ /* BAR1: doorbells */
+ bar++;
+ if (num_bars < 2) {
+ dev_err(dev, "Doorbell bar missing, aborting\n");
+ return -EFAULT;
+ }
+
+ idev->db_pages = bar->vaddr;
+ idev->phy_db_pages = bar->bus_addr;
+
+ return 0;
+}
+
+void ionic_dev_teardown(struct ionic *ionic)
+{
+ /* place holder */
+}
+
+/* Devcmd Interface */
+u8 ionic_dev_cmd_status(struct ionic_dev *idev)
+{
+ return ioread8(&idev->dev_cmd_regs->comp.comp.status);
+}
+
+bool ionic_dev_cmd_done(struct ionic_dev *idev)
+{
+ return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE;
+}
+
+void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp)
+{
+ memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp));
+}
+
+void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd)
+{
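+ /* Copy the command into the device's register space, clear the
+ * previous completion's done flag, then ring the doorbell to hand
+ * the command to the firmware.
+ */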
+ memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd));
+ iowrite32(0, &idev->dev_cmd_regs->done);
+ iowrite32(1, &idev->dev_cmd_regs->doorbell);
+}
+
+/* Device commands */
+void ionic_dev_cmd_identify(struct ionic_dev *idev, u8 ver)
+{
+ union ionic_dev_cmd cmd = {
+ .identify.opcode = IONIC_CMD_IDENTIFY,
+ .identify.ver = ver,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+void ionic_dev_cmd_init(struct ionic_dev *idev)
+{
+ union ionic_dev_cmd cmd = {
+ .init.opcode = IONIC_CMD_INIT,
+ .init.type = 0,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+void ionic_dev_cmd_reset(struct ionic_dev *idev)
+{
+ union ionic_dev_cmd cmd = {
+ .reset.opcode = IONIC_CMD_RESET,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+/* Port commands */
+void ionic_dev_cmd_port_identify(struct ionic_dev *idev)
+{
+ union ionic_dev_cmd cmd = {
+ .port_init.opcode = IONIC_CMD_PORT_IDENTIFY,
+ .port_init.index = 0,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+void ionic_dev_cmd_port_init(struct ionic_dev *idev)
+{
+ union ionic_dev_cmd cmd = {
+ .port_init.opcode = IONIC_CMD_PORT_INIT,
+ .port_init.index = 0,
+ .port_init.info_pa = cpu_to_le64(idev->port_info_pa),
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+void ionic_dev_cmd_port_reset(struct ionic_dev *idev)
+{
+ union ionic_dev_cmd cmd = {
+ .port_reset.opcode = IONIC_CMD_PORT_RESET,
+ .port_reset.index = 0,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+void ionic_dev_cmd_port_state(struct ionic_dev *idev, u8 state)
+{
+ union ionic_dev_cmd cmd = {
+ .port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
+ .port_setattr.index = 0,
+ .port_setattr.attr = IONIC_PORT_ATTR_STATE,
+ .port_setattr.state = state,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+void ionic_dev_cmd_port_speed(struct ionic_dev *idev, u32 speed)
+{
+ union ionic_dev_cmd cmd = {
+ .port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
+ .port_setattr.index = 0,
+ .port_setattr.attr = IONIC_PORT_ATTR_SPEED,
+ .port_setattr.speed = cpu_to_le32(speed),
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable)
+{
+ union ionic_dev_cmd cmd = {
+ .port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
+ .port_setattr.index = 0,
+ .port_setattr.attr = IONIC_PORT_ATTR_AUTONEG,
+ .port_setattr.an_enable = an_enable,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type)
+{
+ union ionic_dev_cmd cmd = {
+ .port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
+ .port_setattr.index = 0,
+ .port_setattr.attr = IONIC_PORT_ATTR_FEC,
+ .port_setattr.fec_type = fec_type,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type)
+{
+ union ionic_dev_cmd cmd = {
+ .port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
+ .port_setattr.index = 0,
+ .port_setattr.attr = IONIC_PORT_ATTR_PAUSE,
+ .port_setattr.pause_type = pause_type,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+/* LIF commands */
+void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver)
+{
+ union ionic_dev_cmd cmd = {
+ .lif_identify.opcode = IONIC_CMD_LIF_IDENTIFY,
+ .lif_identify.type = type,
+ .lif_identify.ver = ver,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index,
+ dma_addr_t info_pa)
+{
+ union ionic_dev_cmd cmd = {
+ .lif_init.opcode = IONIC_CMD_LIF_INIT,
+ .lif_init.index = cpu_to_le16(lif_index),
+ .lif_init.info_pa = cpu_to_le64(info_pa),
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+void ionic_dev_cmd_lif_reset(struct ionic_dev *idev, u16 lif_index)
+{
+ union ionic_dev_cmd cmd = {
+ .lif_init.opcode = IONIC_CMD_LIF_RESET,
+ .lif_init.index = cpu_to_le16(lif_index),
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
+ u16 lif_index, u16 intr_index)
+{
+ struct ionic_queue *q = &qcq->q;
+ struct ionic_cq *cq = &qcq->cq;
+
+ union ionic_dev_cmd cmd = {
+ .q_init.opcode = IONIC_CMD_Q_INIT,
+ .q_init.lif_index = cpu_to_le16(lif_index),
+ .q_init.type = q->type,
+ .q_init.index = cpu_to_le32(q->index),
+ .q_init.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
+ IONIC_QINIT_F_ENA),
+ .q_init.pid = cpu_to_le16(q->pid),
+ .q_init.intr_index = cpu_to_le16(intr_index),
+ .q_init.ring_size = ilog2(q->num_descs),
+ .q_init.ring_base = cpu_to_le64(q->base_pa),
+ .q_init.cq_ring_base = cpu_to_le64(cq->base_pa),
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
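+/* Each LIF owns a contiguous block of dbid_count doorbell pages;
+ * pid selects one page within this LIF's block.
+ */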
+int ionic_db_page_num(struct ionic_lif *lif, int pid)
+{
+ return (lif->hw_index * lif->dbid_count) + pid;
+}
+
+int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
+ struct ionic_intr_info *intr,
+ unsigned int num_descs, size_t desc_size)
+{
+ struct ionic_cq_info *cur;
+ unsigned int ring_size;
+ unsigned int i;
+
+ if (desc_size == 0 || !is_power_of_2(num_descs))
+ return -EINVAL;
+
+ ring_size = ilog2(num_descs);
+ if (ring_size < 2 || ring_size > 16)
+ return -EINVAL;
+
+ cq->lif = lif;
+ cq->bound_intr = intr;
+ cq->num_descs = num_descs;
+ cq->desc_size = desc_size;
+ cq->tail = cq->info;
+ cq->done_color = 1;
+
+ cur = cq->info;
+
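+ /* Link the info array into a singly linked ring; the final entry
+ * points back to the first and is flagged 'last' so the service
+ * loop knows when to flip done_color.
+ */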
+ for (i = 0; i < num_descs; i++) {
+ if (i + 1 == num_descs) {
+ cur->next = cq->info;
+ cur->last = true;
+ } else {
+ cur->next = cur + 1;
+ }
+ cur->index = i;
+ cur++;
+ }
+
+ return 0;
+}
+
+void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa)
+{
+ struct ionic_cq_info *cur;
+ unsigned int i;
+
+ cq->base = base;
+ cq->base_pa = base_pa;
+
+ for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++)
+ cur->cq_desc = base + (i * cq->desc_size);
+}
+
+void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q)
+{
+ cq->bound_q = q;
+}
+
+unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
+ ionic_cq_cb cb, ionic_cq_done_cb done_cb,
+ void *done_arg)
+{
+ unsigned int work_done = 0;
+
+ if (work_to_do == 0)
+ return 0;
+
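+ /* cb() is expected to check each entry against done_color; the
+ * expected color flips every time the ring wraps, i.e. when the
+ * entry flagged 'last' is consumed.
+ */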
+ while (cb(cq, cq->tail)) {
+ if (cq->tail->last)
+ cq->done_color = !cq->done_color;
+ cq->tail = cq->tail->next;
+ DEBUG_STATS_CQE_CNT(cq);
+
+ if (++work_done >= work_to_do)
+ break;
+ }
+
+ if (work_done && done_cb)
+ done_cb(done_arg);
+
+ return work_done;
+}
+
+int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
+ struct ionic_queue *q, unsigned int index, const char *name,
+ unsigned int num_descs, size_t desc_size,
+ size_t sg_desc_size, unsigned int pid)
+{
+ struct ionic_desc_info *cur;
+ unsigned int ring_size;
+ unsigned int i;
+
+ if (desc_size == 0 || !is_power_of_2(num_descs))
+ return -EINVAL;
+
+ ring_size = ilog2(num_descs);
+ if (ring_size < 2 || ring_size > 16)
+ return -EINVAL;
+
+ q->lif = lif;
+ q->idev = idev;
+ q->index = index;
+ q->num_descs = num_descs;
+ q->desc_size = desc_size;
+ q->sg_desc_size = sg_desc_size;
+ q->tail = q->info;
+ q->head = q->tail;
+ q->pid = pid;
+
+ snprintf(q->name, sizeof(q->name), "L%d-%s%u", lif->index, name, index);
+
+ cur = q->info;
+
+ for (i = 0; i < num_descs; i++) {
+ if (i + 1 == num_descs)
+ cur->next = q->info;
+ else
+ cur->next = cur + 1;
+ cur->index = i;
+ cur->left = num_descs - i;
+ cur++;
+ }
+
+ return 0;
+}
+
+void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
+{
+ struct ionic_desc_info *cur;
+ unsigned int i;
+
+ q->base = base;
+ q->base_pa = base_pa;
+
+ for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
+ cur->desc = base + (i * q->desc_size);
+}
+
+void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
+{
+ struct ionic_desc_info *cur;
+ unsigned int i;
+
+ q->sg_base = base;
+ q->sg_base_pa = base_pa;
+
+ for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
+ cur->sg_desc = base + (i * q->sg_desc_size);
+}
+
+void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
+ void *cb_arg)
+{
+ struct device *dev = q->lif->ionic->dev;
+ struct ionic_lif *lif = q->lif;
+
+ q->head->cb = cb;
+ q->head->cb_arg = cb_arg;
+ q->head = q->head->next;
+
+ dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n",
+ q->lif->index, q->name, q->hw_type, q->hw_index,
+ q->head->index, ring_doorbell);
+
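+ /* q->dbval carries the queue identification bits; OR-ing in the
+ * new head index publishes the producer position to the device.
+ */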
+ if (ring_doorbell)
+ ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
+ q->dbval | q->head->index);
+}
+
+static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
+{
+ unsigned int mask, tail, head;
+
+ mask = q->num_descs - 1;
+ tail = q->tail->index;
+ head = q->head->index;
+
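+ /* num_descs is a power of two, so the mask wraps the subtraction:
+ * this tests whether pos lies in the posted range [tail, head).
+ */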
+ return ((pos - tail) & mask) < ((head - tail) & mask);
+}
+
+void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
+ unsigned int stop_index)
+{
+ struct ionic_desc_info *desc_info;
+ ionic_desc_cb cb;
+ void *cb_arg;
+
+ /* check for empty queue */
+ if (q->tail->index == q->head->index)
+ return;
+
+ /* stop index must be for a descriptor that is not yet completed */
+ if (unlikely(!ionic_q_is_posted(q, stop_index)))
+ dev_err(q->lif->ionic->dev,
+ "ionic stop is not posted %s stop %u tail %u head %u\n",
+ q->name, stop_index, q->tail->index, q->head->index);
+
+ do {
+ desc_info = q->tail;
+ q->tail = desc_info->next;
+
+ cb = desc_info->cb;
+ cb_arg = desc_info->cb_arg;
+
+ desc_info->cb = NULL;
+ desc_info->cb_arg = NULL;
+
+ if (cb)
+ cb(q, desc_info, cq_info, cb_arg);
+ } while (desc_info->index != stop_index);
+}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
new file mode 100644
index 000000000000..9610aeb7d5f4
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#ifndef _IONIC_DEV_H_
+#define _IONIC_DEV_H_
+
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+#include "ionic_if.h"
+#include "ionic_regs.h"
+
+#define IONIC_MIN_MTU ETH_MIN_MTU
+#define IONIC_MAX_MTU 9194
+#define IONIC_MAX_TXRX_DESC 16384
+#define IONIC_MIN_TXRX_DESC 16
+#define IONIC_DEF_TXRX_DESC 4096
+#define IONIC_LIFS_MAX 1024
+#define IONIC_ITR_COAL_USEC_DEFAULT 64
+
+#define IONIC_DEV_CMD_REG_VERSION 1
+#define IONIC_DEV_INFO_REG_COUNT 32
+#define IONIC_DEV_CMD_REG_COUNT 32
+
+struct ionic_dev_bar {
+ void __iomem *vaddr;
+ phys_addr_t bus_addr;
+ unsigned long len;
+ int res_index;
+};
+
+/* Registers */
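+/* These structures are shared with the device; the asserts catch
+ * accidental changes to their sizes.
+ */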
+static_assert(sizeof(struct ionic_intr) == 32);
+
+static_assert(sizeof(struct ionic_doorbell) == 8);
+static_assert(sizeof(struct ionic_intr_status) == 8);
+static_assert(sizeof(union ionic_dev_regs) == 4096);
+static_assert(sizeof(union ionic_dev_info_regs) == 2048);
+static_assert(sizeof(union ionic_dev_cmd_regs) == 2048);
+static_assert(sizeof(struct ionic_lif_stats) == 1024);
+
+static_assert(sizeof(struct ionic_admin_cmd) == 64);
+static_assert(sizeof(struct ionic_admin_comp) == 16);
+static_assert(sizeof(struct ionic_nop_cmd) == 64);
+static_assert(sizeof(struct ionic_nop_comp) == 16);
+
+/* Device commands */
+static_assert(sizeof(struct ionic_dev_identify_cmd) == 64);
+static_assert(sizeof(struct ionic_dev_identify_comp) == 16);
+static_assert(sizeof(struct ionic_dev_init_cmd) == 64);
+static_assert(sizeof(struct ionic_dev_init_comp) == 16);
+static_assert(sizeof(struct ionic_dev_reset_cmd) == 64);
+static_assert(sizeof(struct ionic_dev_reset_comp) == 16);
+static_assert(sizeof(struct ionic_dev_getattr_cmd) == 64);
+static_assert(sizeof(struct ionic_dev_getattr_comp) == 16);
+static_assert(sizeof(struct ionic_dev_setattr_cmd) == 64);
+static_assert(sizeof(struct ionic_dev_setattr_comp) == 16);
+
+/* Port commands */
+static_assert(sizeof(struct ionic_port_identify_cmd) == 64);
+static_assert(sizeof(struct ionic_port_identify_comp) == 16);
+static_assert(sizeof(struct ionic_port_init_cmd) == 64);
+static_assert(sizeof(struct ionic_port_init_comp) == 16);
+static_assert(sizeof(struct ionic_port_reset_cmd) == 64);
+static_assert(sizeof(struct ionic_port_reset_comp) == 16);
+static_assert(sizeof(struct ionic_port_getattr_cmd) == 64);
+static_assert(sizeof(struct ionic_port_getattr_comp) == 16);
+static_assert(sizeof(struct ionic_port_setattr_cmd) == 64);
+static_assert(sizeof(struct ionic_port_setattr_comp) == 16);
+
+/* LIF commands */
+static_assert(sizeof(struct ionic_lif_init_cmd) == 64);
+static_assert(sizeof(struct ionic_lif_init_comp) == 16);
+static_assert(sizeof(struct ionic_lif_reset_cmd) == 64);
+static_assert(sizeof(ionic_lif_reset_comp) == 16);
+static_assert(sizeof(struct ionic_lif_getattr_cmd) == 64);
+static_assert(sizeof(struct ionic_lif_getattr_comp) == 16);
+static_assert(sizeof(struct ionic_lif_setattr_cmd) == 64);
+static_assert(sizeof(struct ionic_lif_setattr_comp) == 16);
+
+static_assert(sizeof(struct ionic_q_init_cmd) == 64);
+static_assert(sizeof(struct ionic_q_init_comp) == 16);
+static_assert(sizeof(struct ionic_q_control_cmd) == 64);
+static_assert(sizeof(ionic_q_control_comp) == 16);
+
+static_assert(sizeof(struct ionic_rx_mode_set_cmd) == 64);
+static_assert(sizeof(ionic_rx_mode_set_comp) == 16);
+static_assert(sizeof(struct ionic_rx_filter_add_cmd) == 64);
+static_assert(sizeof(struct ionic_rx_filter_add_comp) == 16);
+static_assert(sizeof(struct ionic_rx_filter_del_cmd) == 64);
+static_assert(sizeof(ionic_rx_filter_del_comp) == 16);
+
+/* RDMA commands */
+static_assert(sizeof(struct ionic_rdma_reset_cmd) == 64);
+static_assert(sizeof(struct ionic_rdma_queue_cmd) == 64);
+
+/* Events */
+static_assert(sizeof(struct ionic_notifyq_cmd) == 4);
+static_assert(sizeof(union ionic_notifyq_comp) == 64);
+static_assert(sizeof(struct ionic_notifyq_event) == 64);
+static_assert(sizeof(struct ionic_link_change_event) == 64);
+static_assert(sizeof(struct ionic_reset_event) == 64);
+static_assert(sizeof(struct ionic_heartbeat_event) == 64);
+static_assert(sizeof(struct ionic_log_event) == 64);
+
+/* I/O */
+static_assert(sizeof(struct ionic_txq_desc) == 16);
+static_assert(sizeof(struct ionic_txq_sg_desc) == 128);
+static_assert(sizeof(struct ionic_txq_comp) == 16);
+
+static_assert(sizeof(struct ionic_rxq_desc) == 16);
+static_assert(sizeof(struct ionic_rxq_sg_desc) == 128);
+static_assert(sizeof(struct ionic_rxq_comp) == 16);
+
+struct ionic_devinfo {
+ u8 asic_type;
+ u8 asic_rev;
+ char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN + 1];
+ char serial_num[IONIC_DEVINFO_SERIAL_BUFLEN + 1];
+};
+
+struct ionic_dev {
+ union ionic_dev_info_regs __iomem *dev_info_regs;
+ union ionic_dev_cmd_regs __iomem *dev_cmd_regs;
+
+ u64 __iomem *db_pages;
+ dma_addr_t phy_db_pages;
+
+ struct ionic_intr __iomem *intr_ctrl;
+ u64 __iomem *intr_status;
+
+ u32 port_info_sz;
+ struct ionic_port_info *port_info;
+ dma_addr_t port_info_pa;
+
+ struct ionic_devinfo dev_info;
+};
+
+struct ionic_cq_info {
+ void *cq_desc;
+ struct ionic_cq_info *next;
+ unsigned int index;
+ bool last;
+};
+
+struct ionic_queue;
+struct ionic_qcq;
+struct ionic_desc_info;
+
+typedef void (*ionic_desc_cb)(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info, void *cb_arg);
+
+struct ionic_desc_info {
+ void *desc;
+ void *sg_desc;
+ struct ionic_desc_info *next;
+ unsigned int index;
+ unsigned int left;
+ ionic_desc_cb cb;
+ void *cb_arg;
+};
+
+#define QUEUE_NAME_MAX_SZ 32
+
+struct ionic_queue {
+ u64 dbell_count;
+ u64 drop;
+ u64 stop;
+ u64 wake;
+ struct ionic_lif *lif;
+ struct ionic_desc_info *info;
+ struct ionic_desc_info *tail;
+ struct ionic_desc_info *head;
+ struct ionic_dev *idev;
+ unsigned int index;
+ unsigned int type;
+ unsigned int hw_index;
+ unsigned int hw_type;
+ u64 dbval;
+ void *base;
+ void *sg_base;
+ dma_addr_t base_pa;
+ dma_addr_t sg_base_pa;
+ unsigned int num_descs;
+ unsigned int desc_size;
+ unsigned int sg_desc_size;
+ unsigned int pid;
+ char name[QUEUE_NAME_MAX_SZ];
+};
+
+#define INTR_INDEX_NOT_ASSIGNED -1
+#define INTR_NAME_MAX_SZ 32
+
+struct ionic_intr_info {
+ char name[INTR_NAME_MAX_SZ];
+ unsigned int index;
+ unsigned int vector;
+ u64 rearm_count;
+ unsigned int cpu;
+ cpumask_t affinity_mask;
+};
+
+struct ionic_cq {
+ void *base;
+ dma_addr_t base_pa;
+ struct ionic_lif *lif;
+ struct ionic_cq_info *info;
+ struct ionic_cq_info *tail;
+ struct ionic_queue *bound_q;
+ struct ionic_intr_info *bound_intr;
+ bool done_color;
+ unsigned int num_descs;
+ u64 compl_count;
+ unsigned int desc_size;
+};
+
+struct ionic;
+
+static inline void ionic_intr_init(struct ionic_dev *idev,
+ struct ionic_intr_info *intr,
+ unsigned long index)
+{
+ ionic_intr_clean(idev->intr_ctrl, index);
+ intr->index = index;
+}
+
+static inline unsigned int ionic_q_space_avail(struct ionic_queue *q)
+{
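+ /* One slot is always left unused so a full ring can be told apart
+ * from an empty one (head == tail); hence the -1/+1 below.
+ */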
+ unsigned int avail = q->tail->index;
+
+ if (q->head->index >= avail)
+ avail += q->head->left - 1;
+ else
+ avail -= q->head->index + 1;
+
+ return avail;
+}
+
+static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want)
+{
+ return ionic_q_space_avail(q) >= want;
+}
+
+void ionic_init_devinfo(struct ionic *ionic);
+int ionic_dev_setup(struct ionic *ionic);
+void ionic_dev_teardown(struct ionic *ionic);
+
+void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd);
+u8 ionic_dev_cmd_status(struct ionic_dev *idev);
+bool ionic_dev_cmd_done(struct ionic_dev *idev);
+void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp);
+
+void ionic_dev_cmd_identify(struct ionic_dev *idev, u8 ver);
+void ionic_dev_cmd_init(struct ionic_dev *idev);
+void ionic_dev_cmd_reset(struct ionic_dev *idev);
+
+void ionic_dev_cmd_port_identify(struct ionic_dev *idev);
+void ionic_dev_cmd_port_init(struct ionic_dev *idev);
+void ionic_dev_cmd_port_reset(struct ionic_dev *idev);
+void ionic_dev_cmd_port_state(struct ionic_dev *idev, u8 state);
+void ionic_dev_cmd_port_speed(struct ionic_dev *idev, u32 speed);
+void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable);
+void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type);
+void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type);
+
+void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver);
+void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index,
+ dma_addr_t addr);
+void ionic_dev_cmd_lif_reset(struct ionic_dev *idev, u16 lif_index);
+void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
+ u16 lif_index, u16 intr_index);
+
+int ionic_db_page_num(struct ionic_lif *lif, int pid);
+
+int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
+ struct ionic_intr_info *intr,
+ unsigned int num_descs, size_t desc_size);
+void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa);
+void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q);
+typedef bool (*ionic_cq_cb)(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+typedef void (*ionic_cq_done_cb)(void *done_arg);
+unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
+ ionic_cq_cb cb, ionic_cq_done_cb done_cb,
+ void *done_arg);
+
+int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
+ struct ionic_queue *q, unsigned int index, const char *name,
+ unsigned int num_descs, size_t desc_size,
+ size_t sg_desc_size, unsigned int pid);
+void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
+void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
+void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
+ void *cb_arg);
+void ionic_q_rewind(struct ionic_queue *q, struct ionic_desc_info *start);
+void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
+ unsigned int stop_index);
+
+#endif /* _IONIC_DEV_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
new file mode 100644
index 000000000000..af1647afa4e8
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
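+/* Layout of one interrupt control block, exposed below as a debugfs
+ * regset; offsets are in bytes from the start of the block.
+ */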
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include "ionic.h"
+#include "ionic_bus.h"
+#include "ionic_lif.h"
+#include "ionic_devlink.h"
+
+static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct ionic *ionic = devlink_priv(dl);
+ struct ionic_dev *idev = &ionic->idev;
+ char buf[16];
+ int err = 0;
+
+ err = devlink_info_driver_name_put(req, IONIC_DRV_NAME);
+ if (err)
+ goto info_out;
+
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ idev->dev_info.fw_version);
+ if (err)
+ goto info_out;
+
+ snprintf(buf, sizeof(buf), "0x%x", idev->dev_info.asic_type);
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ buf);
+ if (err)
+ goto info_out;
+
+ snprintf(buf, sizeof(buf), "0x%x", idev->dev_info.asic_rev);
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV,
+ buf);
+ if (err)
+ goto info_out;
+
+ err = devlink_info_serial_number_put(req, idev->dev_info.serial_num);
+
+info_out:
+ return err;
+}
+
+static const struct devlink_ops ionic_dl_ops = {
+ .info_get = ionic_dl_info_get,
+};
+
+struct ionic *ionic_devlink_alloc(struct device *dev)
+{
+ struct devlink *dl;
+
+ dl = devlink_alloc(&ionic_dl_ops, sizeof(struct ionic));
+ if (!dl)
+ return NULL;
+
+ return devlink_priv(dl);
+}
+
+void ionic_devlink_free(struct ionic *ionic)
+{
+ struct devlink *dl = priv_to_devlink(ionic);
+
+ devlink_free(dl);
+}
+
+int ionic_devlink_register(struct ionic *ionic)
+{
+ struct devlink *dl = priv_to_devlink(ionic);
+ int err;
+
+ err = devlink_register(dl, ionic->dev);
+ if (err) {
+ dev_warn(ionic->dev, "devlink_register failed: %d\n", err);
+ return err;
+ }
+
+ devlink_port_attrs_set(&ionic->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ 0, false, 0, NULL, 0);
+ err = devlink_port_register(dl, &ionic->dl_port, 0);
+ if (err)
+ dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
+ else
+ devlink_port_type_eth_set(&ionic->dl_port,
+ ionic->master_lif->netdev);
+
+ return err;
+}
+
+void ionic_devlink_unregister(struct ionic *ionic)
+{
+ struct devlink *dl = priv_to_devlink(ionic);
+
+ devlink_port_unregister(&ionic->dl_port);
+ devlink_unregister(dl);
+}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.h b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h
new file mode 100644
index 000000000000..0690172fc57a
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#ifndef _IONIC_DEVLINK_H_
+#define _IONIC_DEVLINK_H_
+
+#include <net/devlink.h>
+
+struct ionic *ionic_devlink_alloc(struct device *dev);
+void ionic_devlink_free(struct ionic *ionic);
+int ionic_devlink_register(struct ionic *ionic);
+void ionic_devlink_unregister(struct ionic *ionic);
+
+#endif /* _IONIC_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
new file mode 100644
index 000000000000..7d10265f782a
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include "ionic.h"
+#include "ionic_bus.h"
+#include "ionic_lif.h"
+#include "ionic_ethtool.h"
+#include "ionic_stats.h"
+
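+/* Each private flag's BIT() position must match its string's index
+ * in this array.
+ */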
+static const char ionic_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define PRIV_F_SW_DBG_STATS BIT(0)
+ "sw-dbg-stats",
+};
+#define PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings)
+
+static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf)
+{
+ u32 i;
+
+ for (i = 0; i < ionic_num_stats_grps; i++)
+ ionic_stats_groups[i].get_strings(lif, &buf);
+}
+
+static void ionic_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *buf)
+{
+ struct ionic_lif *lif;
+ u32 i;
+
+ lif = netdev_priv(netdev);
+
+ memset(buf, 0, stats->n_stats * sizeof(*buf));
+ for (i = 0; i < ionic_num_stats_grps; i++)
+ ionic_stats_groups[i].get_values(lif, &buf);
+}
+
+static int ionic_get_stats_count(struct ionic_lif *lif)
+{
+ int i, num_stats = 0;
+
+ for (i = 0; i < ionic_num_stats_grps; i++)
+ num_stats += ionic_stats_groups[i].get_count(lif);
+
+ return num_stats;
+}
+
+static int ionic_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ int count = 0;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ count = ionic_get_stats_count(lif);
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ count = PRIV_FLAGS_COUNT;
+ break;
+ }
+ return count;
+}
+
+static void ionic_get_strings(struct net_device *netdev,
+ u32 sset, u8 *buf)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ ionic_get_stats_strings(lif, buf);
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(buf, ionic_priv_flags_strings,
+ PRIV_FLAGS_COUNT * ETH_GSTRING_LEN);
+ break;
+ }
+}
+
+static void ionic_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+
+ strlcpy(drvinfo->driver, IONIC_DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, IONIC_DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, ionic->idev.dev_info.fw_version,
+ sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, ionic_bus_info(ionic),
+ sizeof(drvinfo->bus_info));
+}
+
+static int ionic_get_regs_len(struct net_device *netdev)
+{
+ return (IONIC_DEV_INFO_REG_COUNT + IONIC_DEV_CMD_REG_COUNT) * sizeof(u32);
+}
+
+static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ unsigned int offset;
+ unsigned int size;
+
+ regs->version = IONIC_DEV_CMD_REG_VERSION;
+
+ offset = 0;
+ size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32);
+ memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size);
+
+ offset += size;
+ size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32);
+ memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size);
+}
+
+static int ionic_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_dev *idev = &lif->ionic->idev;
+ int copper_seen = 0;
+
+ ethtool_link_ksettings_zero_link_mode(ks, supported);
+
+ /* The port_info data is found in a DMA space that the NIC keeps
+ * up-to-date, so there's no need to request the data from the
+ * NIC; we already have it in our memory space.
+ */
+
+ switch (le16_to_cpu(idev->port_info->status.xcvr.pid)) {
+ /* Copper */
+ case IONIC_XCVR_PID_QSFP_100G_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseCR4_Full);
+ copper_seen++;
+ break;
+ case IONIC_XCVR_PID_QSFP_40GBASE_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
+ copper_seen++;
+ break;
+ case IONIC_XCVR_PID_SFP_25GBASE_CR_S:
+ case IONIC_XCVR_PID_SFP_25GBASE_CR_L:
+ case IONIC_XCVR_PID_SFP_25GBASE_CR_N:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+ copper_seen++;
+ break;
+ case IONIC_XCVR_PID_SFP_10GBASE_AOC:
+ case IONIC_XCVR_PID_SFP_10GBASE_CU:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseCR_Full);
+ copper_seen++;
+ break;
+
+ /* Fibre */
+ case IONIC_XCVR_PID_QSFP_100G_SR4:
+ case IONIC_XCVR_PID_QSFP_100G_AOC:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseSR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_100G_LR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseLR4_ER4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_100G_ER4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseLR4_ER4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_40GBASE_SR4:
+ case IONIC_XCVR_PID_QSFP_40GBASE_AOC:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseSR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_40GBASE_LR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseLR4_Full);
+ break;
+ case IONIC_XCVR_PID_SFP_25GBASE_SR:
+ case IONIC_XCVR_PID_SFP_25GBASE_AOC:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseSR_Full);
+ break;
+ case IONIC_XCVR_PID_SFP_10GBASE_SR:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseSR_Full);
+ break;
+ case IONIC_XCVR_PID_SFP_10GBASE_LR:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseLR_Full);
+ break;
+ case IONIC_XCVR_PID_SFP_10GBASE_LRM:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseLRM_Full);
+ break;
+ case IONIC_XCVR_PID_SFP_10GBASE_ER:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseER_Full);
+ break;
+ case IONIC_XCVR_PID_UNKNOWN:
+ /* This means there's no module plugged in */
+ break;
+ default:
+ dev_info(lif->ionic->dev, "unknown xcvr type pid=%d / 0x%x\n",
+ idev->port_info->status.xcvr.pid,
+ idev->port_info->status.xcvr.pid);
+ break;
+ }
+
+ bitmap_copy(ks->link_modes.advertising, ks->link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ if (idev->port_info->config.fec_type == IONIC_PORT_FEC_TYPE_FC)
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_BASER);
+ else if (idev->port_info->config.fec_type == IONIC_PORT_FEC_TYPE_RS)
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+
+ ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
+
+ if (idev->port_info->status.xcvr.phy == IONIC_PHY_TYPE_COPPER ||
+ copper_seen)
+ ks->base.port = PORT_DA;
+ else if (idev->port_info->status.xcvr.phy == IONIC_PHY_TYPE_FIBER)
+ ks->base.port = PORT_FIBRE;
+ else
+ ks->base.port = PORT_NONE;
+
+ if (ks->base.port != PORT_NONE) {
+ ks->base.speed = le32_to_cpu(lif->info->status.link_speed);
+
+ if (le16_to_cpu(lif->info->status.link_status))
+ ks->base.duplex = DUPLEX_FULL;
+ else
+ ks->base.duplex = DUPLEX_UNKNOWN;
+
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+
+ if (idev->port_info->config.an_enable) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Autoneg);
+ ks->base.autoneg = AUTONEG_ENABLE;
+ }
+ }
+
+ return 0;
+}
+
+static int ionic_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *ks)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ struct ionic_dev *idev;
+ u32 req_rs, req_fc;
+ u8 fec_type;
+ int err = 0;
+
+ idev = &lif->ionic->idev;
+ fec_type = IONIC_PORT_FEC_TYPE_NONE;
+
+ /* set autoneg */
+ if (ks->base.autoneg != idev->port_info->config.an_enable) {
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_port_autoneg(idev, ks->base.autoneg);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+ if (err)
+ return err;
+ }
+
+ /* set speed */
+ if (ks->base.speed != le32_to_cpu(idev->port_info->config.speed)) {
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_port_speed(idev, ks->base.speed);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+ if (err)
+ return err;
+ }
+
+ /* set FEC */
+ req_rs = ethtool_link_ksettings_test_link_mode(ks, advertising, FEC_RS);
+ req_fc = ethtool_link_ksettings_test_link_mode(ks, advertising, FEC_BASER);
+ if (req_rs && req_fc) {
+ netdev_info(netdev, "Select only one FEC mode at a time\n");
+ return -EINVAL;
+ } else if (req_fc) {
+ fec_type = IONIC_PORT_FEC_TYPE_FC;
+ } else if (req_rs) {
+ fec_type = IONIC_PORT_FEC_TYPE_RS;
+ } else {
+ fec_type = IONIC_PORT_FEC_TYPE_NONE;
+ }
+
+ if (fec_type != idev->port_info->config.fec_type) {
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_port_fec(idev, fec_type);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void ionic_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ u8 pause_type;
+
+ pause->autoneg = 0;
+
+ pause_type = lif->ionic->idev.port_info->config.pause_type;
+ if (pause_type) {
+ pause->rx_pause = pause_type & IONIC_PAUSE_F_RX ? 1 : 0;
+ pause->tx_pause = pause_type & IONIC_PAUSE_F_TX ? 1 : 0;
+ }
+}
+
+static int ionic_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ u32 requested_pause;
+ int err;
+
+ if (pause->autoneg)
+ return -EOPNOTSUPP;
+
+ /* rx and tx pause are always changed together */
+ requested_pause = IONIC_PORT_PAUSE_TYPE_LINK;
+ if (pause->rx_pause)
+ requested_pause |= IONIC_PAUSE_F_RX;
+ if (pause->tx_pause)
+ requested_pause |= IONIC_PAUSE_F_TX;
+
+ if (requested_pause == lif->ionic->idev.port_info->config.pause_type)
+ return 0;
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_port_pause(&lif->ionic->idev, requested_pause);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int ionic_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+
+ /* Tx uses Rx interrupt */
+ coalesce->tx_coalesce_usecs = lif->rx_coalesce_usecs;
+ coalesce->rx_coalesce_usecs = lif->rx_coalesce_usecs;
+
+ return 0;
+}
+
+static int ionic_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_identity *ident;
+ struct ionic_qcq *qcq;
+ unsigned int i;
+ u32 usecs;
+ u32 coal;
+
+ if (coalesce->rx_max_coalesced_frames ||
+ coalesce->rx_coalesce_usecs_irq ||
+ coalesce->rx_max_coalesced_frames_irq ||
+ coalesce->tx_max_coalesced_frames ||
+ coalesce->tx_coalesce_usecs_irq ||
+ coalesce->tx_max_coalesced_frames_irq ||
+ coalesce->stats_block_coalesce_usecs ||
+ coalesce->use_adaptive_rx_coalesce ||
+ coalesce->use_adaptive_tx_coalesce ||
+ coalesce->pkt_rate_low ||
+ coalesce->rx_coalesce_usecs_low ||
+ coalesce->rx_max_coalesced_frames_low ||
+ coalesce->tx_coalesce_usecs_low ||
+ coalesce->tx_max_coalesced_frames_low ||
+ coalesce->pkt_rate_high ||
+ coalesce->rx_coalesce_usecs_high ||
+ coalesce->rx_max_coalesced_frames_high ||
+ coalesce->tx_coalesce_usecs_high ||
+ coalesce->tx_max_coalesced_frames_high ||
+ coalesce->rate_sample_interval)
+ return -EINVAL;
+
+ ident = &lif->ionic->ident;
+ if (ident->dev.intr_coal_div == 0) {
+ netdev_warn(netdev, "bad HW value in dev.intr_coal_div = %d\n",
+ ident->dev.intr_coal_div);
+ return -EIO;
+ }
+
+ /* Tx uses Rx interrupt, so only change Rx */
+ if (coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs) {
+ netdev_warn(netdev, "only the rx-usecs can be changed\n");
+ return -EINVAL;
+ }
+
+ coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->rx_coalesce_usecs);
+
+ if (coal > IONIC_INTR_CTRL_COAL_MAX)
+ return -ERANGE;
+
+ /* If they asked for non-zero and it resolved to zero, bump it up */
+ if (!coal && coalesce->rx_coalesce_usecs)
+ coal = 1;
+
+ /* Convert it back to get device resolution */
+ usecs = ionic_coal_hw_to_usec(lif->ionic, coal);
+
+ if (usecs != lif->rx_coalesce_usecs) {
+ lif->rx_coalesce_usecs = usecs;
+
+ if (test_bit(IONIC_LIF_UP, lif->state)) {
+ for (i = 0; i < lif->nxqs; i++) {
+ qcq = lif->rxqcqs[i].qcq;
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ qcq->intr.index, coal);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void ionic_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+
+ ring->tx_max_pending = IONIC_MAX_TXRX_DESC;
+ ring->tx_pending = lif->ntxq_descs;
+ ring->rx_max_pending = IONIC_MAX_TXRX_DESC;
+ ring->rx_pending = lif->nrxq_descs;
+}
+
+static int ionic_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ bool running;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
+ netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n");
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(ring->tx_pending) ||
+ !is_power_of_2(ring->rx_pending)) {
+ netdev_info(netdev, "Descriptor count must be a power of 2\n");
+ return -EINVAL;
+ }
+
+ /* if nothing to do return success */
+ if (ring->tx_pending == lif->ntxq_descs &&
+ ring->rx_pending == lif->nrxq_descs)
+ return 0;
+
+ if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET))
+ return -EBUSY;
+
+ running = test_bit(IONIC_LIF_UP, lif->state);
+ if (running)
+ ionic_stop(netdev);
+
+ lif->ntxq_descs = ring->tx_pending;
+ lif->nrxq_descs = ring->rx_pending;
+
+ if (running)
+ ionic_open(netdev);
+ clear_bit(IONIC_LIF_QUEUE_RESET, lif->state);
+
+ return 0;
+}
+
+static void ionic_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+
+ /* report maximum channels */
+ ch->max_combined = lif->ionic->ntxqs_per_lif;
+
+ /* report current channels */
+ ch->combined_count = lif->nxqs;
+}
+
+static int ionic_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ bool running;
+
+ if (!ch->combined_count || ch->other_count ||
+ ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ if (ch->combined_count == lif->nxqs)
+ return 0;
+
+ if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET))
+ return -EBUSY;
+
+ running = test_bit(IONIC_LIF_UP, lif->state);
+ if (running)
+ ionic_stop(netdev);
+
+ lif->nxqs = ch->combined_count;
+
+ if (running)
+ ionic_open(netdev);
+ clear_bit(IONIC_LIF_QUEUE_RESET, lif->state);
+
+ return 0;
+}
+
+static u32 ionic_get_priv_flags(struct net_device *netdev)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state))
+ priv_flags |= PRIV_F_SW_DBG_STATS;
+
+ return priv_flags;
+}
+
+static int ionic_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+
+ clear_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state);
+ if (priv_flags & PRIV_F_SW_DBG_STATS)
+ set_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state);
+
+ return 0;
+}
+
+static int ionic_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *info, u32 *rules)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ int err = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = lif->nxqs;
+ break;
+ default:
+ netdev_err(netdev, "Command parameter %d is not supported\n",
+ info->cmd);
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static u32 ionic_get_rxfh_indir_size(struct net_device *netdev)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+
+ return le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
+}
+
+static u32 ionic_get_rxfh_key_size(struct net_device *netdev)
+{
+ return IONIC_RSS_HASH_KEY_SIZE;
+}
+
+static int ionic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ unsigned int i, tbl_sz;
+
+ if (indir) {
+ tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
+ for (i = 0; i < tbl_sz; i++)
+ indir[i] = lif->rss_ind_tbl[i];
+ }
+
+ if (key)
+ memcpy(key, lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int ionic_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ int err;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ err = ionic_lif_rss_config(lif, lif->rss_types, key, indir);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int ionic_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct ionic_lif *lif = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ lif->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ionic_get_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = lif->rx_copybreak;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ionic_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_dev *idev = &lif->ionic->idev;
+ struct ionic_xcvr_status *xcvr;
+
+ xcvr = &idev->port_info->status.xcvr;
+
+ /* report the module data type and length */
+ switch (xcvr->sprom[0]) {
+ case 0x03: /* SFP */
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ break;
+ case 0x0D: /* QSFP */
+ case 0x11: /* QSFP28 */
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ default:
+ netdev_info(netdev, "unknown xcvr type 0x%02x\n",
+ xcvr->sprom[0]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ionic_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_dev *idev = &lif->ionic->idev;
+ struct ionic_xcvr_status *xcvr;
+ char tbuf[sizeof(xcvr->sprom)];
+ int count = 10;
+ u32 len;
+
+ /* The NIC keeps the module prom up-to-date in the DMA space
+ * so we can simply copy the module bytes into the data buffer.
+ */
+ xcvr = &idev->port_info->status.xcvr;
+ len = min_t(u32, sizeof(xcvr->sprom), ee->len);
+
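+ /* The firmware can rewrite the cached prom at any time, so read
+ * it twice and retry until both reads agree.
+ */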
+ do {
+ memcpy(data, xcvr->sprom, len);
+ memcpy(tbuf, xcvr->sprom, len);
+
+ /* Let's make sure we got a consistent copy */
+ if (!memcmp(data, tbuf, len))
+ break;
+
+ } while (--count);
+
+ if (!count)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ionic_nway_reset(struct net_device *netdev)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ int err = 0;
+
+ /* flap the link to force auto-negotiation */
+
+ mutex_lock(&ionic->dev_cmd_lock);
+
+ ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_DOWN);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+
+ if (!err) {
+ ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_UP);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ }
+
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ return err;
+}
+
+static const struct ethtool_ops ionic_ethtool_ops = {
+ .get_drvinfo = ionic_get_drvinfo,
+ .get_regs_len = ionic_get_regs_len,
+ .get_regs = ionic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = ionic_get_link_ksettings,
+ .get_coalesce = ionic_get_coalesce,
+ .set_coalesce = ionic_set_coalesce,
+ .get_ringparam = ionic_get_ringparam,
+ .set_ringparam = ionic_set_ringparam,
+ .get_channels = ionic_get_channels,
+ .set_channels = ionic_set_channels,
+ .get_strings = ionic_get_strings,
+ .get_ethtool_stats = ionic_get_stats,
+ .get_sset_count = ionic_get_sset_count,
+ .get_priv_flags = ionic_get_priv_flags,
+ .set_priv_flags = ionic_set_priv_flags,
+ .get_rxnfc = ionic_get_rxnfc,
+ .get_rxfh_indir_size = ionic_get_rxfh_indir_size,
+ .get_rxfh_key_size = ionic_get_rxfh_key_size,
+ .get_rxfh = ionic_get_rxfh,
+ .set_rxfh = ionic_set_rxfh,
+ .get_tunable = ionic_get_tunable,
+ .set_tunable = ionic_set_tunable,
+ .get_module_info = ionic_get_module_info,
+ .get_module_eeprom = ionic_get_module_eeprom,
+ .get_pauseparam = ionic_get_pauseparam,
+ .set_pauseparam = ionic_set_pauseparam,
+ .set_link_ksettings = ionic_set_link_ksettings,
+ .nway_reset = ionic_nway_reset,
+};
+
+void ionic_ethtool_set_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &ionic_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.h b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.h
new file mode 100644
index 000000000000..38b91b1d70ae
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#ifndef _IONIC_ETHTOOL_H_
+#define _IONIC_ETHTOOL_H_
+
+void ionic_ethtool_set_ops(struct net_device *netdev);
+
+#endif /* _IONIC_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
new file mode 100644
index 000000000000..5bfdda19f64d
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -0,0 +1,2482 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */
+/* Copyright (c) 2017-2019 Pensando Systems, Inc. All rights reserved. */
+
+#ifndef _IONIC_IF_H_
+#define _IONIC_IF_H_
+
+#pragma pack(push, 1)
+
+#define IONIC_DEV_INFO_SIGNATURE 0x44455649 /* 'DEVI' */
+#define IONIC_DEV_INFO_VERSION 1
+#define IONIC_IFNAMSIZ 16
+
+/**
+ * Commands
+ */
+enum ionic_cmd_opcode {
+ IONIC_CMD_NOP = 0,
+
+ /* Device commands */
+ IONIC_CMD_IDENTIFY = 1,
+ IONIC_CMD_INIT = 2,
+ IONIC_CMD_RESET = 3,
+ IONIC_CMD_GETATTR = 4,
+ IONIC_CMD_SETATTR = 5,
+
+ /* Port commands */
+ IONIC_CMD_PORT_IDENTIFY = 10,
+ IONIC_CMD_PORT_INIT = 11,
+ IONIC_CMD_PORT_RESET = 12,
+ IONIC_CMD_PORT_GETATTR = 13,
+ IONIC_CMD_PORT_SETATTR = 14,
+
+ /* LIF commands */
+ IONIC_CMD_LIF_IDENTIFY = 20,
+ IONIC_CMD_LIF_INIT = 21,
+ IONIC_CMD_LIF_RESET = 22,
+ IONIC_CMD_LIF_GETATTR = 23,
+ IONIC_CMD_LIF_SETATTR = 24,
+
+ IONIC_CMD_RX_MODE_SET = 30,
+ IONIC_CMD_RX_FILTER_ADD = 31,
+ IONIC_CMD_RX_FILTER_DEL = 32,
+
+ /* Queue commands */
+ IONIC_CMD_Q_INIT = 40,
+ IONIC_CMD_Q_CONTROL = 41,
+
+ /* RDMA commands */
+ IONIC_CMD_RDMA_RESET_LIF = 50,
+ IONIC_CMD_RDMA_CREATE_EQ = 51,
+ IONIC_CMD_RDMA_CREATE_CQ = 52,
+ IONIC_CMD_RDMA_CREATE_ADMINQ = 53,
+
+ /* QoS commands */
+ IONIC_CMD_QOS_CLASS_IDENTIFY = 240,
+ IONIC_CMD_QOS_CLASS_INIT = 241,
+ IONIC_CMD_QOS_CLASS_RESET = 242,
+
+ /* Firmware commands */
+ IONIC_CMD_FW_DOWNLOAD = 254,
+ IONIC_CMD_FW_CONTROL = 255,
+};
+
+/**
+ * Command Return codes
+ */
+enum ionic_status_code {
+ IONIC_RC_SUCCESS = 0, /* Success */
+ IONIC_RC_EVERSION = 1, /* Incorrect version for request */
+ IONIC_RC_EOPCODE = 2, /* Invalid cmd opcode */
+ IONIC_RC_EIO = 3, /* I/O error */
+ IONIC_RC_EPERM = 4, /* Permission denied */
+ IONIC_RC_EQID = 5, /* Bad qid */
+ IONIC_RC_EQTYPE = 6, /* Bad qtype */
+ IONIC_RC_ENOENT = 7, /* No such element */
+ IONIC_RC_EINTR = 8, /* operation interrupted */
+ IONIC_RC_EAGAIN = 9, /* Try again */
+ IONIC_RC_ENOMEM = 10, /* Out of memory */
+ IONIC_RC_EFAULT = 11, /* Bad address */
+ IONIC_RC_EBUSY = 12, /* Device or resource busy */
+ IONIC_RC_EEXIST = 13, /* object already exists */
+ IONIC_RC_EINVAL = 14, /* Invalid argument */
+ IONIC_RC_ENOSPC = 15, /* No space left or alloc failure */
+ IONIC_RC_ERANGE = 16, /* Parameter out of range */
+ IONIC_RC_BAD_ADDR = 17, /* Descriptor contains a bad ptr */
+ IONIC_RC_DEV_CMD = 18, /* Device cmd attempted on AdminQ */
+ IONIC_RC_ENOSUPP = 19, /* Operation not supported */
+ IONIC_RC_ERROR = 29, /* Generic error */
+
+ IONIC_RC_ERDMA = 30, /* Generic RDMA error */
+};
+
+enum ionic_notifyq_opcode {
+ IONIC_EVENT_LINK_CHANGE = 1,
+ IONIC_EVENT_RESET = 2,
+ IONIC_EVENT_HEARTBEAT = 3,
+ IONIC_EVENT_LOG = 4,
+};
+
+/**
+ * struct cmd - General admin command format
+ * @opcode: Opcode for the command
+ * @lif_index: LIF index
+ * @cmd_data: Opcode-specific command bytes
+ */
+struct ionic_admin_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 lif_index;
+ u8 cmd_data[60];
+};
+
+/**
+ * struct admin_comp - General admin command completion format
+ * @status: The status of the command (enum status_code)
+ * @comp_index: The index in the descriptor ring for which this
+ * is the completion.
+ * @cmd_data: Command-specific bytes.
+ * @color: Color bit. (Always 0 for commands issued to the
+ * Device Cmd Registers.)
+ */
+struct ionic_admin_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 cmd_data[11];
+ u8 color;
+#define IONIC_COMP_COLOR_MASK 0x80
+};
+
+static inline u8 color_match(u8 color, u8 done_color)
+{
+ return (!!(color & IONIC_COMP_COLOR_MASK)) == done_color;
+}
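+
+/*
+ * Editor's illustrative sketch, not part of the original patch: one way a
+ * completion-ring consumer might use color_match().  The device toggles
+ * the color bit each time it wraps the ring, so a completion is new when
+ * its color matches the phase the consumer expects.  The struct and
+ * function names here (example_cq, example_cq_has_work) are hypothetical.
+ */
+struct example_cq {
+	struct ionic_admin_comp *ring;	/* completion descriptors */
+	unsigned int num_descs;		/* ring length */
+	unsigned int tail;		/* next index to inspect */
+	u8 done_color;			/* expected phase, starts at 1 */
+};
+
+static inline bool example_cq_has_work(struct example_cq *cq)
+{
+	struct ionic_admin_comp *comp = &cq->ring[cq->tail];
+
+	/* only a matching color bit marks a newly written completion */
+	if (!color_match(comp->color, cq->done_color))
+		return false;
+
+	if (++cq->tail == cq->num_descs) {
+		cq->tail = 0;
+		cq->done_color = !cq->done_color;	/* flip phase on wrap */
+	}
+	return true;
+}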
+
+/**
+ * struct nop_cmd - NOP command
+ * @opcode: opcode
+ */
+struct ionic_nop_cmd {
+ u8 opcode;
+ u8 rsvd[63];
+};
+
+/**
+ * struct nop_comp - NOP command completion
+ * @status: The status of the command (enum status_code)
+ */
+struct ionic_nop_comp {
+ u8 status;
+ u8 rsvd[15];
+};
+
+/**
+ * struct dev_init_cmd - Device init command
+ * @opcode: opcode
+ * @type: device type
+ */
+struct ionic_dev_init_cmd {
+ u8 opcode;
+ u8 type;
+ u8 rsvd[62];
+};
+
+/**
+ * struct init_comp - Device init command completion
+ * @status: The status of the command (enum status_code)
+ */
+struct ionic_dev_init_comp {
+ u8 status;
+ u8 rsvd[15];
+};
+
+/**
+ * struct dev_reset_cmd - Device reset command
+ * @opcode: opcode
+ */
+struct ionic_dev_reset_cmd {
+ u8 opcode;
+ u8 rsvd[63];
+};
+
+/**
+ * struct reset_comp - Reset command completion
+ * @status: The status of the command (enum status_code)
+ */
+struct ionic_dev_reset_comp {
+ u8 status;
+ u8 rsvd[15];
+};
+
+#define IONIC_IDENTITY_VERSION_1 1
+
+/**
+ * struct dev_identify_cmd - Driver/device identify command
+ * @opcode: opcode
+ * @ver: Highest version of identify supported by driver
+ */
+struct ionic_dev_identify_cmd {
+ u8 opcode;
+ u8 ver;
+ u8 rsvd[62];
+};
+
+/**
+ * struct dev_identify_comp - Driver/device identify command completion
+ * @status: The status of the command (enum status_code)
+ * @ver: Version of identify returned by device
+ */
+struct ionic_dev_identify_comp {
+ u8 status;
+ u8 ver;
+ u8 rsvd[14];
+};
+
+enum ionic_os_type {
+ IONIC_OS_TYPE_LINUX = 1,
+ IONIC_OS_TYPE_WIN = 2,
+ IONIC_OS_TYPE_DPDK = 3,
+ IONIC_OS_TYPE_FREEBSD = 4,
+ IONIC_OS_TYPE_IPXE = 5,
+ IONIC_OS_TYPE_ESXI = 6,
+};
+
+/**
+ * union drv_identity - driver identity information
+ * @os_type: OS type (see enum os_type)
+ * @os_dist: OS distribution, numeric format
+ * @os_dist_str: OS distribution, string format
+ * @kernel_ver: Kernel version, numeric format
+ * @kernel_ver_str: Kernel version, string format
+ * @driver_ver_str: Driver version, string format
+ */
+union ionic_drv_identity {
+ struct {
+ __le32 os_type;
+ __le32 os_dist;
+ char os_dist_str[128];
+ __le32 kernel_ver;
+ char kernel_ver_str[32];
+ char driver_ver_str[32];
+ };
+ __le32 words[512];
+};
+
+/**
+ * union dev_identity - device identity information
+ * @version: Version of device identify
+ * @type: Identify type (0 for now)
+ * @nports: Number of ports provisioned
+ * @nlifs: Number of LIFs provisioned
+ * @nintrs: Number of interrupts provisioned
+ * @ndbpgs_per_lif: Number of doorbell pages per LIF
+ * @intr_coal_mult: Interrupt coalescing multiplication factor.
+ * Scale user-supplied interrupt coalescing
+ * value in usecs to device units using:
+ * device units = usecs * mult / div
+ * @intr_coal_div: Interrupt coalescing division factor.
+ * Scale user-supplied interrupt coalescing
+ * value in usecs to device units using:
+ * device units = usecs * mult / div
+ */
+union ionic_dev_identity {
+ struct {
+ u8 version;
+ u8 type;
+ u8 rsvd[2];
+ u8 nports;
+ u8 rsvd2[3];
+ __le32 nlifs;
+ __le32 nintrs;
+ __le32 ndbpgs_per_lif;
+ __le32 intr_coal_mult;
+ __le32 intr_coal_div;
+ };
+ __le32 words[512];
+};
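+
+/*
+ * Editor's sketch, not part of the original patch: applying the
+ * intr_coal_mult/intr_coal_div scaling documented above.  The zero-divisor
+ * guard and the helper name are the editor's assumptions.
+ */
+static inline u32 example_coal_usecs_to_hw(const union ionic_dev_identity *ident,
+					   u32 usecs)
+{
+	u32 mult = le32_to_cpu(ident->intr_coal_mult);
+	u32 div = le32_to_cpu(ident->intr_coal_div);
+
+	/* device units = usecs * mult / div, per the field descriptions */
+	return div ? (usecs * mult) / div : 0;
+}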
+
+enum ionic_lif_type {
+ IONIC_LIF_TYPE_CLASSIC = 0,
+ IONIC_LIF_TYPE_MACVLAN = 1,
+ IONIC_LIF_TYPE_NETQUEUE = 2,
+};
+
+/**
+ * struct lif_identify_cmd - lif identify command
+ * @opcode: opcode
+ * @type: lif type (enum lif_type)
+ * @ver: version of identify returned by device
+ */
+struct ionic_lif_identify_cmd {
+ u8 opcode;
+ u8 type;
+ u8 ver;
+ u8 rsvd[61];
+};
+
+/**
+ * struct lif_identify_comp - lif identify command completion
+ * @status: status of the command (enum status_code)
+ * @ver: version of identify returned by device
+ */
+struct ionic_lif_identify_comp {
+ u8 status;
+ u8 ver;
+ u8 rsvd2[14];
+};
+
+enum ionic_lif_capability {
+ IONIC_LIF_CAP_ETH = BIT(0),
+ IONIC_LIF_CAP_RDMA = BIT(1),
+};
+
+/**
+ * Logical Queue Types
+ */
+enum ionic_logical_qtype {
+ IONIC_QTYPE_ADMINQ = 0,
+ IONIC_QTYPE_NOTIFYQ = 1,
+ IONIC_QTYPE_RXQ = 2,
+ IONIC_QTYPE_TXQ = 3,
+ IONIC_QTYPE_EQ = 4,
+ IONIC_QTYPE_MAX = 16,
+};
+
+/**
+ * struct lif_logical_qtype - Descriptor of logical to hardware queue type.
+ * @qtype: Hardware Queue Type.
+ * @qid_count: Number of Queue IDs of the logical type.
+ * @qid_base: Minimum Queue ID of the logical type.
+ */
+struct ionic_lif_logical_qtype {
+ u8 qtype;
+ u8 rsvd[3];
+ __le32 qid_count;
+ __le32 qid_base;
+};
+
+enum ionic_lif_state {
+ IONIC_LIF_DISABLE = 0,
+ IONIC_LIF_ENABLE = 1,
+ IONIC_LIF_HANG_RESET = 2,
+};
+
+/**
+ * LIF configuration
+ * @state: lif state (enum lif_state)
+ * @name: lif name
+ * @mtu: mtu
+ * @mac: station mac address
+ * @features: features (enum eth_hw_features)
+ * @queue_count: queue counts per queue-type
+ */
+union ionic_lif_config {
+ struct {
+ u8 state;
+ u8 rsvd[3];
+ char name[IONIC_IFNAMSIZ];
+ __le32 mtu;
+ u8 mac[6];
+ u8 rsvd2[2];
+ __le64 features;
+ __le32 queue_count[IONIC_QTYPE_MAX];
+ };
+ __le32 words[64];
+};
+
+/**
+ * struct lif_identity - lif identity information (type-specific)
+ *
+ * @capabilities: LIF capabilities
+ *
+ * Ethernet:
+ * @version: Ethernet identify structure version.
+ * @features: Ethernet features supported on this lif type.
+ * @max_ucast_filters: Number of perfect unicast addresses supported.
+ * @max_mcast_filters: Number of perfect multicast addresses supported.
+ * @rss_ind_tbl_sz: Size of the RSS indirection table.
+ * @min_frame_size: Minimum size of frames to be sent
+ * @max_frame_size: Maximum size of frames to be sent
+ * @config: LIF config struct with features, mtu, mac, q counts
+ *
+ * RDMA:
+ * @version: RDMA version of opcodes and queue descriptors.
+ * @qp_opcodes: Number of rdma queue pair opcodes supported.
+ * @admin_opcodes: Number of rdma admin opcodes supported.
+ * @npts_per_lif: Page table size per lif
+ * @nmrs_per_lif: Number of memory regions per lif
+ * @nahs_per_lif: Number of address handles per lif
+ * @max_stride: Max work request stride.
+ * @cl_stride: Cache line stride.
+ * @pte_stride: Page table entry stride.
+ * @rrq_stride: Remote RQ work request stride.
+ * @rsq_stride: Remote SQ work request stride.
+ * @dcqcn_profiles: Number of DCQCN profiles
+ * @aq_qtype: RDMA Admin Qtype.
+ * @sq_qtype: RDMA Send Qtype.
+ * @rq_qtype: RDMA Receive Qtype.
+ * @cq_qtype: RDMA Completion Qtype.
+ * @eq_qtype: RDMA Event Qtype.
+ */
+union ionic_lif_identity {
+ struct {
+ __le64 capabilities;
+
+ struct {
+ u8 version;
+ u8 rsvd[3];
+ __le32 max_ucast_filters;
+ __le32 max_mcast_filters;
+ __le16 rss_ind_tbl_sz;
+ __le32 min_frame_size;
+ __le32 max_frame_size;
+ u8 rsvd2[106];
+ union ionic_lif_config config;
+ } eth;
+
+ struct {
+ u8 version;
+ u8 qp_opcodes;
+ u8 admin_opcodes;
+ u8 rsvd;
+ __le32 npts_per_lif;
+ __le32 nmrs_per_lif;
+ __le32 nahs_per_lif;
+ u8 max_stride;
+ u8 cl_stride;
+ u8 pte_stride;
+ u8 rrq_stride;
+ u8 rsq_stride;
+ u8 dcqcn_profiles;
+ u8 rsvd_dimensions[10];
+ struct ionic_lif_logical_qtype aq_qtype;
+ struct ionic_lif_logical_qtype sq_qtype;
+ struct ionic_lif_logical_qtype rq_qtype;
+ struct ionic_lif_logical_qtype cq_qtype;
+ struct ionic_lif_logical_qtype eq_qtype;
+ } rdma;
+ };
+ __le32 words[512];
+};
+
+/**
+ * struct lif_init_cmd - LIF init command
+ * @opcode: opcode
+ * @type: LIF type (enum lif_type)
+ * @index: LIF index
+ * @info_pa: destination address for lif info (struct lif_info)
+ */
+struct ionic_lif_init_cmd {
+ u8 opcode;
+ u8 type;
+ __le16 index;
+ __le32 rsvd;
+ __le64 info_pa;
+ u8 rsvd2[48];
+};
+
+/**
+ * struct lif_init_comp - LIF init command completion
+ * @status: The status of the command (enum status_code)
+ */
+struct ionic_lif_init_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 hw_index;
+ u8 rsvd2[12];
+};
+
+/**
+ * struct q_init_cmd - Queue init command
+ * @opcode: opcode
+ * @type: Logical queue type
+ * @ver: Queue version (defines opcode/descriptor scope)
+ * @lif_index: LIF index
+ * @index: (lif, qtype) relative admin queue index
+ * @intr_index: Interrupt control register index
+ * @pid: Process ID
+ * @flags:
+ *    IRQ:	Interrupt requested on completion
+ *    ENA:	Enable the queue.  If ENA=0 the queue is initialized
+ *		but remains disabled, to be later enabled with the
+ *		Queue Enable command.  If ENA=1, then queue is
+ *		initialized and then enabled.
+ *    SG:	Enable Scatter-Gather on the queue.
+ *    EQ:	Enable the Event Queue
+ * @cos:	Class of service for this queue.
+ * @ring_size:	Queue ring size, encoded as a log2(size), in number
+ *		of descs.  The actual ring size is (1 << ring_size).
+ *		For example, to select a ring size of 64 descriptors
+ *		write ring_size = 6.  The minimum ring_size value is
+ *		2 for a ring size of 4 descriptors.  The maximum
+ *		ring_size value is 16 for a ring size of 64k
+ *		descriptors.  Values of ring_size <2 and >16 are
+ *		reserved.
+ * @ring_base: Queue ring base address
+ * @cq_ring_base: Completion queue ring base address
+ * @sg_ring_base: Scatter/Gather ring base address
+ * @eq_index: Event queue index
+ */
+struct ionic_q_init_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 lif_index;
+ u8 type;
+ u8 ver;
+ u8 rsvd1[2];
+ __le32 index;
+ __le16 pid;
+ __le16 intr_index;
+ __le16 flags;
+#define IONIC_QINIT_F_IRQ 0x01 /* Request interrupt on completion */
+#define IONIC_QINIT_F_ENA 0x02 /* Enable the queue */
+#define IONIC_QINIT_F_SG 0x04 /* Enable scatter/gather on the queue */
+#define IONIC_QINIT_F_EQ 0x08 /* Enable event queue */
+#define IONIC_QINIT_F_DEBUG 0x80 /* Enable queue debugging */
+ u8 cos;
+ u8 ring_size;
+ __le64 ring_base;
+ __le64 cq_ring_base;
+ __le64 sg_ring_base;
+ __le32 eq_index;
+ u8 rsvd2[16];
+};
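+
+/*
+ * Editor's sketch, not part of the original patch: ring_size is log2 of
+ * the descriptor count, so a driver would typically derive it with
+ * ilog2() from a power-of-two ring length (64 descriptors -> 6).  The
+ * helper name is hypothetical; ilog2() and clamp() are the kernel's
+ * usual log2.h/kernel.h facilities.
+ */
+static inline u8 example_q_ring_size(u32 num_descs)
+{
+	/* valid encodings are 2..16 (4 to 64k descriptors) */
+	return (u8)clamp(ilog2(num_descs), 2, 16);
+}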
+
+/**
+ * struct q_init_comp - Queue init command completion
+ * @status: The status of the command (enum status_code)
+ * @ver: Queue version (defines opcode/descriptor scope)
+ * @comp_index: The index in the descriptor ring for which this
+ * is the completion.
+ * @hw_index: Hardware Queue ID
+ * @hw_type: Hardware Queue type
+ * @color: Color
+ */
+struct ionic_q_init_comp {
+ u8 status;
+ u8 ver;
+ __le16 comp_index;
+ __le32 hw_index;
+ u8 hw_type;
+ u8 rsvd2[6];
+ u8 color;
+};
+
+/* the device's internal addressing uses up to 52 bits */
+#define IONIC_ADDR_LEN 52
+#define IONIC_ADDR_MASK (BIT_ULL(IONIC_ADDR_LEN) - 1)
+
+enum ionic_txq_desc_opcode {
+ IONIC_TXQ_DESC_OPCODE_CSUM_NONE = 0,
+ IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL = 1,
+ IONIC_TXQ_DESC_OPCODE_CSUM_HW = 2,
+ IONIC_TXQ_DESC_OPCODE_TSO = 3,
+};
+
+/**
+ * struct txq_desc - Ethernet Tx queue descriptor format
+ * @opcode: Tx operation, see TXQ_DESC_OPCODE_*:
+ *
+ * IONIC_TXQ_DESC_OPCODE_CSUM_NONE:
+ *
+ * Non-offload send. No segmentation,
+ * fragmentation or checksum calc/insertion is
+ * performed by device; packet is prepared
+ * to send by software stack and requires
+ * no further manipulation from device.
+ *
+ * IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL:
+ *
+ * Offload 16-bit L4 checksum
+ * calculation/insertion. The device will
+ * calculate the L4 checksum value and
+ * insert the result in the packet's L4
+ * header checksum field. The L4 checksum
+ * is calculated starting at @csum_start bytes
+ * into the packet to the end of the packet.
+ * The checksum insertion position is given
+ * in @csum_offset. This feature is only
+ * applicable to protocols such as TCP, UDP
+ * and ICMP where a standard (i.e. the
+ * 'IP-style' checksum) one's complement
+ * 16-bit checksum is used, using an IP
+ * pseudo-header to seed the calculation.
+ * Software will preload the L4 checksum
+ * field with the IP pseudo-header checksum.
+ *
+ * For tunnel encapsulation, @csum_start and
+ * @csum_offset refer to the inner L4
+ * header. Supported tunnels encapsulations
+ * are: IPIP, GRE, and UDP. If the @encap
+ * is clear, no further processing by the
+ * device is required; software will
+ * calculate the outer header checksums. If
+ * the @encap is set, the device will
+ * offload the outer header checksums using
+ * LCO (local checksum offload) (see
+ * Documentation/networking/checksum-offloads.txt
+ * for more info).
+ *
+ * IONIC_TXQ_DESC_OPCODE_CSUM_HW:
+ *
+ * Offload 16-bit checksum computation to hardware.
+ * If @csum_l3 is set then the packet's L3 checksum is
+ * updated. Similarly, if @csum_l4 is set then the L4
+ * checksum is updated. If @encap is set then encap header
+ * checksums are also updated.
+ *
+ * IONIC_TXQ_DESC_OPCODE_TSO:
+ *
+ * Device performs TCP segmentation offload
+ * (TSO). @hdr_len is the number of bytes
+ * to the end of TCP header (the offset to
+ * the TCP payload). @mss is the desired
+ * MSS, the TCP payload length for each
+ * segment. The device will calculate/
+ * insert IP (IPv4 only) and TCP checksums
+ * for each segment. In the first data
+ * buffer containing the header template,
+ * the driver will set IPv4 checksum to 0
+ * and preload TCP checksum with the IP
+ * pseudo header calculated with IP length = 0.
+ *
+ * Supported tunnel encapsulations are IPIP,
+ * layer-3 GRE, and UDP. @hdr_len includes
+ * both outer and inner headers. The driver
+ * will set IPv4 checksum to zero and
+ * preload TCP checksum with IP pseudo
+ * header on the inner header.
+ *
+ * TCP ECN offload is supported. The device
+ * will set CWR flag in the first segment if
+ * CWR is set in the template header, and
+ * clear CWR in remaining segments.
+ * @flags:
+ * vlan:
+ * Insert an L2 VLAN header using @vlan_tci.
+ * encap:
+ * Calculate encap header checksum.
+ * csum_l3:
+ * Compute L3 header checksum.
+ * csum_l4:
+ * Compute L4 header checksum.
+ * tso_sot:
+ * TSO start
+ * tso_eot:
+ * TSO end
+ * @num_sg_elems: Number of scatter-gather elements in SG
+ * descriptor
+ * @addr: First data buffer's DMA address.
+ * (Subsequent data buffers are on txq_sg_desc).
+ * @len: First data buffer's length, in bytes
+ * @vlan_tci: VLAN tag to insert in the packet (if requested
+ * by the @vlan flag). Includes .1p and .1q tags
+ * @hdr_len: Length of packet headers, including
+ * encapsulating outer header, if applicable.
+ * Valid for opcodes TXQ_DESC_OPCODE_CALC_CSUM and
+ * TXQ_DESC_OPCODE_TSO. Should be set to zero for
+ * all other modes. For
+ * TXQ_DESC_OPCODE_CALC_CSUM, @hdr_len is length
+ * of headers up to inner-most L4 header. For
+ * TXQ_DESC_OPCODE_TSO, @hdr_len is up to
+ * inner-most L4 payload, so inclusive of
+ * inner-most L4 header.
+ * @mss: Desired MSS value for TSO. Only applicable for
+ * TXQ_DESC_OPCODE_TSO.
+ * @csum_start: Offset into inner-most L3 header of checksum
+ * @csum_offset: Offset into inner-most L4 header of checksum
+ */
+ */
+
+#define IONIC_TXQ_DESC_OPCODE_MASK 0xf
+#define IONIC_TXQ_DESC_OPCODE_SHIFT 4
+#define IONIC_TXQ_DESC_FLAGS_MASK 0xf
+#define IONIC_TXQ_DESC_FLAGS_SHIFT 0
+#define IONIC_TXQ_DESC_NSGE_MASK 0xf
+#define IONIC_TXQ_DESC_NSGE_SHIFT 8
+#define IONIC_TXQ_DESC_ADDR_MASK (BIT_ULL(IONIC_ADDR_LEN) - 1)
+#define IONIC_TXQ_DESC_ADDR_SHIFT 12
+
+/* common flags */
+#define IONIC_TXQ_DESC_FLAG_VLAN 0x1
+#define IONIC_TXQ_DESC_FLAG_ENCAP 0x2
+
+/* flags for csum_hw opcode */
+#define IONIC_TXQ_DESC_FLAG_CSUM_L3 0x4
+#define IONIC_TXQ_DESC_FLAG_CSUM_L4 0x8
+
+/* flags for tso opcode */
+#define IONIC_TXQ_DESC_FLAG_TSO_SOT 0x4
+#define IONIC_TXQ_DESC_FLAG_TSO_EOT 0x8
+
+struct ionic_txq_desc {
+ __le64 cmd;
+ __le16 len;
+ union {
+ __le16 vlan_tci;
+ __le16 hword0;
+ };
+ union {
+ __le16 csum_start;
+ __le16 hdr_len;
+ __le16 hword1;
+ };
+ union {
+ __le16 csum_offset;
+ __le16 mss;
+ __le16 hword2;
+ };
+};
+
+static inline u64 encode_txq_desc_cmd(u8 opcode, u8 flags,
+ u8 nsge, u64 addr)
+{
+ u64 cmd;
+
+ cmd = (opcode & IONIC_TXQ_DESC_OPCODE_MASK) << IONIC_TXQ_DESC_OPCODE_SHIFT;
+ cmd |= (flags & IONIC_TXQ_DESC_FLAGS_MASK) << IONIC_TXQ_DESC_FLAGS_SHIFT;
+ cmd |= (nsge & IONIC_TXQ_DESC_NSGE_MASK) << IONIC_TXQ_DESC_NSGE_SHIFT;
+ cmd |= (addr & IONIC_TXQ_DESC_ADDR_MASK) << IONIC_TXQ_DESC_ADDR_SHIFT;
+
+ return cmd;
+}
+
+static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags,
+ u8 *nsge, u64 *addr)
+{
+ *opcode = (cmd >> IONIC_TXQ_DESC_OPCODE_SHIFT) & IONIC_TXQ_DESC_OPCODE_MASK;
+ *flags = (cmd >> IONIC_TXQ_DESC_FLAGS_SHIFT) & IONIC_TXQ_DESC_FLAGS_MASK;
+ *nsge = (cmd >> IONIC_TXQ_DESC_NSGE_SHIFT) & IONIC_TXQ_DESC_NSGE_MASK;
+ *addr = (cmd >> IONIC_TXQ_DESC_ADDR_SHIFT) & IONIC_TXQ_DESC_ADDR_MASK;
+}
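+
+/*
+ * Editor's illustrative use of encode_txq_desc_cmd(), not part of the
+ * original patch: filling a descriptor for the CSUM_PARTIAL opcode as
+ * described above.  The helper name and parameters are placeholders.
+ */
+static inline void example_fill_csum_desc(struct ionic_txq_desc *desc,
+					  u64 buf_dma, u16 buf_len,
+					  u16 csum_start, u16 csum_offset)
+{
+	u8 flags = 0;	/* no VLAN insert, no encap in this sketch */
+
+	desc->cmd = cpu_to_le64(encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
+						    flags, 0 /* nsge */, buf_dma));
+	desc->len = cpu_to_le16(buf_len);
+	desc->csum_start = cpu_to_le16(csum_start);
+	desc->csum_offset = cpu_to_le16(csum_offset);
+}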
+
+#define IONIC_TX_MAX_SG_ELEMS 8
+#define IONIC_RX_MAX_SG_ELEMS 8
+
+/**
+ * struct txq_sg_desc - Transmit scatter-gather (SG) list
+ * @addr: DMA address of SG element data buffer
+ * @len: Length of SG element data buffer, in bytes
+ */
+struct ionic_txq_sg_desc {
+ struct ionic_txq_sg_elem {
+ __le64 addr;
+ __le16 len;
+ __le16 rsvd[3];
+ } elems[IONIC_TX_MAX_SG_ELEMS];
+};
+
+/**
+ * struct txq_comp - Ethernet transmit queue completion descriptor
+ * @status: The status of the command (enum status_code)
+ * @comp_index: The index in the descriptor ring for which this
+ * is the completion.
+ * @color: Color bit.
+ */
+struct ionic_txq_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 rsvd2[11];
+ u8 color;
+};
+
+enum ionic_rxq_desc_opcode {
+ IONIC_RXQ_DESC_OPCODE_SIMPLE = 0,
+ IONIC_RXQ_DESC_OPCODE_SG = 1,
+};
+
+/**
+ * struct rxq_desc - Ethernet Rx queue descriptor format
+ * @opcode: Rx operation, see RXQ_DESC_OPCODE_*:
+ *
+ * RXQ_DESC_OPCODE_SIMPLE:
+ *
+ * Receive full packet into data buffer
+ * starting at @addr. Results of
+ * receive, including actual bytes received,
+ * are recorded in Rx completion descriptor.
+ *
+ * @len: Data buffer's length, in bytes.
+ * @addr: Data buffer's DMA address
+ */
+struct ionic_rxq_desc {
+ u8 opcode;
+ u8 rsvd[5];
+ __le16 len;
+ __le64 addr;
+};
+
+/**
+ * struct rxq_sg_desc - Receive scatter-gather (SG) list
+ * @addr: DMA address of SG element data buffer
+ * @len: Length of SG element data buffer, in bytes
+ */
+struct ionic_rxq_sg_desc {
+ struct ionic_rxq_sg_elem {
+ __le64 addr;
+ __le16 len;
+ __le16 rsvd[3];
+ } elems[IONIC_RX_MAX_SG_ELEMS];
+};
+
+/**
+ * struct rxq_comp - Ethernet receive queue completion descriptor
+ * @status: The status of the command (enum status_code)
+ * @num_sg_elems: Number of SG elements used by this descriptor
+ * @comp_index: The index in the descriptor ring for which this
+ * is the completion.
+ * @rss_hash: 32-bit RSS hash
+ * @csum: 16-bit sum of the packet's L2 payload.
+ * If the packet's L2 payload is odd length, an extra
+ * zero-value byte is included in the @csum calculation but
+ * not included in @len.
+ * @vlan_tci: VLAN tag stripped from the packet. Valid if @VLAN is
+ * set. Includes .1p and .1q tags.
+ * @len: Received packet length, in bytes. Excludes FCS.
+ * @csum_calc: Set when the L2 payload checksum was computed
+ * @csum_tcp_ok: The TCP checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * TCP header
+ * @csum_tcp_bad: The TCP checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * TCP header.
+ * @csum_udp_ok: The UDP checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * UDP header
+ * @csum_udp_bad: The UDP checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * UDP header.
+ * @csum_ip_ok: The IPv4 checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * first IPv4 header. If the receive packet
+ * contains both a tunnel IPv4 header and a
+ * transport IPv4 header, the device validates the
+ * checksum for both IPv4 headers.
+ * @csum_ip_bad: The IPv4 checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * first IPv4 header. If the receive packet
+ * contains both a tunnel IPv4 header and a
+ * transport IPv4 header, the device validates the
+ * checksum for both IP headers.
+ * @VLAN: VLAN header was stripped and placed in @vlan_tci.
+ * @pkt_type: Packet type
+ * @color: Color bit.
+ */
+struct ionic_rxq_comp {
+ u8 status;
+ u8 num_sg_elems;
+ __le16 comp_index;
+ __le32 rss_hash;
+ __le16 csum;
+ __le16 vlan_tci;
+ __le16 len;
+ u8 csum_flags;
+#define IONIC_RXQ_COMP_CSUM_F_TCP_OK 0x01
+#define IONIC_RXQ_COMP_CSUM_F_TCP_BAD 0x02
+#define IONIC_RXQ_COMP_CSUM_F_UDP_OK 0x04
+#define IONIC_RXQ_COMP_CSUM_F_UDP_BAD 0x08
+#define IONIC_RXQ_COMP_CSUM_F_IP_OK 0x10
+#define IONIC_RXQ_COMP_CSUM_F_IP_BAD 0x20
+#define IONIC_RXQ_COMP_CSUM_F_VLAN 0x40
+#define IONIC_RXQ_COMP_CSUM_F_CALC 0x80
+ u8 pkt_type_color;
+#define IONIC_RXQ_COMP_PKT_TYPE_MASK 0x0f
+};
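+
+/*
+ * Editor's sketch, not part of the original patch: how an Rx handler
+ * might reduce the csum_flags above to a good/bad verdict before telling
+ * the stack the checksum was verified.  The helper name is hypothetical.
+ */
+static inline bool example_rx_csum_ok(const struct ionic_rxq_comp *comp)
+{
+	if (!(comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC))
+		return false;	/* device did not checksum this packet */
+
+	return !(comp->csum_flags & (IONIC_RXQ_COMP_CSUM_F_TCP_BAD |
+				     IONIC_RXQ_COMP_CSUM_F_UDP_BAD |
+				     IONIC_RXQ_COMP_CSUM_F_IP_BAD));
+}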
+
+enum ionic_pkt_type {
+ IONIC_PKT_TYPE_NON_IP = 0x000,
+ IONIC_PKT_TYPE_IPV4 = 0x001,
+ IONIC_PKT_TYPE_IPV4_TCP = 0x003,
+ IONIC_PKT_TYPE_IPV4_UDP = 0x005,
+ IONIC_PKT_TYPE_IPV6 = 0x008,
+ IONIC_PKT_TYPE_IPV6_TCP = 0x018,
+ IONIC_PKT_TYPE_IPV6_UDP = 0x028,
+};
+
+enum ionic_eth_hw_features {
+ IONIC_ETH_HW_VLAN_TX_TAG = BIT(0),
+ IONIC_ETH_HW_VLAN_RX_STRIP = BIT(1),
+ IONIC_ETH_HW_VLAN_RX_FILTER = BIT(2),
+ IONIC_ETH_HW_RX_HASH = BIT(3),
+ IONIC_ETH_HW_RX_CSUM = BIT(4),
+ IONIC_ETH_HW_TX_SG = BIT(5),
+ IONIC_ETH_HW_RX_SG = BIT(6),
+ IONIC_ETH_HW_TX_CSUM = BIT(7),
+ IONIC_ETH_HW_TSO = BIT(8),
+ IONIC_ETH_HW_TSO_IPV6 = BIT(9),
+ IONIC_ETH_HW_TSO_ECN = BIT(10),
+ IONIC_ETH_HW_TSO_GRE = BIT(11),
+ IONIC_ETH_HW_TSO_GRE_CSUM = BIT(12),
+ IONIC_ETH_HW_TSO_IPXIP4 = BIT(13),
+ IONIC_ETH_HW_TSO_IPXIP6 = BIT(14),
+ IONIC_ETH_HW_TSO_UDP = BIT(15),
+ IONIC_ETH_HW_TSO_UDP_CSUM = BIT(16),
+};
+
+/**
+ * struct q_control_cmd - Queue control command
+ * @opcode: opcode
+ * @type: Queue type
+ * @lif_index: LIF index
+ * @index: Queue index
+ * @oper: Operation (enum q_control_oper)
+ */
+struct ionic_q_control_cmd {
+ u8 opcode;
+ u8 type;
+ __le16 lif_index;
+ __le32 index;
+ u8 oper;
+ u8 rsvd[55];
+};
+
+typedef struct ionic_admin_comp ionic_q_control_comp;
+
+enum ionic_q_control_oper {
+ IONIC_Q_DISABLE = 0,
+ IONIC_Q_ENABLE = 1,
+ IONIC_Q_HANG_RESET = 2,
+};
+
+/**
+ * Physical connection type
+ */
+enum ionic_phy_type {
+ IONIC_PHY_TYPE_NONE = 0,
+ IONIC_PHY_TYPE_COPPER = 1,
+ IONIC_PHY_TYPE_FIBER = 2,
+};
+
+/**
+ * Transceiver status
+ */
+enum ionic_xcvr_state {
+ IONIC_XCVR_STATE_REMOVED = 0,
+ IONIC_XCVR_STATE_INSERTED = 1,
+ IONIC_XCVR_STATE_PENDING = 2,
+ IONIC_XCVR_STATE_SPROM_READ = 3,
+ IONIC_XCVR_STATE_SPROM_READ_ERR = 4,
+};
+
+/**
+ * Supported link modes
+ */
+enum ionic_xcvr_pid {
+ IONIC_XCVR_PID_UNKNOWN = 0,
+
+ /* CU */
+ IONIC_XCVR_PID_QSFP_100G_CR4 = 1,
+ IONIC_XCVR_PID_QSFP_40GBASE_CR4 = 2,
+ IONIC_XCVR_PID_SFP_25GBASE_CR_S = 3,
+ IONIC_XCVR_PID_SFP_25GBASE_CR_L = 4,
+ IONIC_XCVR_PID_SFP_25GBASE_CR_N = 5,
+
+ /* Fiber */
+ IONIC_XCVR_PID_QSFP_100G_AOC = 50,
+ IONIC_XCVR_PID_QSFP_100G_ACC = 51,
+ IONIC_XCVR_PID_QSFP_100G_SR4 = 52,
+ IONIC_XCVR_PID_QSFP_100G_LR4 = 53,
+ IONIC_XCVR_PID_QSFP_100G_ER4 = 54,
+ IONIC_XCVR_PID_QSFP_40GBASE_ER4 = 55,
+ IONIC_XCVR_PID_QSFP_40GBASE_SR4 = 56,
+ IONIC_XCVR_PID_QSFP_40GBASE_LR4 = 57,
+ IONIC_XCVR_PID_QSFP_40GBASE_AOC = 58,
+ IONIC_XCVR_PID_SFP_25GBASE_SR = 59,
+ IONIC_XCVR_PID_SFP_25GBASE_LR = 60,
+ IONIC_XCVR_PID_SFP_25GBASE_ER = 61,
+ IONIC_XCVR_PID_SFP_25GBASE_AOC = 62,
+ IONIC_XCVR_PID_SFP_10GBASE_SR = 63,
+ IONIC_XCVR_PID_SFP_10GBASE_LR = 64,
+ IONIC_XCVR_PID_SFP_10GBASE_LRM = 65,
+ IONIC_XCVR_PID_SFP_10GBASE_ER = 66,
+ IONIC_XCVR_PID_SFP_10GBASE_AOC = 67,
+ IONIC_XCVR_PID_SFP_10GBASE_CU = 68,
+ IONIC_XCVR_PID_QSFP_100G_CWDM4 = 69,
+ IONIC_XCVR_PID_QSFP_100G_PSM4 = 70,
+};
+
+/**
+ * Port types
+ */
+enum ionic_port_type {
+ IONIC_PORT_TYPE_NONE = 0, /* port type not configured */
+ IONIC_PORT_TYPE_ETH = 1, /* port carries ethernet traffic (inband) */
+ IONIC_PORT_TYPE_MGMT = 2, /* port carries mgmt traffic (out-of-band) */
+};
+
+/**
+ * Port config state
+ */
+enum ionic_port_admin_state {
+ IONIC_PORT_ADMIN_STATE_NONE = 0, /* port admin state not configured */
+ IONIC_PORT_ADMIN_STATE_DOWN = 1, /* port is admin disabled */
+ IONIC_PORT_ADMIN_STATE_UP = 2, /* port is admin enabled */
+};
+
+/**
+ * Port operational status
+ */
+enum ionic_port_oper_status {
+ IONIC_PORT_OPER_STATUS_NONE = 0, /* port is disabled */
+ IONIC_PORT_OPER_STATUS_UP = 1, /* port is linked up */
+ IONIC_PORT_OPER_STATUS_DOWN = 2, /* port link status is down */
+};
+
+/**
+ * Ethernet Forward error correction (fec) modes
+ */
+enum ionic_port_fec_type {
+ IONIC_PORT_FEC_TYPE_NONE = 0, /* Disabled */
+ IONIC_PORT_FEC_TYPE_FC = 1, /* FireCode */
+ IONIC_PORT_FEC_TYPE_RS = 2, /* ReedSolomon */
+};
+
+/**
+ * Ethernet pause (flow control) modes
+ */
+enum ionic_port_pause_type {
+ IONIC_PORT_PAUSE_TYPE_NONE = 0, /* Disable Pause */
+ IONIC_PORT_PAUSE_TYPE_LINK = 1, /* Link level pause */
+ IONIC_PORT_PAUSE_TYPE_PFC = 2, /* Priority-Flow control */
+};
+
+/**
+ * Loopback modes
+ */
+enum ionic_port_loopback_mode {
+ IONIC_PORT_LOOPBACK_MODE_NONE = 0, /* Disable loopback */
+ IONIC_PORT_LOOPBACK_MODE_MAC = 1, /* MAC loopback */
+ IONIC_PORT_LOOPBACK_MODE_PHY = 2, /* PHY/Serdes loopback */
+};
+
+/**
+ * Transceiver Status information
+ * @state: Transceiver status (enum xcvr_state)
+ * @phy: Physical connection type (enum phy_type)
+ * @pid: Transceiver link mode (enum pid)
+ * @sprom: Transceiver sprom contents
+ */
+struct ionic_xcvr_status {
+ u8 state;
+ u8 phy;
+ __le16 pid;
+ u8 sprom[256];
+};
+
+/**
+ * Port configuration
+ * @speed: port speed (in Mbps)
+ * @mtu: mtu
+ * @state: port admin state (enum port_admin_state)
+ * @an_enable: autoneg enable
+ * @fec_type: fec type (enum port_fec_type)
+ * @pause_type: pause type (enum port_pause_type)
+ * @loopback_mode: loopback mode (enum port_loopback_mode)
+ */
+union ionic_port_config {
+ struct {
+#define IONIC_SPEED_100G 100000 /* 100G in Mbps */
+#define IONIC_SPEED_50G 50000 /* 50G in Mbps */
+#define IONIC_SPEED_40G 40000 /* 40G in Mbps */
+#define IONIC_SPEED_25G 25000 /* 25G in Mbps */
+#define IONIC_SPEED_10G 10000 /* 10G in Mbps */
+#define IONIC_SPEED_1G 1000 /* 1G in Mbps */
+ __le32 speed;
+ __le32 mtu;
+ u8 state;
+ u8 an_enable;
+ u8 fec_type;
+#define IONIC_PAUSE_TYPE_MASK 0x0f
+#define IONIC_PAUSE_FLAGS_MASK 0xf0
+#define IONIC_PAUSE_F_TX 0x10
+#define IONIC_PAUSE_F_RX 0x20
+ u8 pause_type;
+ u8 loopback_mode;
+ };
+ __le32 words[64];
+};
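+
+/*
+ * Editor's sketch, not part of the original patch: pause_type packs a
+ * type (enum port_pause_type) in the low nibble and Tx/Rx enable flags
+ * in the high nibble, so it would be unpacked roughly as follows.
+ */
+static inline void example_unpack_pause(u8 pause_type, u8 *type,
+					bool *tx_ena, bool *rx_ena)
+{
+	*type = pause_type & IONIC_PAUSE_TYPE_MASK;
+	*tx_ena = !!(pause_type & IONIC_PAUSE_F_TX);
+	*rx_ena = !!(pause_type & IONIC_PAUSE_F_RX);
+}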
+
+/**
+ * Port Status information
+ * @status: link status (enum port_oper_status)
+ * @id: port id
+ * @speed: link speed (in Mbps)
+ * @xcvr: transceiver status
+ */
+struct ionic_port_status {
+ __le32 id;
+ __le32 speed;
+ u8 status;
+ u8 rsvd[51];
+ struct ionic_xcvr_status xcvr;
+};
+
+/**
+ * struct port_identify_cmd - Port identify command
+ * @opcode: opcode
+ * @index: port index
+ * @ver: Highest version of identify supported by driver
+ */
+struct ionic_port_identify_cmd {
+ u8 opcode;
+ u8 index;
+ u8 ver;
+ u8 rsvd[61];
+};
+
+/**
+ * struct port_identify_comp - Port identify command completion
+ * @status: The status of the command (enum status_code)
+ * @ver: Version of identify returned by device
+ */
+struct ionic_port_identify_comp {
+ u8 status;
+ u8 ver;
+ u8 rsvd[14];
+};
+
+/**
+ * struct port_init_cmd - Port initialization command
+ * @opcode: opcode
+ * @index: port index
+ * @info_pa: destination address for port info (struct port_info)
+ */
+struct ionic_port_init_cmd {
+ u8 opcode;
+ u8 index;
+ u8 rsvd[6];
+ __le64 info_pa;
+ u8 rsvd2[48];
+};
+
+/**
+ * struct port_init_comp - Port initialization command completion
+ * @status: The status of the command (enum status_code)
+ */
+struct ionic_port_init_comp {
+ u8 status;
+ u8 rsvd[15];
+};
+
+/**
+ * struct port_reset_cmd - Port reset command
+ * @opcode: opcode
+ * @index: port index
+ */
+struct ionic_port_reset_cmd {
+ u8 opcode;
+ u8 index;
+ u8 rsvd[62];
+};
+
+/**
+ * struct port_reset_comp - Port reset command completion
+ * @status: The status of the command (enum status_code)
+ */
+struct ionic_port_reset_comp {
+ u8 status;
+ u8 rsvd[15];
+};
+
+/**
+ * enum stats_ctl_cmd - List of commands for stats control
+ */
+enum ionic_stats_ctl_cmd {
+ IONIC_STATS_CTL_RESET = 0,
+};
+
+/**
+ * enum ionic_port_attr - List of device attributes
+ */
+enum ionic_port_attr {
+ IONIC_PORT_ATTR_STATE = 0,
+ IONIC_PORT_ATTR_SPEED = 1,
+ IONIC_PORT_ATTR_MTU = 2,
+ IONIC_PORT_ATTR_AUTONEG = 3,
+ IONIC_PORT_ATTR_FEC = 4,
+ IONIC_PORT_ATTR_PAUSE = 5,
+ IONIC_PORT_ATTR_LOOPBACK = 6,
+ IONIC_PORT_ATTR_STATS_CTRL = 7,
+};
+
+/**
+ * struct port_setattr_cmd - Set port attributes on the NIC
+ * @opcode: Opcode
+ * @index: port index
+ * @attr: Attribute type (enum ionic_port_attr)
+ */
+struct ionic_port_setattr_cmd {
+ u8 opcode;
+ u8 index;
+ u8 attr;
+ u8 rsvd;
+ union {
+ u8 state;
+ __le32 speed;
+ __le32 mtu;
+ u8 an_enable;
+ u8 fec_type;
+ u8 pause_type;
+ u8 loopback_mode;
+ u8 stats_ctl;
+ u8 rsvd2[60];
+ };
+};
+
+/**
+ * struct port_setattr_comp - Port set attr command completion
+ * @status: The status of the command (enum status_code)
+ * @color: Color bit
+ */
+struct ionic_port_setattr_comp {
+ u8 status;
+ u8 rsvd[14];
+ u8 color;
+};
+
+/**
+ * struct port_getattr_cmd - Get port attributes from the NIC
+ * @opcode: Opcode
+ * @index: port index
+ * @attr: Attribute type (enum ionic_port_attr)
+ */
+struct ionic_port_getattr_cmd {
+ u8 opcode;
+ u8 index;
+ u8 attr;
+ u8 rsvd[61];
+};
+
+/**
+ * struct port_getattr_comp - Port get attr command completion
+ * @status: The status of the command (enum status_code)
+ * @color: Color bit
+ */
+struct ionic_port_getattr_comp {
+ u8 status;
+ u8 rsvd[3];
+ union {
+ u8 state;
+ __le32 speed;
+ __le32 mtu;
+ u8 an_enable;
+ u8 fec_type;
+ u8 pause_type;
+ u8 loopback_mode;
+ u8 rsvd2[11];
+ };
+ u8 color;
+};
+
+/**
+ * struct lif_status - Lif status register
+ * @eid: most recent NotifyQ event id
+ * @port_num: port the lif is connected to
+ * @link_status: port status (enum port_oper_status)
+ * @link_speed: speed of link in Mbps
+ * @link_down_count: number of times the link has gone from up to down
+ */
+struct ionic_lif_status {
+ __le64 eid;
+ u8 port_num;
+ u8 rsvd;
+ __le16 link_status;
+	__le32 link_speed;		/* units of 1Mbps: e.g. 10000 = 10Gbps */
+ __le16 link_down_count;
+ u8 rsvd2[46];
+};
+
+/**
+ * struct lif_reset_cmd - LIF reset command
+ * @opcode: opcode
+ * @index: LIF index
+ */
+struct ionic_lif_reset_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 index;
+ __le32 rsvd2[15];
+};
+
+typedef struct ionic_admin_comp ionic_lif_reset_comp;
+
+enum ionic_dev_state {
+ IONIC_DEV_DISABLE = 0,
+ IONIC_DEV_ENABLE = 1,
+ IONIC_DEV_HANG_RESET = 2,
+};
+
+/**
+ * enum dev_attr - List of device attributes
+ */
+enum ionic_dev_attr {
+ IONIC_DEV_ATTR_STATE = 0,
+ IONIC_DEV_ATTR_NAME = 1,
+ IONIC_DEV_ATTR_FEATURES = 2,
+};
+
+/**
+ * struct dev_setattr_cmd - Set Device attributes on the NIC
+ * @opcode: Opcode
+ * @attr: Attribute type (enum dev_attr)
+ * @state: Device state (enum dev_state)
+ * @name: The bus info, e.g. PCI slot-device-function, 0 terminated
+ * @features: Device features
+ */
+struct ionic_dev_setattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 rsvd;
+ union {
+ u8 state;
+ char name[IONIC_IFNAMSIZ];
+ __le64 features;
+ u8 rsvd2[60];
+ };
+};
+
+/**
+ * struct dev_setattr_comp - Device set attr command completion
+ * @status: The status of the command (enum status_code)
+ * @features: Device features
+ * @color: Color bit
+ */
+struct ionic_dev_setattr_comp {
+ u8 status;
+ u8 rsvd[3];
+ union {
+ __le64 features;
+ u8 rsvd2[11];
+ };
+ u8 color;
+};
+
+/**
+ * struct dev_getattr_cmd - Get Device attributes from the NIC
+ * @opcode: opcode
+ * @attr: Attribute type (enum dev_attr)
+ */
+struct ionic_dev_getattr_cmd {
+ u8 opcode;
+ u8 attr;
+ u8 rsvd[62];
+};
+
+/**
+ * struct dev_getattr_comp - Device get attr command completion
+ * @status: The status of the command (enum status_code)
+ * @features: Device features
+ * @color: Color bit
+ */
+struct ionic_dev_getattr_comp {
+ u8 status;
+ u8 rsvd[3];
+ union {
+ __le64 features;
+ u8 rsvd2[11];
+ };
+ u8 color;
+};
+
+/**
+ * RSS parameters
+ */
+#define IONIC_RSS_HASH_KEY_SIZE 40
+
+enum ionic_rss_hash_types {
+ IONIC_RSS_TYPE_IPV4 = BIT(0),
+ IONIC_RSS_TYPE_IPV4_TCP = BIT(1),
+ IONIC_RSS_TYPE_IPV4_UDP = BIT(2),
+ IONIC_RSS_TYPE_IPV6 = BIT(3),
+ IONIC_RSS_TYPE_IPV6_TCP = BIT(4),
+ IONIC_RSS_TYPE_IPV6_UDP = BIT(5),
+};
+
+/**
+ * enum lif_attr - List of LIF attributes
+ */
+enum ionic_lif_attr {
+ IONIC_LIF_ATTR_STATE = 0,
+ IONIC_LIF_ATTR_NAME = 1,
+ IONIC_LIF_ATTR_MTU = 2,
+ IONIC_LIF_ATTR_MAC = 3,
+ IONIC_LIF_ATTR_FEATURES = 4,
+ IONIC_LIF_ATTR_RSS = 5,
+ IONIC_LIF_ATTR_STATS_CTRL = 6,
+};
+
+/**
+ * struct lif_setattr_cmd - Set LIF attributes on the NIC
+ * @opcode: Opcode
+ * @attr: Attribute type (enum lif_attr)
+ * @index: LIF index
+ * @state: lif state (enum lif_state)
+ * @name: The netdev name string, 0 terminated
+ * @mtu: Mtu
+ * @mac: Station mac
+ * @features: Features (enum eth_hw_features)
+ * @rss: RSS properties
+ * @types: The hash types to enable (see rss_hash_types).
+ * @key: The hash secret key.
+ * @addr: Address for the indirection table shared memory.
+ * @stats_ctl: stats control commands (enum stats_ctl_cmd)
+ */
+struct ionic_lif_setattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 index;
+ union {
+ u8 state;
+ char name[IONIC_IFNAMSIZ];
+ __le32 mtu;
+ u8 mac[6];
+ __le64 features;
+ struct {
+ __le16 types;
+ u8 key[IONIC_RSS_HASH_KEY_SIZE];
+ u8 rsvd[6];
+ __le64 addr;
+ } rss;
+ u8 stats_ctl;
+ u8 rsvd[60];
+ };
+};
+
+/**
+ * struct lif_setattr_comp - LIF set attr command completion
+ * @status: The status of the command (enum status_code)
+ * @comp_index: The index in the descriptor ring for which this
+ * is the completion.
+ * @features: features (enum eth_hw_features)
+ * @color: Color bit
+ */
+struct ionic_lif_setattr_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ union {
+ __le64 features;
+ u8 rsvd2[11];
+ };
+ u8 color;
+};
+
+/**
+ * struct lif_getattr_cmd - Get LIF attributes from the NIC
+ * @opcode: Opcode
+ * @attr: Attribute type (enum lif_attr)
+ * @index: LIF index
+ */
+struct ionic_lif_getattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 index;
+ u8 rsvd[60];
+};
+
+/**
+ * struct lif_getattr_comp - LIF get attr command completion
+ * @status: The status of the command (enum status_code)
+ * @comp_index: The index in the descriptor ring for which this
+ * is the completion.
+ * @state: lif state (enum lif_state)
+ * @name: The netdev name string, 0 terminated
+ * @mtu: Mtu
+ * @mac: Station mac
+ * @features: Features (enum eth_hw_features)
+ * @color: Color bit
+ */
+struct ionic_lif_getattr_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ union {
+ u8 state;
+ __le32 mtu;
+ u8 mac[6];
+ __le64 features;
+ u8 rsvd2[11];
+ };
+ u8 color;
+};
+
+enum ionic_rx_mode {
+ IONIC_RX_MODE_F_UNICAST = BIT(0),
+ IONIC_RX_MODE_F_MULTICAST = BIT(1),
+ IONIC_RX_MODE_F_BROADCAST = BIT(2),
+ IONIC_RX_MODE_F_PROMISC = BIT(3),
+ IONIC_RX_MODE_F_ALLMULTI = BIT(4),
+};
+
+/**
+ * struct rx_mode_set_cmd - Set LIF's Rx mode command
+ * @opcode: opcode
+ * @lif_index: LIF index
+ * @rx_mode: Rx mode flags:
+ * IONIC_RX_MODE_F_UNICAST: Accept known unicast packets.
+ * IONIC_RX_MODE_F_MULTICAST: Accept known multicast packets.
+ * IONIC_RX_MODE_F_BROADCAST: Accept broadcast packets.
+ * IONIC_RX_MODE_F_PROMISC: Accept any packets.
+ * IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets.
+ */
+struct ionic_rx_mode_set_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 lif_index;
+ __le16 rx_mode;
+ __le16 rsvd2[29];
+};
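+
+/*
+ * Editor's sketch, not part of the original patch: composing rx_mode from
+ * netdev state, roughly as an ndo_set_rx_mode path might (assumes
+ * linux/netdevice.h is visible; the helper name is hypothetical).
+ */
+static inline u16 example_rx_mode_from_netdev(const struct net_device *netdev)
+{
+	u16 rx_mode = IONIC_RX_MODE_F_UNICAST |
+		      IONIC_RX_MODE_F_MULTICAST |
+		      IONIC_RX_MODE_F_BROADCAST;
+
+	if (netdev->flags & IFF_PROMISC)
+		rx_mode |= IONIC_RX_MODE_F_PROMISC;
+	if (netdev->flags & IFF_ALLMULTI)
+		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
+
+	return rx_mode;
+}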
+
+typedef struct ionic_admin_comp ionic_rx_mode_set_comp;
+
+enum ionic_rx_filter_match_type {
+ IONIC_RX_FILTER_MATCH_VLAN = 0,
+ IONIC_RX_FILTER_MATCH_MAC,
+ IONIC_RX_FILTER_MATCH_MAC_VLAN,
+};
+
+/**
+ * struct rx_filter_add_cmd - Add LIF Rx filter command
+ * @opcode: opcode
+ * @qtype: Queue type
+ * @lif_index: LIF index
+ * @qid: Queue ID
+ * @match: Rx filter match type. (See IONIC_RX_FILTER_MATCH_xxx)
+ * @vlan: VLAN ID
+ * @addr: MAC address (network-byte order)
+ */
+struct ionic_rx_filter_add_cmd {
+ u8 opcode;
+ u8 qtype;
+ __le16 lif_index;
+ __le32 qid;
+ __le16 match;
+ union {
+ struct {
+ __le16 vlan;
+ } vlan;
+ struct {
+ u8 addr[6];
+ } mac;
+ struct {
+ __le16 vlan;
+ u8 addr[6];
+ } mac_vlan;
+ u8 rsvd[54];
+ };
+};
+
+/**
+ * struct rx_filter_add_comp - Add LIF Rx filter command completion
+ * @status: The status of the command (enum status_code)
+ * @comp_index: The index in the descriptor ring for which this
+ * is the completion.
+ * @filter_id: Filter ID
+ * @color: Color bit.
+ */
+struct ionic_rx_filter_add_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ __le32 filter_id;
+ u8 rsvd2[7];
+ u8 color;
+};
+
+/**
+ * struct rx_filter_del_cmd - Delete LIF Rx filter command
+ * @opcode: opcode
+ * @lif_index: LIF index
+ * @filter_id: Filter ID
+ */
+struct ionic_rx_filter_del_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 lif_index;
+ __le32 filter_id;
+ u8 rsvd2[56];
+};
+
+typedef struct ionic_admin_comp ionic_rx_filter_del_comp;
+
+/**
+ * struct qos_identify_cmd - QoS identify command
+ * @opcode: opcode
+ * @ver: Highest version of identify supported by driver
+ */
+struct ionic_qos_identify_cmd {
+ u8 opcode;
+ u8 ver;
+ u8 rsvd[62];
+};
+
+/**
+ * struct qos_identify_comp - QoS identify command completion
+ * @status: The status of the command (enum status_code)
+ * @ver: Version of identify returned by device
+ */
+struct ionic_qos_identify_comp {
+ u8 status;
+ u8 ver;
+ u8 rsvd[14];
+};
+
+#define IONIC_QOS_CLASS_MAX 7
+#define IONIC_QOS_CLASS_NAME_SZ 32
+#define IONIC_QOS_DSCP_MAX_VALUES 64
+
+/**
+ * enum qos_class
+ */
+enum ionic_qos_class {
+ IONIC_QOS_CLASS_DEFAULT = 0,
+ IONIC_QOS_CLASS_USER_DEFINED_1 = 1,
+ IONIC_QOS_CLASS_USER_DEFINED_2 = 2,
+ IONIC_QOS_CLASS_USER_DEFINED_3 = 3,
+ IONIC_QOS_CLASS_USER_DEFINED_4 = 4,
+ IONIC_QOS_CLASS_USER_DEFINED_5 = 5,
+ IONIC_QOS_CLASS_USER_DEFINED_6 = 6,
+};
+
+/**
+ * enum qos_class_type - Traffic classification criteria
+ */
+enum ionic_qos_class_type {
+ IONIC_QOS_CLASS_TYPE_NONE = 0,
+ IONIC_QOS_CLASS_TYPE_PCP = 1, /* Dot1Q pcp */
+ IONIC_QOS_CLASS_TYPE_DSCP = 2, /* IP dscp */
+};
+
+/**
+ * enum qos_sched_type - Qos class scheduling type
+ */
+enum ionic_qos_sched_type {
+ IONIC_QOS_SCHED_TYPE_STRICT = 0, /* Strict priority */
+ IONIC_QOS_SCHED_TYPE_DWRR = 1, /* Deficit weighted round-robin */
+};
+
+/**
+ * union qos_config - Qos configuration structure
+ * @flags: Configuration flags
+ * IONIC_QOS_CONFIG_F_ENABLE enable
+ * IONIC_QOS_CONFIG_F_DROP drop/nodrop
+ * IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP enable dot1q pcp rewrite
+ * IONIC_QOS_CONFIG_F_RW_IP_DSCP enable ip dscp rewrite
+ * @sched_type: Qos class scheduling type (enum qos_sched_type)
+ * @class_type: Qos class type (enum qos_class_type)
+ * @pause_type: Qos pause type (enum qos_pause_type)
+ * @name: Qos class name
+ * @mtu: MTU of the class
+ * @pfc_cos: Pcp value for pause frames (valid iff F_NODROP)
+ * @dwrr_weight: Qos class scheduling weight
+ * @strict_rlmt: Rate limit for strict priority scheduling
+ * @rw_dot1q_pcp: Rewrite dot1q pcp to this value (valid iff F_RW_DOT1Q_PCP)
+ * @rw_ip_dscp: Rewrite ip dscp to this value (valid iff F_RW_IP_DSCP)
+ * @dot1q_pcp: Dot1q pcp value
+ * @ndscp: Number of valid dscp values in the ip_dscp field
+ * @ip_dscp: IP dscp values
+ */
+union ionic_qos_config {
+ struct {
+#define IONIC_QOS_CONFIG_F_ENABLE BIT(0)
+#define IONIC_QOS_CONFIG_F_DROP BIT(1)
+#define IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP BIT(2)
+#define IONIC_QOS_CONFIG_F_RW_IP_DSCP BIT(3)
+ u8 flags;
+ u8 sched_type;
+ u8 class_type;
+ u8 pause_type;
+ char name[IONIC_QOS_CLASS_NAME_SZ];
+ __le32 mtu;
+ /* flow control */
+ u8 pfc_cos;
+ /* scheduler */
+ union {
+ u8 dwrr_weight;
+ __le64 strict_rlmt;
+ };
+ /* marking */
+ union {
+ u8 rw_dot1q_pcp;
+ u8 rw_ip_dscp;
+ };
+ /* classification */
+ union {
+ u8 dot1q_pcp;
+ struct {
+ u8 ndscp;
+ u8 ip_dscp[IONIC_QOS_DSCP_MAX_VALUES];
+ };
+ };
+ };
+ __le32 words[64];
+};
+
+/**
+ * union qos_identity - QoS identity structure
+ * @version: Version of the identify structure
+ * @type: QoS system type
+ * @nclasses: Number of usable QoS classes
+ * @config: Current configuration of classes
+ */
+union ionic_qos_identity {
+ struct {
+ u8 version;
+ u8 type;
+ u8 rsvd[62];
+ union ionic_qos_config config[IONIC_QOS_CLASS_MAX];
+ };
+ __le32 words[512];
+};
+
+/**
+ * struct qos_init_cmd - QoS config init command
+ * @opcode: Opcode
+ * @group: Qos class id
+ * @info_pa: destination address for qos info
+ */
+struct ionic_qos_init_cmd {
+ u8 opcode;
+ u8 group;
+ u8 rsvd[6];
+ __le64 info_pa;
+ u8 rsvd1[48];
+};
+
+typedef struct ionic_admin_comp ionic_qos_init_comp;
+
+/**
+ * struct qos_reset_cmd - Qos config reset command
+ * @opcode: Opcode
+ */
+struct ionic_qos_reset_cmd {
+ u8 opcode;
+ u8 group;
+ u8 rsvd[62];
+};
+
+typedef struct ionic_admin_comp ionic_qos_reset_comp;
+
+/**
+ * struct fw_download_cmd - Firmware download command
+ * @opcode: opcode
+ * @addr: dma address of the firmware buffer
+ * @offset: offset of the firmware buffer within the full image
+ * @length: number of valid bytes in the firmware buffer
+ */
+struct ionic_fw_download_cmd {
+ u8 opcode;
+ u8 rsvd[3];
+ __le32 offset;
+ __le64 addr;
+ __le32 length;
+};
+
+typedef struct ionic_admin_comp ionic_fw_download_comp;
+
+enum ionic_fw_control_oper {
+ IONIC_FW_RESET = 0, /* Reset firmware */
+ IONIC_FW_INSTALL = 1, /* Install firmware */
+ IONIC_FW_ACTIVATE = 2, /* Activate firmware */
+};
+
+/**
+ * struct fw_control_cmd - Firmware control command
+ * @opcode: opcode
+ * @oper: firmware control operation (enum fw_control_oper)
+ * @slot: slot to activate
+ */
+struct ionic_fw_control_cmd {
+ u8 opcode;
+ u8 rsvd[3];
+ u8 oper;
+ u8 slot;
+ u8 rsvd1[58];
+};
+
+/**
+ * struct fw_control_comp - Firmware control completion
+ * @status:	The status of the command (enum status_code)
+ * @comp_index:	The index in the descriptor ring for which this
+ *		is the completion.
+ * @slot:	slot where the firmware was installed
+ */
+struct ionic_fw_control_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 slot;
+ u8 rsvd1[10];
+ u8 color;
+};
+
+/******************************************************************
+ ******************* RDMA Commands ********************************
+ ******************************************************************/
+
+/**
+ * struct rdma_reset_cmd - Reset RDMA LIF cmd
+ * @opcode: opcode
+ * @lif_index: lif index
+ *
+ * There is no rdma specific dev command completion struct. Completion uses
+ * the common struct admin_comp. Only the status is indicated. Nonzero status
+ * means the LIF does not support rdma.
+ **/
+struct ionic_rdma_reset_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 lif_index;
+ u8 rsvd2[60];
+};
+
+/**
+ * struct rdma_queue_cmd - Create RDMA Queue command
+ * @opcode: opcode, 52, 53
+ * @lif_index: lif index
+ * @qid_ver: (qid | (rdma version << 24))
+ * @cid: intr, eq_id, or cq_id
+ * @dbid: doorbell page id
+ * @depth_log2: log base two of queue depth
+ * @stride_log2: log base two of queue stride
+ * @dma_addr: address of the queue memory
+ * @xxx_table_index: temporary; contiguous queues should not need a page table.
+ *
+ * The same command struct is used to create an rdma event queue, completion
+ * queue, or rdma admin queue. The cid is an interrupt number for an event
+ * queue, an event queue id for a completion queue, or a completion queue id
+ * for an rdma admin queue.
+ *
+ * The queue created via a dev command must be contiguous in dma space.
+ *
+ * The dev commands are intended only to be used during driver initialization,
+ * to create queues supporting the rdma admin queue. Other queues, and other
+ * types of rdma resources like memory regions, will be created and registered
+ * via the rdma admin queue, and will support a more complete interface
+ * providing scatter gather lists for larger, scattered queue buffers and
+ * memory registration.
+ *
+ * There is no rdma specific dev command completion struct. Completion uses
+ * the common struct admin_comp. Only the status is indicated.
+ **/
+struct ionic_rdma_queue_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 lif_index;
+ __le32 qid_ver;
+ __le32 cid;
+ __le16 dbid;
+ u8 depth_log2;
+ u8 stride_log2;
+ __le64 dma_addr;
+ u8 rsvd2[36];
+ __le32 xxx_table_index;
+};
+
+/******************************************************************
+ ******************* Notify Events ********************************
+ ******************************************************************/
+
+/**
+ * struct notifyq_event
+ * @eid: event number
+ * @ecode: event code
+ * @data: unspecified data about the event
+ *
+ * This is the generic event report struct from which the other
+ * actual events will be formed.
+ */
+struct ionic_notifyq_event {
+ __le64 eid;
+ __le16 ecode;
+ u8 data[54];
+};
+
+/**
+ * struct link_change_event
+ * @eid: event number
+ * @ecode: event code = EVENT_OPCODE_LINK_CHANGE
+ * @link_status: link up or down, with error bits (enum port_oper_status)
+ * @link_speed: speed of the network link
+ *
+ * Sent when the network link state changes between UP and DOWN
+ */
+struct ionic_link_change_event {
+ __le64 eid;
+ __le16 ecode;
+ __le16 link_status;
+ __le32 link_speed; /* units of 1Mbps: e.g. 10000 = 10Gbps */
+ u8 rsvd[48];
+};
+
+/**
+ * struct reset_event
+ * @eid: event number
+ * @ecode: event code = EVENT_OPCODE_RESET
+ * @reset_code: reset type
+ * @state: 0=pending, 1=complete, 2=error
+ *
+ * Sent when the NIC or some subsystem is going to be or
+ * has been reset.
+ */
+struct ionic_reset_event {
+ __le64 eid;
+ __le16 ecode;
+ u8 reset_code;
+ u8 state;
+ u8 rsvd[52];
+};
+
+/**
+ * struct heartbeat_event
+ * @eid: event number
+ * @ecode: event code = EVENT_OPCODE_HEARTBEAT
+ *
+ * Sent periodically by the NIC to indicate continued health
+ */
+struct ionic_heartbeat_event {
+ __le64 eid;
+ __le16 ecode;
+ u8 rsvd[54];
+};
+
+/**
+ * struct log_event
+ * @eid: event number
+ * @ecode: event code = EVENT_OPCODE_LOG
+ * @data: log data
+ *
+ * Sent to notify the driver of an internal error.
+ */
+struct ionic_log_event {
+ __le64 eid;
+ __le16 ecode;
+ u8 data[54];
+};
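+
+/*
+ * Editor's sketch, not part of the original patch: dispatching the generic
+ * notifyq event by ecode and casting to the specific event structs above.
+ * The function name is hypothetical and the handlers are elided.
+ */
+static inline void example_notifyq_dispatch(const struct ionic_notifyq_event *ev)
+{
+	switch (le16_to_cpu(ev->ecode)) {
+	case IONIC_EVENT_LINK_CHANGE:
+		/* handle (const struct ionic_link_change_event *)ev */
+		break;
+	case IONIC_EVENT_RESET:
+		/* handle (const struct ionic_reset_event *)ev */
+		break;
+	case IONIC_EVENT_HEARTBEAT:
+	case IONIC_EVENT_LOG:
+	default:
+		break;
+	}
+}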
+
+/**
+ * struct port_stats
+ */
+struct ionic_port_stats {
+ __le64 frames_rx_ok;
+ __le64 frames_rx_all;
+ __le64 frames_rx_bad_fcs;
+ __le64 frames_rx_bad_all;
+ __le64 octets_rx_ok;
+ __le64 octets_rx_all;
+ __le64 frames_rx_unicast;
+ __le64 frames_rx_multicast;
+ __le64 frames_rx_broadcast;
+ __le64 frames_rx_pause;
+ __le64 frames_rx_bad_length;
+ __le64 frames_rx_undersized;
+ __le64 frames_rx_oversized;
+ __le64 frames_rx_fragments;
+ __le64 frames_rx_jabber;
+ __le64 frames_rx_pripause;
+ __le64 frames_rx_stomped_crc;
+ __le64 frames_rx_too_long;
+ __le64 frames_rx_vlan_good;
+ __le64 frames_rx_dropped;
+ __le64 frames_rx_less_than_64b;
+ __le64 frames_rx_64b;
+ __le64 frames_rx_65b_127b;
+ __le64 frames_rx_128b_255b;
+ __le64 frames_rx_256b_511b;
+ __le64 frames_rx_512b_1023b;
+ __le64 frames_rx_1024b_1518b;
+ __le64 frames_rx_1519b_2047b;
+ __le64 frames_rx_2048b_4095b;
+ __le64 frames_rx_4096b_8191b;
+ __le64 frames_rx_8192b_9215b;
+ __le64 frames_rx_other;
+ __le64 frames_tx_ok;
+ __le64 frames_tx_all;
+ __le64 frames_tx_bad;
+ __le64 octets_tx_ok;
+ __le64 octets_tx_total;
+ __le64 frames_tx_unicast;
+ __le64 frames_tx_multicast;
+ __le64 frames_tx_broadcast;
+ __le64 frames_tx_pause;
+ __le64 frames_tx_pripause;
+ __le64 frames_tx_vlan;
+ __le64 frames_tx_less_than_64b;
+ __le64 frames_tx_64b;
+ __le64 frames_tx_65b_127b;
+ __le64 frames_tx_128b_255b;
+ __le64 frames_tx_256b_511b;
+ __le64 frames_tx_512b_1023b;
+ __le64 frames_tx_1024b_1518b;
+ __le64 frames_tx_1519b_2047b;
+ __le64 frames_tx_2048b_4095b;
+ __le64 frames_tx_4096b_8191b;
+ __le64 frames_tx_8192b_9215b;
+ __le64 frames_tx_other;
+ __le64 frames_tx_pri_0;
+ __le64 frames_tx_pri_1;
+ __le64 frames_tx_pri_2;
+ __le64 frames_tx_pri_3;
+ __le64 frames_tx_pri_4;
+ __le64 frames_tx_pri_5;
+ __le64 frames_tx_pri_6;
+ __le64 frames_tx_pri_7;
+ __le64 frames_rx_pri_0;
+ __le64 frames_rx_pri_1;
+ __le64 frames_rx_pri_2;
+ __le64 frames_rx_pri_3;
+ __le64 frames_rx_pri_4;
+ __le64 frames_rx_pri_5;
+ __le64 frames_rx_pri_6;
+ __le64 frames_rx_pri_7;
+ __le64 tx_pripause_0_1us_count;
+ __le64 tx_pripause_1_1us_count;
+ __le64 tx_pripause_2_1us_count;
+ __le64 tx_pripause_3_1us_count;
+ __le64 tx_pripause_4_1us_count;
+ __le64 tx_pripause_5_1us_count;
+ __le64 tx_pripause_6_1us_count;
+ __le64 tx_pripause_7_1us_count;
+ __le64 rx_pripause_0_1us_count;
+ __le64 rx_pripause_1_1us_count;
+ __le64 rx_pripause_2_1us_count;
+ __le64 rx_pripause_3_1us_count;
+ __le64 rx_pripause_4_1us_count;
+ __le64 rx_pripause_5_1us_count;
+ __le64 rx_pripause_6_1us_count;
+ __le64 rx_pripause_7_1us_count;
+ __le64 rx_pause_1us_count;
+ __le64 frames_tx_truncated;
+};
+
+struct ionic_mgmt_port_stats {
+ __le64 frames_rx_ok;
+ __le64 frames_rx_all;
+ __le64 frames_rx_bad_fcs;
+ __le64 frames_rx_bad_all;
+ __le64 octets_rx_ok;
+ __le64 octets_rx_all;
+ __le64 frames_rx_unicast;
+ __le64 frames_rx_multicast;
+ __le64 frames_rx_broadcast;
+ __le64 frames_rx_pause;
+ __le64 frames_rx_bad_length0;
+ __le64 frames_rx_undersized1;
+ __le64 frames_rx_oversized2;
+ __le64 frames_rx_fragments3;
+ __le64 frames_rx_jabber4;
+ __le64 frames_rx_64b5;
+ __le64 frames_rx_65b_127b6;
+ __le64 frames_rx_128b_255b7;
+ __le64 frames_rx_256b_511b8;
+ __le64 frames_rx_512b_1023b9;
+ __le64 frames_rx_1024b_1518b0;
+ __le64 frames_rx_gt_1518b1;
+ __le64 frames_rx_fifo_full2;
+ __le64 frames_tx_ok3;
+ __le64 frames_tx_all4;
+ __le64 frames_tx_bad5;
+ __le64 octets_tx_ok6;
+ __le64 octets_tx_total7;
+ __le64 frames_tx_unicast8;
+ __le64 frames_tx_multicast9;
+ __le64 frames_tx_broadcast0;
+ __le64 frames_tx_pause1;
+};
+
+/**
+ * struct port_identity - port identity structure
+ * @version: identity structure version
+ * @type: type of port (enum port_type)
+ * @num_lanes: number of lanes for the port
+ * @autoneg: autoneg supported
+ * @min_frame_size: minimum frame size supported
+ * @max_frame_size: maximum frame size supported
+ * @fec_type: supported fec types
+ * @pause_type: supported pause types
+ * @loopback_mode: supported loopback mode
+ * @speeds: supported speeds
+ * @config: current port configuration
+ */
+union ionic_port_identity {
+ struct {
+ u8 version;
+ u8 type;
+ u8 num_lanes;
+ u8 autoneg;
+ __le32 min_frame_size;
+ __le32 max_frame_size;
+ u8 fec_type[4];
+ u8 pause_type[2];
+ u8 loopback_mode[2];
+ __le32 speeds[16];
+ u8 rsvd2[44];
+ union ionic_port_config config;
+ };
+ __le32 words[512];
+};
+
+/**
+ * struct port_info - port info structure
+ * @config: port configuration
+ * @status: port status
+ * @stats: port stats
+ */
+struct ionic_port_info {
+ union ionic_port_config config;
+ struct ionic_port_status status;
+ struct ionic_port_stats stats;
+};
+
+/**
+ * struct lif_stats
+ */
+struct ionic_lif_stats {
+ /* RX */
+ __le64 rx_ucast_bytes;
+ __le64 rx_ucast_packets;
+ __le64 rx_mcast_bytes;
+ __le64 rx_mcast_packets;
+ __le64 rx_bcast_bytes;
+ __le64 rx_bcast_packets;
+ __le64 rsvd0;
+ __le64 rsvd1;
+ /* RX drops */
+ __le64 rx_ucast_drop_bytes;
+ __le64 rx_ucast_drop_packets;
+ __le64 rx_mcast_drop_bytes;
+ __le64 rx_mcast_drop_packets;
+ __le64 rx_bcast_drop_bytes;
+ __le64 rx_bcast_drop_packets;
+ __le64 rx_dma_error;
+ __le64 rsvd2;
+ /* TX */
+ __le64 tx_ucast_bytes;
+ __le64 tx_ucast_packets;
+ __le64 tx_mcast_bytes;
+ __le64 tx_mcast_packets;
+ __le64 tx_bcast_bytes;
+ __le64 tx_bcast_packets;
+ __le64 rsvd3;
+ __le64 rsvd4;
+ /* TX drops */
+ __le64 tx_ucast_drop_bytes;
+ __le64 tx_ucast_drop_packets;
+ __le64 tx_mcast_drop_bytes;
+ __le64 tx_mcast_drop_packets;
+ __le64 tx_bcast_drop_bytes;
+ __le64 tx_bcast_drop_packets;
+ __le64 tx_dma_error;
+ __le64 rsvd5;
+ /* Rx Queue/Ring drops */
+ __le64 rx_queue_disabled;
+ __le64 rx_queue_empty;
+ __le64 rx_queue_error;
+ __le64 rx_desc_fetch_error;
+ __le64 rx_desc_data_error;
+ __le64 rsvd6;
+ __le64 rsvd7;
+ __le64 rsvd8;
+ /* Tx Queue/Ring drops */
+ __le64 tx_queue_disabled;
+ __le64 tx_queue_error;
+ __le64 tx_desc_fetch_error;
+ __le64 tx_desc_data_error;
+ __le64 rsvd9;
+ __le64 rsvd10;
+ __le64 rsvd11;
+ __le64 rsvd12;
+
+ /* RDMA/ROCE TX */
+ __le64 tx_rdma_ucast_bytes;
+ __le64 tx_rdma_ucast_packets;
+ __le64 tx_rdma_mcast_bytes;
+ __le64 tx_rdma_mcast_packets;
+ __le64 tx_rdma_cnp_packets;
+ __le64 rsvd13;
+ __le64 rsvd14;
+ __le64 rsvd15;
+
+ /* RDMA/ROCE RX */
+ __le64 rx_rdma_ucast_bytes;
+ __le64 rx_rdma_ucast_packets;
+ __le64 rx_rdma_mcast_bytes;
+ __le64 rx_rdma_mcast_packets;
+ __le64 rx_rdma_cnp_packets;
+ __le64 rx_rdma_ecn_packets;
+ __le64 rsvd16;
+ __le64 rsvd17;
+
+ __le64 rsvd18;
+ __le64 rsvd19;
+ __le64 rsvd20;
+ __le64 rsvd21;
+ __le64 rsvd22;
+ __le64 rsvd23;
+ __le64 rsvd24;
+ __le64 rsvd25;
+
+ __le64 rsvd26;
+ __le64 rsvd27;
+ __le64 rsvd28;
+ __le64 rsvd29;
+ __le64 rsvd30;
+ __le64 rsvd31;
+ __le64 rsvd32;
+ __le64 rsvd33;
+
+ __le64 rsvd34;
+ __le64 rsvd35;
+ __le64 rsvd36;
+ __le64 rsvd37;
+ __le64 rsvd38;
+ __le64 rsvd39;
+ __le64 rsvd40;
+ __le64 rsvd41;
+
+ __le64 rsvd42;
+ __le64 rsvd43;
+ __le64 rsvd44;
+ __le64 rsvd45;
+ __le64 rsvd46;
+ __le64 rsvd47;
+ __le64 rsvd48;
+ __le64 rsvd49;
+
+ /* RDMA/ROCE REQ Error/Debugs (768 - 895) */
+ __le64 rdma_req_rx_pkt_seq_err;
+ __le64 rdma_req_rx_rnr_retry_err;
+ __le64 rdma_req_rx_remote_access_err;
+ __le64 rdma_req_rx_remote_inv_req_err;
+ __le64 rdma_req_rx_remote_oper_err;
+ __le64 rdma_req_rx_implied_nak_seq_err;
+ __le64 rdma_req_rx_cqe_err;
+ __le64 rdma_req_rx_cqe_flush_err;
+
+ __le64 rdma_req_rx_dup_responses;
+ __le64 rdma_req_rx_invalid_packets;
+ __le64 rdma_req_tx_local_access_err;
+ __le64 rdma_req_tx_local_oper_err;
+ __le64 rdma_req_tx_memory_mgmt_err;
+ __le64 rsvd52;
+ __le64 rsvd53;
+ __le64 rsvd54;
+
+ /* RDMA/ROCE RESP Error/Debugs (896 - 1023) */
+ __le64 rdma_resp_rx_dup_requests;
+ __le64 rdma_resp_rx_out_of_buffer;
+ __le64 rdma_resp_rx_out_of_seq_pkts;
+ __le64 rdma_resp_rx_cqe_err;
+ __le64 rdma_resp_rx_cqe_flush_err;
+ __le64 rdma_resp_rx_local_len_err;
+ __le64 rdma_resp_rx_inv_request_err;
+ __le64 rdma_resp_rx_local_qp_oper_err;
+
+ __le64 rdma_resp_rx_out_of_atomic_resource;
+ __le64 rdma_resp_tx_pkt_seq_err;
+ __le64 rdma_resp_tx_remote_inv_req_err;
+ __le64 rdma_resp_tx_remote_access_err;
+ __le64 rdma_resp_tx_remote_oper_err;
+ __le64 rdma_resp_tx_rnr_retry_err;
+ __le64 rsvd57;
+ __le64 rsvd58;
+};
+
+/**
+ * struct ionic_lif_info - lif info structure
+ * @config: current lif configuration
+ * @status: lif status
+ * @stats: lif stats
+ */
+struct ionic_lif_info {
+ union ionic_lif_config config;
+ struct ionic_lif_status status;
+ struct ionic_lif_stats stats;
+};
+
+union ionic_dev_cmd {
+ u32 words[16];
+ struct ionic_admin_cmd cmd;
+ struct ionic_nop_cmd nop;
+
+ struct ionic_dev_identify_cmd identify;
+ struct ionic_dev_init_cmd init;
+ struct ionic_dev_reset_cmd reset;
+ struct ionic_dev_getattr_cmd getattr;
+ struct ionic_dev_setattr_cmd setattr;
+
+ struct ionic_port_identify_cmd port_identify;
+ struct ionic_port_init_cmd port_init;
+ struct ionic_port_reset_cmd port_reset;
+ struct ionic_port_getattr_cmd port_getattr;
+ struct ionic_port_setattr_cmd port_setattr;
+
+ struct ionic_lif_identify_cmd lif_identify;
+ struct ionic_lif_init_cmd lif_init;
+ struct ionic_lif_reset_cmd lif_reset;
+
+ struct ionic_qos_identify_cmd qos_identify;
+ struct ionic_qos_init_cmd qos_init;
+ struct ionic_qos_reset_cmd qos_reset;
+
+ struct ionic_q_init_cmd q_init;
+};
+
+union ionic_dev_cmd_comp {
+ u32 words[4];
+ u8 status;
+ struct ionic_admin_comp comp;
+ struct ionic_nop_comp nop;
+
+ struct ionic_dev_identify_comp identify;
+ struct ionic_dev_init_comp init;
+ struct ionic_dev_reset_comp reset;
+ struct ionic_dev_getattr_comp getattr;
+ struct ionic_dev_setattr_comp setattr;
+
+ struct ionic_port_identify_comp port_identify;
+ struct ionic_port_init_comp port_init;
+ struct ionic_port_reset_comp port_reset;
+ struct ionic_port_getattr_comp port_getattr;
+ struct ionic_port_setattr_comp port_setattr;
+
+ struct ionic_lif_identify_comp lif_identify;
+ struct ionic_lif_init_comp lif_init;
+ struct ionic_lif_reset_comp lif_reset;
+
+ struct ionic_qos_identify_comp qos_identify;
+ struct ionic_qos_init_comp qos_init;
+ struct ionic_qos_reset_comp qos_reset;
+
+ struct ionic_q_init_comp q_init;
+};
+
+/**
+ * union ionic_dev_info_regs - Device info register format (read-only)
+ * @signature: Signature value of 0x44455649 ('DEVI').
+ * @version: Current version of info.
+ * @asic_type: Asic type.
+ * @asic_rev: Asic revision.
+ * @fw_status: Firmware status.
+ * @fw_heartbeat: Firmware heartbeat counter.
+ * @serial_num: Serial number.
+ * @fw_version: Firmware version.
+ */
+union ionic_dev_info_regs {
+#define IONIC_DEVINFO_FWVERS_BUFLEN 32
+#define IONIC_DEVINFO_SERIAL_BUFLEN 32
+ struct {
+ u32 signature;
+ u8 version;
+ u8 asic_type;
+ u8 asic_rev;
+ u8 fw_status;
+ u32 fw_heartbeat;
+ char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN];
+ char serial_num[IONIC_DEVINFO_SERIAL_BUFLEN];
+ };
+ u32 words[512];
+};
+
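+/* Usage sketch (illustrative only, not part of this interface): a driver
+ * would typically validate the read-only info block by checking the
+ * signature register against the 'DEVI' value documented above before
+ * trusting anything else in BAR0.  The helper name is hypothetical.
+ */
+#if 0
+static bool example_devinfo_valid(union ionic_dev_info_regs __iomem *info)
+{
+ /* 0x44455649 is ASCII 'DEVI', per the kernel-doc above */
+ return ioread32(&info->signature) == 0x44455649;
+}
+#endif
+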
+/**
+ * union ionic_dev_cmd_regs - Device command register format (read-write)
+ * @doorbell: Device Cmd Doorbell, write-only.
+ * Write a 1 to signal device to process cmd,
+ * poll done for completion.
+ * @done: Done indicator, bit 0 == 1 when command is complete.
+ * @cmd: Opcode-specific command bytes
+ * @comp: Opcode-specific response bytes
+ * @data: Opcode-specific side-data
+ */
+union ionic_dev_cmd_regs {
+ struct {
+ u32 doorbell;
+ u32 done;
+ union ionic_dev_cmd cmd;
+ union ionic_dev_cmd_comp comp;
+ u8 rsvd[48];
+ u32 data[478];
+ };
+ u32 words[512];
+};
+
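+/* Usage sketch (illustrative only, not part of this interface): the
+ * doorbell/done handshake described above amounts to copying the command
+ * words in, writing 1 to the doorbell, then polling bit 0 of done (see
+ * IONIC_DEV_CMD_DONE below).  The helper is hypothetical; a real caller
+ * would bound the poll with a timeout.
+ */
+#if 0
+static void example_dev_cmd_go(union ionic_dev_cmd_regs __iomem *regs,
+ union ionic_dev_cmd *cmd)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(cmd->words); i++)
+ iowrite32(cmd->words[i], &regs->cmd.words[i]);
+ iowrite32(1, &regs->doorbell);
+ while (!(ioread32(&regs->done) & 1)) /* bit 0 == 1 when complete */
+ cpu_relax();
+}
+#endif
+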
+/**
+ * union ionic_dev_regs - Device register format for BAR 0 page 0
+ * @info: Device info registers
+ * @devcmd: Device command registers
+ */
+union ionic_dev_regs {
+ struct {
+ union ionic_dev_info_regs info;
+ union ionic_dev_cmd_regs devcmd;
+ };
+ __le32 words[1024];
+};
+
+union ionic_adminq_cmd {
+ struct ionic_admin_cmd cmd;
+ struct ionic_nop_cmd nop;
+ struct ionic_q_init_cmd q_init;
+ struct ionic_q_control_cmd q_control;
+ struct ionic_lif_setattr_cmd lif_setattr;
+ struct ionic_lif_getattr_cmd lif_getattr;
+ struct ionic_rx_mode_set_cmd rx_mode_set;
+ struct ionic_rx_filter_add_cmd rx_filter_add;
+ struct ionic_rx_filter_del_cmd rx_filter_del;
+ struct ionic_rdma_reset_cmd rdma_reset;
+ struct ionic_rdma_queue_cmd rdma_queue;
+ struct ionic_fw_download_cmd fw_download;
+ struct ionic_fw_control_cmd fw_control;
+};
+
+union ionic_adminq_comp {
+ struct ionic_admin_comp comp;
+ struct ionic_nop_comp nop;
+ struct ionic_q_init_comp q_init;
+ struct ionic_lif_setattr_comp lif_setattr;
+ struct ionic_lif_getattr_comp lif_getattr;
+ struct ionic_rx_filter_add_comp rx_filter_add;
+ struct ionic_fw_control_comp fw_control;
+};
+
+#define IONIC_BARS_MAX 6
+#define IONIC_PCI_BAR_DBELL 1
+
+/* BAR0 */
+#define IONIC_BAR0_SIZE 0x8000
+
+#define IONIC_BAR0_DEV_INFO_REGS_OFFSET 0x0000
+#define IONIC_BAR0_DEV_CMD_REGS_OFFSET 0x0800
+#define IONIC_BAR0_DEV_CMD_DATA_REGS_OFFSET 0x0c00
+#define IONIC_BAR0_INTR_STATUS_OFFSET 0x1000
+#define IONIC_BAR0_INTR_CTRL_OFFSET 0x2000
+#define IONIC_DEV_CMD_DONE 0x00000001
+
+#define IONIC_ASIC_TYPE_CAPRI 0
+
+/**
+ * struct ionic_doorbell - Doorbell register layout
+ * @p_index: Producer index
+ * @ring: Selects the specific ring of the queue to update.
+ * Type-specific meaning:
+ * ring=0: Default producer/consumer queue.
+ * ring=1: (CQ, EQ) Re-Arm queue. RDMA CQs
+ * send events to EQs when armed. EQs send
+ * interrupts when armed.
+ * @qid: The queue id selects the queue destination for the
+ * producer index and flags.
+ */
+struct ionic_doorbell {
+ __le16 p_index;
+ u8 ring;
+ u8 qid_lo;
+ __le16 qid_hi;
+ u16 rsvd2;
+};
+
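+/* Usage sketch (illustrative only): the queue id is split across qid_lo
+ * and qid_hi, so filling a doorbell entry for a 24-bit qid looks roughly
+ * like the hypothetical helper below.
+ */
+#if 0
+static void example_doorbell_fill(struct ionic_doorbell *db,
+ u32 qid, u8 ring, u16 prod)
+{
+ db->p_index = cpu_to_le16(prod);
+ db->ring = ring;
+ db->qid_lo = qid & 0xff;
+ db->qid_hi = cpu_to_le16(qid >> 8);
+}
+#endif
+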
+struct ionic_intr_status {
+ u32 status[2];
+};
+
+struct ionic_notifyq_cmd {
+ __le32 data; /* Not used but needed for qcq structure */
+};
+
+union ionic_notifyq_comp {
+ struct ionic_notifyq_event event;
+ struct ionic_link_change_event link_change;
+ struct ionic_reset_event reset;
+ struct ionic_heartbeat_event heartbeat;
+ struct ionic_log_event log;
+};
+
+/* Deprecated */
+struct ionic_identity {
+ union ionic_drv_identity drv;
+ union ionic_dev_identity dev;
+ union ionic_lif_identity lif;
+ union ionic_port_identity port;
+ union ionic_qos_identity qos;
+};
+
+#pragma pack(pop)
+
+#endif /* _IONIC_IF_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
new file mode 100644
index 000000000000..db7c82742828
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -0,0 +1,2274 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/cpumask.h>
+
+#include "ionic.h"
+#include "ionic_bus.h"
+#include "ionic_lif.h"
+#include "ionic_txrx.h"
+#include "ionic_ethtool.h"
+#include "ionic_debugfs.h"
+
+static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
+static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
+static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
+static void ionic_link_status_check(struct ionic_lif *lif);
+
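+/* Process one deferred item per invocation, then reschedule the work so
+ * any remaining items are handled without monopolizing the workqueue.
+ */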
+static void ionic_lif_deferred_work(struct work_struct *work)
+{
+ struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
+ struct ionic_deferred *def = &lif->deferred;
+ struct ionic_deferred_work *w = NULL;
+
+ spin_lock_bh(&def->lock);
+ if (!list_empty(&def->list)) {
+ w = list_first_entry(&def->list,
+ struct ionic_deferred_work, list);
+ list_del(&w->list);
+ }
+ spin_unlock_bh(&def->lock);
+
+ if (w) {
+ switch (w->type) {
+ case IONIC_DW_TYPE_RX_MODE:
+ ionic_lif_rx_mode(lif, w->rx_mode);
+ break;
+ case IONIC_DW_TYPE_RX_ADDR_ADD:
+ ionic_lif_addr_add(lif, w->addr);
+ break;
+ case IONIC_DW_TYPE_RX_ADDR_DEL:
+ ionic_lif_addr_del(lif, w->addr);
+ break;
+ case IONIC_DW_TYPE_LINK_STATUS:
+ ionic_link_status_check(lif);
+ break;
+ default:
+ break;
+ }
+ kfree(w);
+ schedule_work(&def->work);
+ }
+}
+
+static void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
+ struct ionic_deferred_work *work)
+{
+ spin_lock_bh(&def->lock);
+ list_add_tail(&work->list, &def->list);
+ spin_unlock_bh(&def->lock);
+ schedule_work(&def->work);
+}
+
+static void ionic_link_status_check(struct ionic_lif *lif)
+{
+ struct net_device *netdev = lif->netdev;
+ u16 link_status;
+ bool link_up;
+
+ link_status = le16_to_cpu(lif->info->status.link_status);
+ link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
+
+ /* filter out the no-change cases */
+ if (link_up == netif_carrier_ok(netdev))
+ goto link_out;
+
+ if (link_up) {
+ netdev_info(netdev, "Link up - %d Gbps\n",
+ le32_to_cpu(lif->info->status.link_speed) / 1000);
+
+ if (test_bit(IONIC_LIF_UP, lif->state)) {
+ netif_tx_wake_all_queues(lif->netdev);
+ netif_carrier_on(netdev);
+ }
+ } else {
+ netdev_info(netdev, "Link down\n");
+
+ /* carrier off first to avoid watchdog timeout */
+ netif_carrier_off(netdev);
+ if (test_bit(IONIC_LIF_UP, lif->state))
+ netif_tx_stop_all_queues(netdev);
+ }
+
+link_out:
+ clear_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state);
+}
+
+static void ionic_link_status_check_request(struct ionic_lif *lif)
+{
+ struct ionic_deferred_work *work;
+
+ /* we only need one request outstanding at a time */
+ if (test_and_set_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state))
+ return;
+
+ if (in_interrupt()) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ work->type = IONIC_DW_TYPE_LINK_STATUS;
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ } else {
+ ionic_link_status_check(lif);
+ }
+}
+
+static irqreturn_t ionic_isr(int irq, void *data)
+{
+ struct napi_struct *napi = data;
+
+ napi_schedule_irqoff(napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
+{
+ struct ionic_intr_info *intr = &qcq->intr;
+ struct device *dev = lif->ionic->dev;
+ struct ionic_queue *q = &qcq->q;
+ const char *name;
+
+ if (lif->registered)
+ name = lif->netdev->name;
+ else
+ name = dev_name(dev);
+
+ snprintf(intr->name, sizeof(intr->name),
+ "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
+
+ return devm_request_irq(dev, intr->vector, ionic_isr,
+ 0, intr->name, &qcq->napi);
+}
+
+static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
+{
+ struct ionic *ionic = lif->ionic;
+ int index;
+
+ index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
+ if (index == ionic->nintrs) {
+ netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
+ __func__, index, ionic->nintrs);
+ return -ENOSPC;
+ }
+
+ set_bit(index, ionic->intrs);
+ ionic_intr_init(&ionic->idev, intr, index);
+
+ return 0;
+}
+
+static void ionic_intr_free(struct ionic_lif *lif, int index)
+{
+ if (index != INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
+ clear_bit(index, lif->ionic->intrs);
+}
+
+static int ionic_qcq_enable(struct ionic_qcq *qcq)
+{
+ struct ionic_queue *q = &qcq->q;
+ struct ionic_lif *lif = q->lif;
+ struct ionic_dev *idev;
+ struct device *dev;
+
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.q_control = {
+ .opcode = IONIC_CMD_Q_CONTROL,
+ .lif_index = cpu_to_le16(lif->index),
+ .type = q->type,
+ .index = cpu_to_le32(q->index),
+ .oper = IONIC_Q_ENABLE,
+ },
+ };
+
+ idev = &lif->ionic->idev;
+ dev = lif->ionic->dev;
+
+ dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
+ ctx.cmd.q_control.index, ctx.cmd.q_control.type);
+
+ if (qcq->flags & IONIC_QCQ_F_INTR) {
+ irq_set_affinity_hint(qcq->intr.vector,
+ &qcq->intr.affinity_mask);
+ napi_enable(&qcq->napi);
+ ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
+ ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
+ IONIC_INTR_MASK_CLEAR);
+ }
+
+ return ionic_adminq_post_wait(lif, &ctx);
+}
+
+static int ionic_qcq_disable(struct ionic_qcq *qcq)
+{
+ struct ionic_queue *q = &qcq->q;
+ struct ionic_lif *lif = q->lif;
+ struct ionic_dev *idev;
+ struct device *dev;
+
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.q_control = {
+ .opcode = IONIC_CMD_Q_CONTROL,
+ .lif_index = cpu_to_le16(lif->index),
+ .type = q->type,
+ .index = cpu_to_le32(q->index),
+ .oper = IONIC_Q_DISABLE,
+ },
+ };
+
+ idev = &lif->ionic->idev;
+ dev = lif->ionic->dev;
+
+ dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
+ ctx.cmd.q_control.index, ctx.cmd.q_control.type);
+
+ if (qcq->flags & IONIC_QCQ_F_INTR) {
+ ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
+ IONIC_INTR_MASK_SET);
+ synchronize_irq(qcq->intr.vector);
+ irq_set_affinity_hint(qcq->intr.vector, NULL);
+ napi_disable(&qcq->napi);
+ }
+
+ return ionic_adminq_post_wait(lif, &ctx);
+}
+
+static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
+{
+ struct ionic_dev *idev = &lif->ionic->idev;
+ struct device *dev = lif->ionic->dev;
+
+ if (!qcq)
+ return;
+
+ ionic_debugfs_del_qcq(qcq);
+
+ if (!(qcq->flags & IONIC_QCQ_F_INITED))
+ return;
+
+ if (qcq->flags & IONIC_QCQ_F_INTR) {
+ ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
+ IONIC_INTR_MASK_SET);
+ devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
+ netif_napi_del(&qcq->napi);
+ }
+
+ qcq->flags &= ~IONIC_QCQ_F_INITED;
+}
+
+static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
+{
+ struct device *dev = lif->ionic->dev;
+
+ if (!qcq)
+ return;
+
+ dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
+ qcq->base = NULL;
+ qcq->base_pa = 0;
+
+ if (qcq->flags & IONIC_QCQ_F_INTR)
+ ionic_intr_free(lif, qcq->intr.index);
+
+ devm_kfree(dev, qcq->cq.info);
+ qcq->cq.info = NULL;
+ devm_kfree(dev, qcq->q.info);
+ qcq->q.info = NULL;
+ devm_kfree(dev, qcq);
+}
+
+static void ionic_qcqs_free(struct ionic_lif *lif)
+{
+ struct device *dev = lif->ionic->dev;
+ unsigned int i;
+
+ if (lif->notifyqcq) {
+ ionic_qcq_free(lif, lif->notifyqcq);
+ lif->notifyqcq = NULL;
+ }
+
+ if (lif->adminqcq) {
+ ionic_qcq_free(lif, lif->adminqcq);
+ lif->adminqcq = NULL;
+ }
+
+ for (i = 0; i < lif->nxqs; i++)
+ if (lif->rxqcqs[i].stats)
+ devm_kfree(dev, lif->rxqcqs[i].stats);
+
+ devm_kfree(dev, lif->rxqcqs);
+ lif->rxqcqs = NULL;
+
+ for (i = 0; i < lif->nxqs; i++)
+ if (lif->txqcqs[i].stats)
+ devm_kfree(dev, lif->txqcqs[i].stats);
+
+ devm_kfree(dev, lif->txqcqs);
+ lif->txqcqs = NULL;
+}
+
+static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
+ struct ionic_qcq *n_qcq)
+{
+ if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
+ ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index);
+ n_qcq->flags &= ~IONIC_QCQ_F_INTR;
+ }
+
+ n_qcq->intr.vector = src_qcq->intr.vector;
+ n_qcq->intr.index = src_qcq->intr.index;
+}
+
+static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
+ unsigned int index,
+ const char *name, unsigned int flags,
+ unsigned int num_descs, unsigned int desc_size,
+ unsigned int cq_desc_size,
+ unsigned int sg_desc_size,
+ unsigned int pid, struct ionic_qcq **qcq)
+{
+ struct ionic_dev *idev = &lif->ionic->idev;
+ u32 q_size, cq_size, sg_size, total_size;
+ struct device *dev = lif->ionic->dev;
+ void *q_base, *cq_base, *sg_base;
+ dma_addr_t cq_base_pa = 0;
+ dma_addr_t sg_base_pa = 0;
+ dma_addr_t q_base_pa = 0;
+ struct ionic_qcq *new;
+ int err;
+
+ *qcq = NULL;
+
+ q_size = num_descs * desc_size;
+ cq_size = num_descs * cq_desc_size;
+ sg_size = num_descs * sg_desc_size;
+
+ total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
+ /* Note: aligning q_size/cq_size is not enough due to cq_base
+ * address aligning as q_base could be not aligned to the page.
+ * Adding PAGE_SIZE.
+ */
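+ /* Worked example with 4 KB pages (hypothetical numbers): if q_size is
+  * 0x1000 but q_base lands at a 0x800 offset within a page, cq_base =
+  * ALIGN(q_base + 0x1000, PAGE_SIZE) sits 0x1800 past q_base -- 0x800
+  * more than the ALIGN(q_size, PAGE_SIZE) budgeted.  The overshoot is
+  * always less than PAGE_SIZE, so one extra page is enough.
+  */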
+ total_size += PAGE_SIZE;
+ if (flags & IONIC_QCQ_F_SG) {
+ total_size += ALIGN(sg_size, PAGE_SIZE);
+ total_size += PAGE_SIZE;
+ }
+
+ new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
+ if (!new) {
+ netdev_err(lif->netdev, "Cannot allocate queue structure\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ new->flags = flags;
+
+ new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs,
+ GFP_KERNEL);
+ if (!new->q.info) {
+ netdev_err(lif->netdev, "Cannot allocate queue info\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ new->q.type = type;
+
+ err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
+ desc_size, sg_desc_size, pid);
+ if (err) {
+ netdev_err(lif->netdev, "Cannot initialize queue\n");
+ goto err_out;
+ }
+
+ if (flags & IONIC_QCQ_F_INTR) {
+ err = ionic_intr_alloc(lif, &new->intr);
+ if (err) {
+ netdev_warn(lif->netdev, "no intr for %s: %d\n",
+ name, err);
+ goto err_out;
+ }
+
+ err = ionic_bus_get_irq(lif->ionic, new->intr.index);
+ if (err < 0) {
+ netdev_warn(lif->netdev, "no vector for %s: %d\n",
+ name, err);
+ goto err_out_free_intr;
+ }
+ new->intr.vector = err;
+ ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
+ IONIC_INTR_MASK_SET);
+
+ new->intr.cpu = new->intr.index % num_online_cpus();
+ if (cpu_online(new->intr.cpu))
+ cpumask_set_cpu(new->intr.cpu,
+ &new->intr.affinity_mask);
+ } else {
+ new->intr.index = INTR_INDEX_NOT_ASSIGNED;
+ }
+
+ new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
+ GFP_KERNEL);
+ if (!new->cq.info) {
+ netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
+ err = -ENOMEM;
+ goto err_out_free_intr;
+ }
+
+ err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
+ if (err) {
+ netdev_err(lif->netdev, "Cannot initialize completion queue\n");
+ goto err_out_free_intr;
+ }
+
+ new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
+ GFP_KERNEL);
+ if (!new->base) {
+ netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
+ err = -ENOMEM;
+ goto err_out_free_intr;
+ }
+
+ new->total_size = total_size;
+
+ q_base = new->base;
+ q_base_pa = new->base_pa;
+
+ cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
+ cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);
+
+ if (flags & IONIC_QCQ_F_SG) {
+ sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
+ PAGE_SIZE);
+ sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
+ ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
+ }
+
+ ionic_q_map(&new->q, q_base, q_base_pa);
+ ionic_cq_map(&new->cq, cq_base, cq_base_pa);
+ ionic_cq_bind(&new->cq, &new->q);
+
+ *qcq = new;
+
+ return 0;
+
+err_out_free_intr:
+ ionic_intr_free(lif, new->intr.index);
+err_out:
+ dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
+ return err;
+}
+
+static int ionic_qcqs_alloc(struct ionic_lif *lif)
+{
+ struct device *dev = lif->ionic->dev;
+ unsigned int q_list_size;
+ unsigned int flags;
+ int err;
+ int i;
+
+ flags = IONIC_QCQ_F_INTR;
+ err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
+ IONIC_ADMINQ_LENGTH,
+ sizeof(struct ionic_admin_cmd),
+ sizeof(struct ionic_admin_comp),
+ 0, lif->kern_pid, &lif->adminqcq);
+ if (err)
+ return err;
+
+ if (lif->ionic->nnqs_per_lif) {
+ flags = IONIC_QCQ_F_NOTIFYQ;
+ err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
+ flags, IONIC_NOTIFYQ_LENGTH,
+ sizeof(struct ionic_notifyq_cmd),
+ sizeof(union ionic_notifyq_comp),
+ 0, lif->kern_pid, &lif->notifyqcq);
+ if (err)
+ goto err_out_free_adminqcq;
+
+ /* Let the notifyq ride on the adminq interrupt */
+ ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
+ }
+
+ q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
+ err = -ENOMEM;
+ lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
+ if (!lif->txqcqs)
+ goto err_out_free_notifyqcq;
+ for (i = 0; i < lif->nxqs; i++) {
+ lif->txqcqs[i].stats = devm_kzalloc(dev,
+ sizeof(struct ionic_q_stats),
+ GFP_KERNEL);
+ if (!lif->txqcqs[i].stats)
+ goto err_out_free_tx_stats;
+ }
+
+ lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
+ if (!lif->rxqcqs)
+ goto err_out_free_tx_stats;
+ for (i = 0; i < lif->nxqs; i++) {
+ lif->rxqcqs[i].stats = devm_kzalloc(dev,
+ sizeof(struct ionic_q_stats),
+ GFP_KERNEL);
+ if (!lif->rxqcqs[i].stats)
+ goto err_out_free_rx_stats;
+ }
+
+ return 0;
+
+err_out_free_rx_stats:
+ for (i = 0; i < lif->nxqs; i++)
+ if (lif->rxqcqs[i].stats)
+ devm_kfree(dev, lif->rxqcqs[i].stats);
+ devm_kfree(dev, lif->rxqcqs);
+ lif->rxqcqs = NULL;
+err_out_free_tx_stats:
+ for (i = 0; i < lif->nxqs; i++)
+ if (lif->txqcqs[i].stats)
+ devm_kfree(dev, lif->txqcqs[i].stats);
+ devm_kfree(dev, lif->txqcqs);
+ lif->txqcqs = NULL;
+err_out_free_notifyqcq:
+ if (lif->notifyqcq) {
+ ionic_qcq_free(lif, lif->notifyqcq);
+ lif->notifyqcq = NULL;
+ }
+err_out_free_adminqcq:
+ ionic_qcq_free(lif, lif->adminqcq);
+ lif->adminqcq = NULL;
+
+ return err;
+}
+
+static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
+{
+ struct device *dev = lif->ionic->dev;
+ struct ionic_queue *q = &qcq->q;
+ struct ionic_cq *cq = &qcq->cq;
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.q_init = {
+ .opcode = IONIC_CMD_Q_INIT,
+ .lif_index = cpu_to_le16(lif->index),
+ .type = q->type,
+ .index = cpu_to_le32(q->index),
+ .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
+ IONIC_QINIT_F_SG),
+ .intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index),
+ .pid = cpu_to_le16(q->pid),
+ .ring_size = ilog2(q->num_descs),
+ .ring_base = cpu_to_le64(q->base_pa),
+ .cq_ring_base = cpu_to_le64(cq->base_pa),
+ .sg_ring_base = cpu_to_le64(q->sg_base_pa),
+ },
+ };
+ int err;
+
+ dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
+ dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
+ dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
+ dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ return err;
+
+ q->hw_type = ctx.comp.q_init.hw_type;
+ q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
+ q->dbval = IONIC_DBELL_QID(q->hw_index);
+
+ dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
+ dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
+
+ qcq->flags |= IONIC_QCQ_F_INITED;
+
+ ionic_debugfs_add_qcq(lif, qcq);
+
+ return 0;
+}
+
+static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
+{
+ struct device *dev = lif->ionic->dev;
+ struct ionic_queue *q = &qcq->q;
+ struct ionic_cq *cq = &qcq->cq;
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.q_init = {
+ .opcode = IONIC_CMD_Q_INIT,
+ .lif_index = cpu_to_le16(lif->index),
+ .type = q->type,
+ .index = cpu_to_le32(q->index),
+ .flags = cpu_to_le16(IONIC_QINIT_F_IRQ),
+ .intr_index = cpu_to_le16(cq->bound_intr->index),
+ .pid = cpu_to_le16(q->pid),
+ .ring_size = ilog2(q->num_descs),
+ .ring_base = cpu_to_le64(q->base_pa),
+ .cq_ring_base = cpu_to_le64(cq->base_pa),
+ },
+ };
+ int err;
+
+ dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
+ dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
+ dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
+ dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ return err;
+
+ q->hw_type = ctx.comp.q_init.hw_type;
+ q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
+ q->dbval = IONIC_DBELL_QID(q->hw_index);
+
+ dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
+ dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
+
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
+ NAPI_POLL_WEIGHT);
+
+ err = ionic_request_irq(lif, qcq);
+ if (err) {
+ netif_napi_del(&qcq->napi);
+ return err;
+ }
+
+ qcq->flags |= IONIC_QCQ_F_INITED;
+
+ ionic_debugfs_add_qcq(lif, qcq);
+
+ return 0;
+}
+
+static bool ionic_notifyq_service(struct ionic_cq *cq,
+ struct ionic_cq_info *cq_info)
+{
+ union ionic_notifyq_comp *comp = cq_info->cq_desc;
+ struct net_device *netdev;
+ struct ionic_queue *q;
+ struct ionic_lif *lif;
+ u64 eid;
+
+ q = cq->bound_q;
+ lif = q->info[0].cb_arg;
+ netdev = lif->netdev;
+ eid = le64_to_cpu(comp->event.eid);
+
+ /* Have we run out of new completions to process? */
+ if (eid <= lif->last_eid)
+ return false;
+
+ lif->last_eid = eid;
+
+ dev_dbg(lif->ionic->dev, "notifyq event:\n");
+ dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
+ comp, sizeof(*comp), true);
+
+ switch (le16_to_cpu(comp->event.ecode)) {
+ case IONIC_EVENT_LINK_CHANGE:
+ ionic_link_status_check_request(lif);
+ break;
+ case IONIC_EVENT_RESET:
+ netdev_info(netdev, "Notifyq IONIC_EVENT_RESET eid=%lld\n",
+ eid);
+ netdev_info(netdev, " reset_code=%d state=%d\n",
+ comp->reset.reset_code,
+ comp->reset.state);
+ break;
+ default:
+ netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n",
+ comp->event.ecode, eid);
+ break;
+ }
+
+ return true;
+}
+
+static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
+{
+ struct ionic_dev *idev = &lif->ionic->idev;
+ struct ionic_cq *cq = &lif->notifyqcq->cq;
+ u32 work_done;
+
+ work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
+ NULL, NULL);
+ if (work_done)
+ ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
+ work_done, IONIC_INTR_CRED_RESET_COALESCE);
+
+ return work_done;
+}
+
+static bool ionic_adminq_service(struct ionic_cq *cq,
+ struct ionic_cq_info *cq_info)
+{
+ struct ionic_admin_comp *comp = cq_info->cq_desc;
+
+ if (!color_match(comp->color, cq->done_color))
+ return false;
+
+ ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
+
+ return true;
+}
+
+static int ionic_adminq_napi(struct napi_struct *napi, int budget)
+{
+ struct ionic_lif *lif = napi_to_cq(napi)->lif;
+ int n_work = 0;
+ int a_work = 0;
+
+ if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED))
+ n_work = ionic_notifyq_clean(lif, budget);
+ a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);
+
+ return max(n_work, a_work);
+}
+
+static void ionic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *ns)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_lif_stats *ls;
+
+ memset(ns, 0, sizeof(*ns));
+ ls = &lif->info->stats;
+
+ ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
+ le64_to_cpu(ls->rx_mcast_packets) +
+ le64_to_cpu(ls->rx_bcast_packets);
+
+ ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
+ le64_to_cpu(ls->tx_mcast_packets) +
+ le64_to_cpu(ls->tx_bcast_packets);
+
+ ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
+ le64_to_cpu(ls->rx_mcast_bytes) +
+ le64_to_cpu(ls->rx_bcast_bytes);
+
+ ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
+ le64_to_cpu(ls->tx_mcast_bytes) +
+ le64_to_cpu(ls->tx_bcast_bytes);
+
+ ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
+ le64_to_cpu(ls->rx_mcast_drop_packets) +
+ le64_to_cpu(ls->rx_bcast_drop_packets);
+
+ ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
+ le64_to_cpu(ls->tx_mcast_drop_packets) +
+ le64_to_cpu(ls->tx_bcast_drop_packets);
+
+ ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
+
+ ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
+
+ ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
+ le64_to_cpu(ls->rx_queue_disabled) +
+ le64_to_cpu(ls->rx_desc_fetch_error) +
+ le64_to_cpu(ls->rx_desc_data_error);
+
+ ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
+ le64_to_cpu(ls->tx_queue_disabled) +
+ le64_to_cpu(ls->tx_desc_fetch_error) +
+ le64_to_cpu(ls->tx_desc_data_error);
+
+ ns->rx_errors = ns->rx_over_errors +
+ ns->rx_missed_errors;
+
+ ns->tx_errors = ns->tx_aborted_errors;
+}
+
+static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.rx_filter_add = {
+ .opcode = IONIC_CMD_RX_FILTER_ADD,
+ .lif_index = cpu_to_le16(lif->index),
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
+ },
+ };
+ struct ionic_rx_filter *f;
+ int err;
+
+ /* don't bother if we already have it */
+ spin_lock_bh(&lif->rx_filters.lock);
+ f = ionic_rx_filter_by_addr(lif, addr);
+ spin_unlock_bh(&lif->rx_filters.lock);
+ if (f)
+ return 0;
+
+ netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr,
+ ctx.comp.rx_filter_add.filter_id);
+
+ memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ return err;
+
+ return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
+}
+
+static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.rx_filter_del = {
+ .opcode = IONIC_CMD_RX_FILTER_DEL,
+ .lif_index = cpu_to_le16(lif->index),
+ },
+ };
+ struct ionic_rx_filter *f;
+ int err;
+
+ spin_lock_bh(&lif->rx_filters.lock);
+ f = ionic_rx_filter_by_addr(lif, addr);
+ if (!f) {
+ spin_unlock_bh(&lif->rx_filters.lock);
+ return -ENOENT;
+ }
+
+ ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
+ ionic_rx_filter_free(lif, f);
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ return err;
+
+ netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
+ ctx.cmd.rx_filter_del.filter_id);
+
+ return 0;
+}
+
+static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
+{
+ struct ionic *ionic = lif->ionic;
+ struct ionic_deferred_work *work;
+ unsigned int nmfilters;
+ unsigned int nufilters;
+
+ if (add) {
+ /* Do we have space for this filter? We test the counters
+ * here before checking the need for deferral so that we
+ * can return an overflow error to the stack.
+ */
+ nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
+ nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);
+
+ if (is_multicast_ether_addr(addr) && lif->nmcast < nmfilters)
+ lif->nmcast++;
+ else if (!is_multicast_ether_addr(addr) &&
+ lif->nucast < nufilters)
+ lif->nucast++;
+ else
+ return -ENOSPC;
+ } else {
+ if (is_multicast_ether_addr(addr) && lif->nmcast)
+ lif->nmcast--;
+ else if (!is_multicast_ether_addr(addr) && lif->nucast)
+ lif->nucast--;
+ }
+
+ if (in_interrupt()) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ netdev_err(lif->netdev, "%s OOM\n", __func__);
+ return -ENOMEM;
+ }
+ work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
+ IONIC_DW_TYPE_RX_ADDR_DEL;
+ memcpy(work->addr, addr, ETH_ALEN);
+ netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
+ add ? "add" : "del", addr);
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ } else {
+ netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
+ add ? "add" : "del", addr);
+ if (add)
+ return ionic_lif_addr_add(lif, addr);
+ else
+ return ionic_lif_addr_del(lif, addr);
+ }
+
+ return 0;
+}
+
+static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
+{
+ return ionic_lif_addr(netdev_priv(netdev), addr, true);
+}
+
+static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
+{
+ return ionic_lif_addr(netdev_priv(netdev), addr, false);
+}
+
+static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.rx_mode_set = {
+ .opcode = IONIC_CMD_RX_MODE_SET,
+ .lif_index = cpu_to_le16(lif->index),
+ .rx_mode = cpu_to_le16(rx_mode),
+ },
+ };
+ char buf[128];
+ int err;
+ int i;
+#define REMAIN(__x) (sizeof(buf) - (__x))
+
+ i = snprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
+ lif->rx_mode, rx_mode);
+ if (rx_mode & IONIC_RX_MODE_F_UNICAST)
+ i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
+ if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
+ i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
+ if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
+ i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
+ if (rx_mode & IONIC_RX_MODE_F_PROMISC)
+ i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
+ if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
+ i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
+ netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
+ rx_mode, err);
+ else
+ lif->rx_mode = rx_mode;
+}
+
+static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
+{
+ struct ionic_deferred_work *work;
+
+ if (in_interrupt()) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ netdev_err(lif->netdev, "%s OOM\n", __func__);
+ return;
+ }
+ work->type = IONIC_DW_TYPE_RX_MODE;
+ work->rx_mode = rx_mode;
+ netdev_dbg(lif->netdev, "deferred: rx_mode\n");
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ } else {
+ ionic_lif_rx_mode(lif, rx_mode);
+ }
+}
+
+static void ionic_set_rx_mode(struct net_device *netdev)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_identity *ident;
+ unsigned int nfilters;
+ unsigned int rx_mode;
+
+ ident = &lif->ionic->ident;
+
+ rx_mode = IONIC_RX_MODE_F_UNICAST;
+ rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
+ rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
+ rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
+ rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
+
+ /* sync unicast addresses
+ * next check to see if we're in an overflow state
+ * if so, we track that we overflowed and enable NIC PROMISC
+ * else if the overflow is set and not needed
+ * we remove our overflow flag and check the netdev flags
+ * to see if we can disable NIC PROMISC
+ */
+ __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
+ nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
+ if (netdev_uc_count(netdev) + 1 > nfilters) {
+ rx_mode |= IONIC_RX_MODE_F_PROMISC;
+ lif->uc_overflow = true;
+ } else if (lif->uc_overflow) {
+ lif->uc_overflow = false;
+ if (!(netdev->flags & IFF_PROMISC))
+ rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
+ }
+
+ /* same for multicast */
+ __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
+ nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
+ if (netdev_mc_count(netdev) > nfilters) {
+ rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
+ lif->mc_overflow = true;
+ } else if (lif->mc_overflow) {
+ lif->mc_overflow = false;
+ if (!(netdev->flags & IFF_ALLMULTI))
+ rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
+ }
+
+ if (lif->rx_mode != rx_mode)
+ _ionic_lif_rx_mode(lif, rx_mode);
+}
+
+static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
+{
+ u64 wanted = 0;
+
+ if (features & NETIF_F_HW_VLAN_CTAG_TX)
+ wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
+ if (features & NETIF_F_RXHASH)
+ wanted |= IONIC_ETH_HW_RX_HASH;
+ if (features & NETIF_F_RXCSUM)
+ wanted |= IONIC_ETH_HW_RX_CSUM;
+ if (features & NETIF_F_SG)
+ wanted |= IONIC_ETH_HW_TX_SG;
+ if (features & NETIF_F_HW_CSUM)
+ wanted |= IONIC_ETH_HW_TX_CSUM;
+ if (features & NETIF_F_TSO)
+ wanted |= IONIC_ETH_HW_TSO;
+ if (features & NETIF_F_TSO6)
+ wanted |= IONIC_ETH_HW_TSO_IPV6;
+ if (features & NETIF_F_TSO_ECN)
+ wanted |= IONIC_ETH_HW_TSO_ECN;
+ if (features & NETIF_F_GSO_GRE)
+ wanted |= IONIC_ETH_HW_TSO_GRE;
+ if (features & NETIF_F_GSO_GRE_CSUM)
+ wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
+ if (features & NETIF_F_GSO_IPXIP4)
+ wanted |= IONIC_ETH_HW_TSO_IPXIP4;
+ if (features & NETIF_F_GSO_IPXIP6)
+ wanted |= IONIC_ETH_HW_TSO_IPXIP6;
+ if (features & NETIF_F_GSO_UDP_TUNNEL)
+ wanted |= IONIC_ETH_HW_TSO_UDP;
+ if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
+ wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
+
+ return cpu_to_le64(wanted);
+}
+
+static int ionic_set_nic_features(struct ionic_lif *lif,
+ netdev_features_t features)
+{
+ struct device *dev = lif->ionic->dev;
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_setattr = {
+ .opcode = IONIC_CMD_LIF_SETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_FEATURES,
+ },
+ };
+ u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
+ IONIC_ETH_HW_VLAN_RX_STRIP |
+ IONIC_ETH_HW_VLAN_RX_FILTER;
+ int err;
+
+ ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ return err;
+
+ lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
+ ctx.comp.lif_setattr.features);
+
+ if ((vlan_flags & features) &&
+ !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
+ dev_info_once(lif->ionic->dev, "NIC does not support vlan offload, likely in SmartNIC mode\n");
+
+ if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
+ dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
+ if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
+ dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
+ if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
+ dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
+ if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
+ dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
+ if (lif->hw_features & IONIC_ETH_HW_TX_SG)
+ dev_dbg(dev, "feature ETH_HW_TX_SG\n");
+ if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
+ dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
+ if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
+ dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
+ if (lif->hw_features & IONIC_ETH_HW_TSO)
+ dev_dbg(dev, "feature ETH_HW_TSO\n");
+ if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
+ dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
+ if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
+ dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
+ if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
+ dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
+ if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
+ dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
+ if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
+ dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
+ if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
+ dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
+ if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
+ dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
+ if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
+ dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
+
+ return 0;
+}
+
+static int ionic_init_nic_features(struct ionic_lif *lif)
+{
+ struct net_device *netdev = lif->netdev;
+ netdev_features_t features;
+ int err;
+
+ /* set up what we expect to support by default */
+ features = NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_RXHASH |
+ NETIF_F_SG |
+ NETIF_F_HW_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_TSO_ECN;
+
+ err = ionic_set_nic_features(lif, features);
+ if (err)
+ return err;
+
+ /* tell the netdev what we actually can support */
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
+ netdev->hw_features |= NETIF_F_RXHASH;
+ if (lif->hw_features & IONIC_ETH_HW_TX_SG)
+ netdev->hw_features |= NETIF_F_SG;
+
+ if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
+ netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+ if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
+ netdev->hw_enc_features |= NETIF_F_RXCSUM;
+ if (lif->hw_features & IONIC_ETH_HW_TSO)
+ netdev->hw_enc_features |= NETIF_F_TSO;
+ if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
+ netdev->hw_enc_features |= NETIF_F_TSO6;
+ if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
+ netdev->hw_enc_features |= NETIF_F_TSO_ECN;
+ if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
+ netdev->hw_enc_features |= NETIF_F_GSO_GRE;
+ if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
+ netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
+ if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
+ netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
+ if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
+ netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
+ if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+ if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->hw_features |= netdev->hw_enc_features;
+ netdev->features |= netdev->hw_features;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ return 0;
+}
+
+static int ionic_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ int err;
+
+ netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
+ __func__, (u64)lif->netdev->features, (u64)features);
+
+ err = ionic_set_nic_features(lif, features);
+
+ return err;
+}
+
+static int ionic_set_mac_address(struct net_device *netdev, void *sa)
+{
+ struct sockaddr *addr = sa;
+ u8 *mac;
+ int err;
+
+ mac = (u8 *)addr->sa_data;
+ if (ether_addr_equal(netdev->dev_addr, mac))
+ return 0;
+
+ err = eth_prepare_mac_addr_change(netdev, addr);
+ if (err)
+ return err;
+
+ if (!is_zero_ether_addr(netdev->dev_addr)) {
+ netdev_info(netdev, "deleting mac addr %pM\n",
+ netdev->dev_addr);
+ ionic_addr_del(netdev, netdev->dev_addr);
+ }
+
+ eth_commit_mac_addr_change(netdev, addr);
+ netdev_info(netdev, "updating mac addr %pM\n", mac);
+
+ return ionic_addr_add(netdev, mac);
+}
+
+static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_setattr = {
+ .opcode = IONIC_CMD_LIF_SETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_MTU,
+ .mtu = cpu_to_le32(new_mtu),
+ },
+ };
+ int err;
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ return err;
+
+ netdev->mtu = new_mtu;
+ err = ionic_reset_queues(lif);
+
+ return err;
+}
+
+static void ionic_tx_timeout_work(struct work_struct *ws)
+{
+ struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
+
+ netdev_info(lif->netdev, "Tx Timeout recovery\n");
+
+ rtnl_lock();
+ ionic_reset_queues(lif);
+ rtnl_unlock();
+}
+
+static void ionic_tx_timeout(struct net_device *netdev)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+
+ schedule_work(&lif->tx_timeout_work);
+}
+
+static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.rx_filter_add = {
+ .opcode = IONIC_CMD_RX_FILTER_ADD,
+ .lif_index = cpu_to_le16(lif->index),
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
+ .vlan.vlan = cpu_to_le16(vid),
+ },
+ };
+ int err;
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ return err;
+
+ netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
+ ctx.comp.rx_filter_add.filter_id);
+
+ return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
+}
+
+static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.rx_filter_del = {
+ .opcode = IONIC_CMD_RX_FILTER_DEL,
+ .lif_index = cpu_to_le16(lif->index),
+ },
+ };
+ struct ionic_rx_filter *f;
+
+ spin_lock_bh(&lif->rx_filters.lock);
+
+ f = ionic_rx_filter_by_vlan(lif, vid);
+ if (!f) {
+ spin_unlock_bh(&lif->rx_filters.lock);
+ return -ENOENT;
+ }
+
+ netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid,
+ le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));
+
+ ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
+ ionic_rx_filter_free(lif, f);
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ return ionic_adminq_post_wait(lif, &ctx);
+}
+
+int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
+ const u8 *key, const u32 *indir)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_setattr = {
+ .opcode = IONIC_CMD_LIF_SETATTR,
+ .attr = IONIC_LIF_ATTR_RSS,
+ .rss.types = cpu_to_le16(types),
+ .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
+ },
+ };
+ unsigned int i, tbl_sz;
+
+ lif->rss_types = types;
+
+ if (key)
+ memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
+
+ if (indir) {
+ tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
+ for (i = 0; i < tbl_sz; i++)
+ lif->rss_ind_tbl[i] = indir[i];
+ }
+
+ memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
+ IONIC_RSS_HASH_KEY_SIZE);
+
+ return ionic_adminq_post_wait(lif, &ctx);
+}
+
+static int ionic_lif_rss_init(struct ionic_lif *lif)
+{
+ u8 rss_key[IONIC_RSS_HASH_KEY_SIZE];
+ unsigned int tbl_sz;
+ unsigned int i;
+
+ netdev_rss_key_fill(rss_key, IONIC_RSS_HASH_KEY_SIZE);
+
+ lif->rss_types = IONIC_RSS_TYPE_IPV4 |
+ IONIC_RSS_TYPE_IPV4_TCP |
+ IONIC_RSS_TYPE_IPV4_UDP |
+ IONIC_RSS_TYPE_IPV6 |
+ IONIC_RSS_TYPE_IPV6_TCP |
+ IONIC_RSS_TYPE_IPV6_UDP;
+
+ /* Fill indirection table with 'default' values */
+ tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
+ for (i = 0; i < tbl_sz; i++)
+ lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
+
+ return ionic_lif_rss_config(lif, lif->rss_types, rss_key, NULL);
+}
+
+static int ionic_lif_rss_deinit(struct ionic_lif *lif)
+{
+ return ionic_lif_rss_config(lif, 0x0, NULL, NULL);
+}
+
+static void ionic_txrx_disable(struct ionic_lif *lif)
+{
+ unsigned int i;
+
+ for (i = 0; i < lif->nxqs; i++) {
+ ionic_qcq_disable(lif->txqcqs[i].qcq);
+ ionic_qcq_disable(lif->rxqcqs[i].qcq);
+ }
+}
+
+static void ionic_txrx_deinit(struct ionic_lif *lif)
+{
+ unsigned int i;
+
+ for (i = 0; i < lif->nxqs; i++) {
+ ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
+ ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
+
+ ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
+ ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
+ ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
+ }
+}
+
+static void ionic_txrx_free(struct ionic_lif *lif)
+{
+ unsigned int i;
+
+ for (i = 0; i < lif->nxqs; i++) {
+ ionic_qcq_free(lif, lif->txqcqs[i].qcq);
+ lif->txqcqs[i].qcq = NULL;
+
+ ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
+ lif->rxqcqs[i].qcq = NULL;
+ }
+}
+
+static int ionic_txrx_alloc(struct ionic_lif *lif)
+{
+ unsigned int flags;
+ unsigned int i;
+ int err = 0;
+ u32 coal;
+
+ flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
+ for (i = 0; i < lif->nxqs; i++) {
+ err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
+ lif->ntxq_descs,
+ sizeof(struct ionic_txq_desc),
+ sizeof(struct ionic_txq_comp),
+ sizeof(struct ionic_txq_sg_desc),
+ lif->kern_pid, &lif->txqcqs[i].qcq);
+ if (err)
+ goto err_out;
+
+ lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
+ }
+
+ flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_INTR;
+ coal = ionic_coal_usec_to_hw(lif->ionic, lif->rx_coalesce_usecs);
+ for (i = 0; i < lif->nxqs; i++) {
+ err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
+ lif->nrxq_descs,
+ sizeof(struct ionic_rxq_desc),
+ sizeof(struct ionic_rxq_comp),
+ 0, lif->kern_pid, &lif->rxqcqs[i].qcq);
+ if (err)
+ goto err_out;
+
+ lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
+
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->rxqcqs[i].qcq->intr.index, coal);
+ ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
+ lif->txqcqs[i].qcq);
+ }
+
+ return 0;
+
+err_out:
+ ionic_txrx_free(lif);
+
+ return err;
+}
+
+static int ionic_txrx_init(struct ionic_lif *lif)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < lif->nxqs; i++) {
+ err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
+ if (err)
+ goto err_out;
+
+ err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
+ if (err) {
+ ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
+ goto err_out;
+ }
+ }
+
+ if (lif->netdev->features & NETIF_F_RXHASH)
+ ionic_lif_rss_init(lif);
+
+ ionic_set_rx_mode(lif->netdev);
+
+ return 0;
+
+err_out:
+ while (i--) {
+ ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
+ ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
+ }
+
+ return err;
+}
+
+static int ionic_txrx_enable(struct ionic_lif *lif)
+{
+ int i, err;
+
+ for (i = 0; i < lif->nxqs; i++) {
+ err = ionic_qcq_enable(lif->txqcqs[i].qcq);
+ if (err)
+ goto err_out;
+
+ ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
+ err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
+ if (err) {
+ ionic_qcq_disable(lif->txqcqs[i].qcq);
+ goto err_out;
+ }
+ }
+
+ return 0;
+
+err_out:
+ while (i--) {
+ ionic_qcq_disable(lif->rxqcqs[i].qcq);
+ ionic_qcq_disable(lif->txqcqs[i].qcq);
+ }
+
+ return err;
+}
+
+int ionic_open(struct net_device *netdev)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ int err;
+
+ netif_carrier_off(netdev);
+
+ err = ionic_txrx_alloc(lif);
+ if (err)
+ return err;
+
+ err = ionic_txrx_init(lif);
+ if (err)
+ goto err_txrx_free;
+
+ err = ionic_txrx_enable(lif);
+ if (err)
+ goto err_txrx_deinit;
+
+ netif_set_real_num_tx_queues(netdev, lif->nxqs);
+ netif_set_real_num_rx_queues(netdev, lif->nxqs);
+
+ set_bit(IONIC_LIF_UP, lif->state);
+
+ ionic_link_status_check_request(lif);
+ if (netif_carrier_ok(netdev))
+ netif_tx_wake_all_queues(netdev);
+
+ return 0;
+
+err_txrx_deinit:
+ ionic_txrx_deinit(lif);
+err_txrx_free:
+ ionic_txrx_free(lif);
+ return err;
+}
+
+int ionic_stop(struct net_device *netdev)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ int err = 0;
+
+ if (!test_bit(IONIC_LIF_UP, lif->state)) {
+ dev_dbg(lif->ionic->dev, "%s: %s state=DOWN\n",
+ __func__, lif->name);
+ return 0;
+ }
+ dev_dbg(lif->ionic->dev, "%s: %s state=UP\n", __func__, lif->name);
+ clear_bit(IONIC_LIF_UP, lif->state);
+
+ /* carrier off before disabling queues to avoid watchdog timeout */
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ netif_tx_disable(netdev);
+
+ ionic_txrx_disable(lif);
+ ionic_txrx_deinit(lif);
+ ionic_txrx_free(lif);
+
+ return err;
+}
+
+static const struct net_device_ops ionic_netdev_ops = {
+ .ndo_open = ionic_open,
+ .ndo_stop = ionic_stop,
+ .ndo_start_xmit = ionic_start_xmit,
+ .ndo_get_stats64 = ionic_get_stats64,
+ .ndo_set_rx_mode = ionic_set_rx_mode,
+ .ndo_set_features = ionic_set_features,
+ .ndo_set_mac_address = ionic_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = ionic_tx_timeout,
+ .ndo_change_mtu = ionic_change_mtu,
+ .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid,
+};
+
+int ionic_reset_queues(struct ionic_lif *lif)
+{
+ bool running;
+ int err = 0;
+
+ /* Put off the next watchdog timeout */
+ netif_trans_update(lif->netdev);
+
+ if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET))
+ return -EBUSY;
+
+ running = netif_running(lif->netdev);
+ if (running)
+ err = ionic_stop(lif->netdev);
+ if (!err && running)
+ ionic_open(lif->netdev);
+
+ clear_bit(IONIC_LIF_QUEUE_RESET, lif->state);
+
+ return err;
+}
+
+static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
+{
+ struct device *dev = ionic->dev;
+ struct net_device *netdev;
+ struct ionic_lif *lif;
+ int tbl_sz;
+ u32 coal;
+ int err;
+
+ netdev = alloc_etherdev_mqs(sizeof(*lif),
+ ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
+ if (!netdev) {
+ dev_err(dev, "Cannot allocate netdev, aborting\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ SET_NETDEV_DEV(netdev, dev);
+
+ lif = netdev_priv(netdev);
+ lif->netdev = netdev;
+ ionic->master_lif = lif;
+ netdev->netdev_ops = &ionic_netdev_ops;
+ ionic_ethtool_set_ops(netdev);
+
+ netdev->watchdog_timeo = 2 * HZ;
+ netdev->min_mtu = IONIC_MIN_MTU;
+ netdev->max_mtu = IONIC_MAX_MTU;
+
+ lif->neqs = ionic->neqs_per_lif;
+ lif->nxqs = ionic->ntxqs_per_lif;
+
+ lif->ionic = ionic;
+ lif->index = index;
+ lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
+ lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
+
+ /* Convert the default coalesce value to actual hw resolution */
+ coal = ionic_coal_usec_to_hw(lif->ionic, IONIC_ITR_COAL_USEC_DEFAULT);
+ lif->rx_coalesce_usecs = ionic_coal_hw_to_usec(lif->ionic, coal);
+
+ snprintf(lif->name, sizeof(lif->name), "lif%u", index);
+
+ spin_lock_init(&lif->adminq_lock);
+
+ spin_lock_init(&lif->deferred.lock);
+ INIT_LIST_HEAD(&lif->deferred.list);
+ INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
+
+ /* allocate lif info */
+ lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
+ lif->info = dma_alloc_coherent(dev, lif->info_sz,
+ &lif->info_pa, GFP_KERNEL);
+ if (!lif->info) {
+ dev_err(dev, "Failed to allocate lif info, aborting\n");
+ err = -ENOMEM;
+ goto err_out_free_netdev;
+ }
+
+ /* allocate queues */
+ err = ionic_qcqs_alloc(lif);
+ if (err)
+ goto err_out_free_lif_info;
+
+ /* allocate rss indirection table */
+ tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
+ lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
+ lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
+ &lif->rss_ind_tbl_pa,
+ GFP_KERNEL);
+ if (!lif->rss_ind_tbl) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
+ goto err_out_free_qcqs;
+ }
+
+ list_add_tail(&lif->list, &ionic->lifs);
+
+ return lif;
+
+err_out_free_qcqs:
+ ionic_qcqs_free(lif);
+err_out_free_lif_info:
+ dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
+ lif->info = NULL;
+ lif->info_pa = 0;
+err_out_free_netdev:
+ free_netdev(lif->netdev);
+ lif = NULL;
+
+ return ERR_PTR(err);
+}
+
+int ionic_lifs_alloc(struct ionic *ionic)
+{
+ struct ionic_lif *lif;
+
+ INIT_LIST_HEAD(&ionic->lifs);
+
+ /* only build the first lif, others are for later features */
+ set_bit(0, ionic->lifbits);
+ lif = ionic_lif_alloc(ionic, 0);
+
+ return PTR_ERR_OR_ZERO(lif);
+}
+
+static void ionic_lif_reset(struct ionic_lif *lif)
+{
+ struct ionic_dev *idev = &lif->ionic->idev;
+
+ mutex_lock(&lif->ionic->dev_cmd_lock);
+ ionic_dev_cmd_lif_reset(idev, lif->index);
+ ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&lif->ionic->dev_cmd_lock);
+}
+
+static void ionic_lif_free(struct ionic_lif *lif)
+{
+ struct device *dev = lif->ionic->dev;
+
+ /* free rss indirection table */
+ dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
+ lif->rss_ind_tbl_pa);
+ lif->rss_ind_tbl = NULL;
+ lif->rss_ind_tbl_pa = 0;
+
+ /* free queues */
+ ionic_qcqs_free(lif);
+ ionic_lif_reset(lif);
+
+ /* free lif info */
+ dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
+ lif->info = NULL;
+ lif->info_pa = 0;
+
+ /* unmap doorbell page */
+ ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
+ lif->kern_dbpage = NULL;
+ kfree(lif->dbid_inuse);
+ lif->dbid_inuse = NULL;
+
+ /* free netdev & lif */
+ ionic_debugfs_del_lif(lif);
+ list_del(&lif->list);
+ free_netdev(lif->netdev);
+}
+
+void ionic_lifs_free(struct ionic *ionic)
+{
+ struct list_head *cur, *tmp;
+ struct ionic_lif *lif;
+
+ list_for_each_safe(cur, tmp, &ionic->lifs) {
+ lif = list_entry(cur, struct ionic_lif, list);
+
+ ionic_lif_free(lif);
+ }
+}
+
+static void ionic_lif_deinit(struct ionic_lif *lif)
+{
+ if (!test_bit(IONIC_LIF_INITED, lif->state))
+ return;
+
+ clear_bit(IONIC_LIF_INITED, lif->state);
+
+ ionic_rx_filters_deinit(lif);
+ ionic_lif_rss_deinit(lif);
+
+ napi_disable(&lif->adminqcq->napi);
+ ionic_lif_qcq_deinit(lif, lif->notifyqcq);
+ ionic_lif_qcq_deinit(lif, lif->adminqcq);
+
+ ionic_lif_reset(lif);
+}
+
+void ionic_lifs_deinit(struct ionic *ionic)
+{
+ struct list_head *cur, *tmp;
+ struct ionic_lif *lif;
+
+ list_for_each_safe(cur, tmp, &ionic->lifs) {
+ lif = list_entry(cur, struct ionic_lif, list);
+ ionic_lif_deinit(lif);
+ }
+}
+
+static int ionic_lif_adminq_init(struct ionic_lif *lif)
+{
+ struct device *dev = lif->ionic->dev;
+ struct ionic_q_init_comp comp;
+ struct ionic_dev *idev;
+ struct ionic_qcq *qcq;
+ struct ionic_queue *q;
+ int err;
+
+ idev = &lif->ionic->idev;
+ qcq = lif->adminqcq;
+ q = &qcq->q;
+
+ mutex_lock(&lif->ionic->dev_cmd_lock);
+ ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
+ err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
+ ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
+ mutex_unlock(&lif->ionic->dev_cmd_lock);
+ if (err) {
+ netdev_err(lif->netdev, "adminq init failed %d\n", err);
+ return err;
+ }
+
+ q->hw_type = comp.hw_type;
+ q->hw_index = le32_to_cpu(comp.hw_index);
+ q->dbval = IONIC_DBELL_QID(q->hw_index);
+
+ dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
+ dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
+
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
+ NAPI_POLL_WEIGHT);
+
+ err = ionic_request_irq(lif, qcq);
+ if (err) {
+ netdev_warn(lif->netdev, "adminq irq request failed %d\n", err);
+ netif_napi_del(&qcq->napi);
+ return err;
+ }
+
+ napi_enable(&qcq->napi);
+
+ if (qcq->flags & IONIC_QCQ_F_INTR)
+ ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
+ IONIC_INTR_MASK_CLEAR);
+
+ qcq->flags |= IONIC_QCQ_F_INITED;
+
+ ionic_debugfs_add_qcq(lif, qcq);
+
+ return 0;
+}
+
+static int ionic_lif_notifyq_init(struct ionic_lif *lif)
+{
+ struct ionic_qcq *qcq = lif->notifyqcq;
+ struct device *dev = lif->ionic->dev;
+ struct ionic_queue *q = &qcq->q;
+ int err;
+
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.q_init = {
+ .opcode = IONIC_CMD_Q_INIT,
+ .lif_index = cpu_to_le16(lif->index),
+ .type = q->type,
+ .index = cpu_to_le32(q->index),
+ .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
+ IONIC_QINIT_F_ENA),
+ .intr_index = cpu_to_le16(lif->adminqcq->intr.index),
+ .pid = cpu_to_le16(q->pid),
+ .ring_size = ilog2(q->num_descs),
+ .ring_base = cpu_to_le64(q->base_pa),
+ }
+ };
+
+ dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
+ dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
+ dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
+ dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ return err;
+
+ q->hw_type = ctx.comp.q_init.hw_type;
+ q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
+ q->dbval = IONIC_DBELL_QID(q->hw_index);
+
+ dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
+ dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
+
+ /* preset the callback info */
+ q->info[0].cb_arg = lif;
+
+ qcq->flags |= IONIC_QCQ_F_INITED;
+
+ ionic_debugfs_add_qcq(lif, qcq);
+
+ return 0;
+}
+
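+/* Fetch the port's MAC address from the device and install it as the
+ * netdev's station address, swapping the rx filter if it changed.
+ */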
+static int ionic_station_set(struct ionic_lif *lif)
+{
+ struct net_device *netdev = lif->netdev;
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_getattr = {
+ .opcode = IONIC_CMD_LIF_GETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_MAC,
+ },
+ };
+ struct sockaddr addr;
+ int err;
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ return err;
+
+ memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
+ addr.sa_family = AF_INET;
+ err = eth_prepare_mac_addr_change(netdev, &addr);
+ if (err)
+ return err;
+
+ if (!is_zero_ether_addr(netdev->dev_addr)) {
+ netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
+ netdev->dev_addr);
+ ionic_lif_addr(lif, netdev->dev_addr, false);
+ }
+
+ eth_commit_mac_addr_change(netdev, &addr);
+ netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
+ netdev->dev_addr);
+ ionic_lif_addr(lif, netdev->dev_addr, true);
+
+ return 0;
+}
+
+static int ionic_lif_init(struct ionic_lif *lif)
+{
+ struct ionic_dev *idev = &lif->ionic->idev;
+ struct device *dev = lif->ionic->dev;
+ struct ionic_lif_init_comp comp;
+ int dbpage_num;
+ int err;
+
+ ionic_debugfs_add_lif(lif);
+
+ mutex_lock(&lif->ionic->dev_cmd_lock);
+ ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
+ err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
+ ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
+ mutex_unlock(&lif->ionic->dev_cmd_lock);
+ if (err)
+ return err;
+
+ lif->hw_index = le16_to_cpu(comp.hw_index);
+
+ /* now that we have the hw_index we can figure out our doorbell page */
+ lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
+ if (!lif->dbid_count) {
+ dev_err(dev, "No doorbell pages, aborting\n");
+ return -EINVAL;
+ }
+
+ lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
+ if (!lif->dbid_inuse) {
+ dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
+ return -ENOMEM;
+ }
+
+ /* first doorbell id reserved for kernel (dbid aka pid == zero) */
+ set_bit(0, lif->dbid_inuse);
+ lif->kern_pid = 0;
+
+ dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
+ lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
+ if (!lif->kern_dbpage) {
+ dev_err(dev, "Cannot map dbpage, aborting\n");
+ err = -ENOMEM;
+ goto err_out_free_dbid;
+ }
+
+ err = ionic_lif_adminq_init(lif);
+ if (err)
+ goto err_out_adminq_deinit;
+
+ if (lif->ionic->nnqs_per_lif) {
+ err = ionic_lif_notifyq_init(lif);
+ if (err)
+ goto err_out_notifyq_deinit;
+ }
+
+ err = ionic_init_nic_features(lif);
+ if (err)
+ goto err_out_notifyq_deinit;
+
+ err = ionic_rx_filters_init(lif);
+ if (err)
+ goto err_out_notifyq_deinit;
+
+ err = ionic_station_set(lif);
+ if (err)
+ goto err_out_notifyq_deinit;
+
+ lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
+
+ set_bit(IONIC_LIF_INITED, lif->state);
+
+ INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
+
+ return 0;
+
+err_out_notifyq_deinit:
+ ionic_lif_qcq_deinit(lif, lif->notifyqcq);
+err_out_adminq_deinit:
+ ionic_lif_qcq_deinit(lif, lif->adminqcq);
+ ionic_lif_reset(lif);
+ ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
+ lif->kern_dbpage = NULL;
+err_out_free_dbid:
+ kfree(lif->dbid_inuse);
+ lif->dbid_inuse = NULL;
+
+ return err;
+}
+
+int ionic_lifs_init(struct ionic *ionic)
+{
+ struct list_head *cur, *tmp;
+ struct ionic_lif *lif;
+ int err;
+
+ list_for_each_safe(cur, tmp, &ionic->lifs) {
+ lif = list_entry(cur, struct ionic_lif, list);
+ err = ionic_lif_init(lif);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void ionic_lif_notify_work(struct work_struct *ws)
+{
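+ /* no deferred notifier work needed yet */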
+}
+
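+/* Push the netdev's current name to the device as the lif's name attribute */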
+static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_setattr = {
+ .opcode = IONIC_CMD_LIF_SETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_NAME,
+ },
+ };
+
+ strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
+ sizeof(ctx.cmd.lif_setattr.name));
+
+ ionic_adminq_post_wait(lif, &ctx);
+}
+
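+/* A netdev belongs to this driver iff its ndo_start_xmit is ours;
+ * this avoids keeping a separate registry of ionic netdevs.
+ */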
+static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
+{
+ if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
+ return NULL;
+
+ return netdev_priv(netdev);
+}
+
+static int ionic_lif_notify(struct notifier_block *nb,
+ unsigned long event, void *info)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(info);
+ struct ionic *ionic = container_of(nb, struct ionic, nb);
+ struct ionic_lif *lif = ionic_netdev_lif(ndev);
+
+ if (!lif || lif->ionic != ionic)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ ionic_lif_set_netdev_info(lif);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+int ionic_lifs_register(struct ionic *ionic)
+{
+ int err;
+
+ INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
+
+ ionic->nb.notifier_call = ionic_lif_notify;
+
+ err = register_netdevice_notifier(&ionic->nb);
+ if (err)
+ ionic->nb.notifier_call = NULL;
+
+ /* only register LIF0 for now */
+ err = register_netdev(ionic->master_lif->netdev);
+ if (err) {
+ dev_err(ionic->dev, "Cannot register net device, aborting\n");
+ return err;
+ }
+
+ ionic_link_status_check_request(ionic->master_lif);
+ ionic->master_lif->registered = true;
+
+ return 0;
+}
+
+void ionic_lifs_unregister(struct ionic *ionic)
+{
+ if (ionic->nb.notifier_call) {
+ unregister_netdevice_notifier(&ionic->nb);
+ cancel_work_sync(&ionic->nb_work);
+ ionic->nb.notifier_call = NULL;
+ }
+
+ /* There is only one lif ever registered in the
+ * current model, so don't bother searching the
+ * ionic->lifs list for candidates to unregister
+ */
+ cancel_work_sync(&ionic->master_lif->deferred.work);
+ cancel_work_sync(&ionic->master_lif->tx_timeout_work);
+ if (ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(ionic->master_lif->netdev);
+}
+
+int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
+ union ionic_lif_identity *lid)
+{
+ struct ionic_dev *idev = &ionic->idev;
+ size_t sz;
+ int err;
+
+ sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
+ mutex_unlock(&ionic->dev_cmd_lock);
+ if (err)
+ return err;
+
+ dev_dbg(ionic->dev, "capabilities 0x%llx\n",
+ le64_to_cpu(lid->capabilities));
+
+ dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
+ le32_to_cpu(lid->eth.max_ucast_filters));
+ dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
+ le32_to_cpu(lid->eth.max_mcast_filters));
+ dev_dbg(ionic->dev, "eth.features 0x%llx\n",
+ le64_to_cpu(lid->eth.config.features));
+ dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
+ le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
+ dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
+ le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
+ dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
+ le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
+ dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
+ le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
+ dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
+ dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
+ dev_dbg(ionic->dev, "eth.config.mtu %d\n",
+ le32_to_cpu(lid->eth.config.mtu));
+
+ return 0;
+}
+
+int ionic_lifs_size(struct ionic *ionic)
+{
+ struct ionic_identity *ident = &ionic->ident;
+ unsigned int nintrs, dev_nintrs;
+ union ionic_lif_config *lc;
+ unsigned int ntxqs_per_lif;
+ unsigned int nrxqs_per_lif;
+ unsigned int neqs_per_lif;
+ unsigned int nnqs_per_lif;
+ unsigned int nxqs, neqs;
+ unsigned int min_intrs;
+ int err;
+
+ lc = &ident->lif.eth.config;
+ dev_nintrs = le32_to_cpu(ident->dev.nintrs);
+ neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
+ nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
+ ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
+ nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
+
+ nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
+ nxqs = min(nxqs, num_online_cpus());
+ neqs = min(neqs_per_lif, num_online_cpus());
+
+try_again:
+ /* interrupt usage:
+ * 1 for master lif adminq/notifyq
+ * 1 for each CPU for master lif TxRx queue pairs
+ * whatever's left is for RDMA queues
+ */
+ nintrs = 1 + nxqs + neqs;
+ min_intrs = 2; /* adminq + 1 TxRx queue pair */
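+ /* e.g. with 8 online CPUs, 8 TxRx queue pairs and no RDMA
+ * EQs: nintrs = 1 + 8 + 0 = 9
+ */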
+
+ if (nintrs > dev_nintrs)
+ goto try_fewer;
+
+ err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
+ if (err < 0 && err != -ENOSPC) {
+ dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
+ return err;
+ }
+ if (err == -ENOSPC)
+ goto try_fewer;
+
+ if (err != nintrs) {
+ ionic_bus_free_irq_vectors(ionic);
+ goto try_fewer;
+ }
+
+ ionic->nnqs_per_lif = nnqs_per_lif;
+ ionic->neqs_per_lif = neqs;
+ ionic->ntxqs_per_lif = nxqs;
+ ionic->nrxqs_per_lif = nxqs;
+ ionic->nintrs = nintrs;
+
+ ionic_debugfs_add_sizes(ionic);
+
+ return 0;
+
+try_fewer:
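+ /* shed resources in order of least impact: halve the notifyq
+ * count first, then RDMA EQs, then TxRx queue pairs
+ */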
+ if (nnqs_per_lif > 1) {
+ nnqs_per_lif >>= 1;
+ goto try_again;
+ }
+ if (neqs > 1) {
+ neqs >>= 1;
+ goto try_again;
+ }
+ if (nxqs > 1) {
+ nxqs >>= 1;
+ goto try_again;
+ }
+ dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
+ return -ENOSPC;
+}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
new file mode 100644
index 000000000000..812190e729c2
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#ifndef _IONIC_LIF_H_
+#define _IONIC_LIF_H_
+
+#include <linux/pci.h>
+#include "ionic_rx_filter.h"
+
+#define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */
+#define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */
+
+#define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1)
+#define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1)
+#define IONIC_RX_COPYBREAK_DEFAULT 256
+
+struct ionic_tx_stats {
+ u64 dma_map_err;
+ u64 pkts;
+ u64 bytes;
+ u64 clean;
+ u64 linearize;
+ u64 no_csum;
+ u64 csum;
+ u64 crc32_csum;
+ u64 tso;
+ u64 frags;
+ u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR];
+};
+
+struct ionic_rx_stats {
+ u64 dma_map_err;
+ u64 alloc_err;
+ u64 pkts;
+ u64 bytes;
+ u64 csum_none;
+ u64 csum_complete;
+ u64 csum_error;
+ u64 buffers_posted;
+};
+
+#define IONIC_QCQ_F_INITED BIT(0)
+#define IONIC_QCQ_F_SG BIT(1)
+#define IONIC_QCQ_F_INTR BIT(2)
+#define IONIC_QCQ_F_TX_STATS BIT(3)
+#define IONIC_QCQ_F_RX_STATS BIT(4)
+#define IONIC_QCQ_F_NOTIFYQ BIT(5)
+
+struct ionic_napi_stats {
+ u64 poll_count;
+ u64 work_done_cntr[IONIC_MAX_NUM_NAPI_CNTR];
+};
+
+struct ionic_q_stats {
+ union {
+ struct ionic_tx_stats tx;
+ struct ionic_rx_stats rx;
+ };
+};
+
+struct ionic_qcq {
+ void *base;
+ dma_addr_t base_pa;
+ unsigned int total_size;
+ struct ionic_queue q;
+ struct ionic_cq cq;
+ struct ionic_intr_info intr;
+ struct napi_struct napi;
+ struct ionic_napi_stats napi_stats;
+ struct ionic_q_stats *stats;
+ unsigned int flags;
+ struct dentry *dentry;
+};
+
+struct ionic_qcqst {
+ struct ionic_qcq *qcq;
+ struct ionic_q_stats *stats;
+};
+
+#define q_to_qcq(q) container_of(q, struct ionic_qcq, q)
+#define q_to_tx_stats(q) (&q_to_qcq(q)->stats->tx)
+#define q_to_rx_stats(q) (&q_to_qcq(q)->stats->rx)
+#define napi_to_qcq(napi) container_of(napi, struct ionic_qcq, napi)
+#define napi_to_cq(napi) (&napi_to_qcq(napi)->cq)
+
+enum ionic_deferred_work_type {
+ IONIC_DW_TYPE_RX_MODE,
+ IONIC_DW_TYPE_RX_ADDR_ADD,
+ IONIC_DW_TYPE_RX_ADDR_DEL,
+ IONIC_DW_TYPE_LINK_STATUS,
+ IONIC_DW_TYPE_LIF_RESET,
+};
+
+struct ionic_deferred_work {
+ struct list_head list;
+ enum ionic_deferred_work_type type;
+ union {
+ unsigned int rx_mode;
+ u8 addr[ETH_ALEN];
+ };
+};
+
+struct ionic_deferred {
+ spinlock_t lock; /* lock for deferred work list */
+ struct list_head list;
+ struct work_struct work;
+};
+
+struct ionic_lif_sw_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_tso;
+ u64 tx_no_csum;
+ u64 tx_csum;
+ u64 rx_csum_none;
+ u64 rx_csum_complete;
+ u64 rx_csum_error;
+};
+
+enum ionic_lif_state_flags {
+ IONIC_LIF_INITED,
+ IONIC_LIF_SW_DEBUG_STATS,
+ IONIC_LIF_UP,
+ IONIC_LIF_LINK_CHECK_REQUESTED,
+ IONIC_LIF_QUEUE_RESET,
+
+ /* leave this as last */
+ IONIC_LIF_STATE_SIZE
+};
+
+#define IONIC_LIF_NAME_MAX_SZ 32
+struct ionic_lif {
+ char name[IONIC_LIF_NAME_MAX_SZ];
+ struct list_head list;
+ struct net_device *netdev;
+ DECLARE_BITMAP(state, IONIC_LIF_STATE_SIZE);
+ struct ionic *ionic;
+ bool registered;
+ unsigned int index;
+ unsigned int hw_index;
+ unsigned int kern_pid;
+ u64 __iomem *kern_dbpage;
+ spinlock_t adminq_lock; /* lock for AdminQ operations */
+ struct ionic_qcq *adminqcq;
+ struct ionic_qcq *notifyqcq;
+ struct ionic_qcqst *txqcqs;
+ struct ionic_qcqst *rxqcqs;
+ u64 last_eid;
+ unsigned int neqs;
+ unsigned int nxqs;
+ unsigned int ntxq_descs;
+ unsigned int nrxq_descs;
+ u32 rx_copybreak;
+ unsigned int rx_mode;
+ u64 hw_features;
+ bool mc_overflow;
+ unsigned int nmcast;
+ bool uc_overflow;
+ unsigned int nucast;
+
+ struct ionic_lif_info *info;
+ dma_addr_t info_pa;
+ u32 info_sz;
+
+ u16 rss_types;
+ u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE];
+ u8 *rss_ind_tbl;
+ dma_addr_t rss_ind_tbl_pa;
+ u32 rss_ind_tbl_sz;
+
+ struct ionic_rx_filters rx_filters;
+ struct ionic_deferred deferred;
+ unsigned long *dbid_inuse;
+ unsigned int dbid_count;
+ struct dentry *dentry;
+ u32 rx_coalesce_usecs;
+ u32 flags;
+ struct work_struct tx_timeout_work;
+};
+
+#define lif_to_txqcq(lif, i) ((lif)->txqcqs[i].qcq)
+#define lif_to_rxqcq(lif, i) ((lif)->rxqcqs[i].qcq)
+#define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q)
+#define lif_to_rxq(lif, i) (&lif_to_rxqcq((lif), i)->q)
+
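+/* Spin for up to a second trying to atomically claim a lif state bit;
+ * returns the bit's value on exit, i.e. nonzero while the bit is held.
+ */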
+static inline int ionic_wait_for_bit(struct ionic_lif *lif, int bitname)
+{
+ unsigned long tlimit = jiffies + HZ;
+
+ while (test_and_set_bit(bitname, lif->state) &&
+ time_before(jiffies, tlimit))
+ usleep_range(100, 200);
+
+ return test_bit(bitname, lif->state);
+}
+
+static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
+{
+ u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult);
+ u32 div = le32_to_cpu(ionic->ident.dev.intr_coal_div);
+
+ /* Div-by-zero should never be an issue, but check anyway */
+ if (!div || !mult)
+ return 0;
+
+ /* Round up in case usecs is close to the next hw unit */
+ usecs += (div / mult) >> 1;
+
+ /* Convert from usecs to device units */
+ return (usecs * mult) / div;
+}
+
+static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units)
+{
+ u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult);
+ u32 div = le32_to_cpu(ionic->ident.dev.intr_coal_div);
+
+ /* Div-by-zero should never be an issue, but check anyway */
+ if (!div || !mult)
+ return 0;
+
+ /* Convert from device units to usec */
+ return (units * div) / mult;
+}
+
+int ionic_lifs_alloc(struct ionic *ionic);
+void ionic_lifs_free(struct ionic *ionic);
+void ionic_lifs_deinit(struct ionic *ionic);
+int ionic_lifs_init(struct ionic *ionic);
+int ionic_lifs_register(struct ionic *ionic);
+void ionic_lifs_unregister(struct ionic *ionic);
+int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
+ union ionic_lif_identity *lif_ident);
+int ionic_lifs_size(struct ionic *ionic);
+int ionic_lif_rss_config(struct ionic_lif *lif, u16 types,
+ const u8 *key, const u32 *indir);
+
+int ionic_open(struct net_device *netdev);
+int ionic_stop(struct net_device *netdev);
+int ionic_reset_queues(struct ionic_lif *lif);
+
+static inline void debug_stats_txq_post(struct ionic_qcq *qcq,
+ struct ionic_txq_desc *desc, bool dbell)
+{
+ u8 num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT)
+ & IONIC_TXQ_DESC_NSGE_MASK);
+
+ qcq->q.dbell_count += dbell;
+
+ if (num_sg_elems > (IONIC_MAX_NUM_SG_CNTR - 1))
+ num_sg_elems = IONIC_MAX_NUM_SG_CNTR - 1;
+
+ qcq->stats->tx.sg_cntr[num_sg_elems]++;
+}
+
+static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
+ unsigned int work_done)
+{
+ qcq->napi_stats.poll_count++;
+
+ if (work_done > (IONIC_MAX_NUM_NAPI_CNTR - 1))
+ work_done = IONIC_MAX_NUM_NAPI_CNTR - 1;
+
+ qcq->napi_stats.work_done_cntr[work_done]++;
+}
+
+#define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++)
+#define DEBUG_STATS_RX_BUFF_CNT(qcq) ((qcq)->stats->rx.buffers_posted++)
+#define DEBUG_STATS_INTR_REARM(intr) ((intr)->rearm_count++)
+#define DEBUG_STATS_TXQ_POST(qcq, txdesc, dbell) \
+ debug_stats_txq_post(qcq, txdesc, dbell)
+#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \
+ debug_stats_napi_poll(qcq, work_done)
+
+#endif /* _IONIC_LIF_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
new file mode 100644
index 000000000000..5ec67f3f1853
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/utsname.h>
+
+#include "ionic.h"
+#include "ionic_bus.h"
+#include "ionic_lif.h"
+#include "ionic_debugfs.h"
+
+MODULE_DESCRIPTION(IONIC_DRV_DESCRIPTION);
+MODULE_AUTHOR("Pensando Systems, Inc");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(IONIC_DRV_VERSION);
+
+static const char *ionic_error_to_str(enum ionic_status_code code)
+{
+ switch (code) {
+ case IONIC_RC_SUCCESS:
+ return "IONIC_RC_SUCCESS";
+ case IONIC_RC_EVERSION:
+ return "IONIC_RC_EVERSION";
+ case IONIC_RC_EOPCODE:
+ return "IONIC_RC_EOPCODE";
+ case IONIC_RC_EIO:
+ return "IONIC_RC_EIO";
+ case IONIC_RC_EPERM:
+ return "IONIC_RC_EPERM";
+ case IONIC_RC_EQID:
+ return "IONIC_RC_EQID";
+ case IONIC_RC_EQTYPE:
+ return "IONIC_RC_EQTYPE";
+ case IONIC_RC_ENOENT:
+ return "IONIC_RC_ENOENT";
+ case IONIC_RC_EINTR:
+ return "IONIC_RC_EINTR";
+ case IONIC_RC_EAGAIN:
+ return "IONIC_RC_EAGAIN";
+ case IONIC_RC_ENOMEM:
+ return "IONIC_RC_ENOMEM";
+ case IONIC_RC_EFAULT:
+ return "IONIC_RC_EFAULT";
+ case IONIC_RC_EBUSY:
+ return "IONIC_RC_EBUSY";
+ case IONIC_RC_EEXIST:
+ return "IONIC_RC_EEXIST";
+ case IONIC_RC_EINVAL:
+ return "IONIC_RC_EINVAL";
+ case IONIC_RC_ENOSPC:
+ return "IONIC_RC_ENOSPC";
+ case IONIC_RC_ERANGE:
+ return "IONIC_RC_ERANGE";
+ case IONIC_RC_BAD_ADDR:
+ return "IONIC_RC_BAD_ADDR";
+ case IONIC_RC_DEV_CMD:
+ return "IONIC_RC_DEV_CMD";
+ case IONIC_RC_ERROR:
+ return "IONIC_RC_ERROR";
+ case IONIC_RC_ERDMA:
+ return "IONIC_RC_ERDMA";
+ default:
+ return "IONIC_RC_UNKNOWN";
+ }
+}
+
+static int ionic_error_to_errno(enum ionic_status_code code)
+{
+ switch (code) {
+ case IONIC_RC_SUCCESS:
+ return 0;
+ case IONIC_RC_EVERSION:
+ case IONIC_RC_EQTYPE:
+ case IONIC_RC_EQID:
+ case IONIC_RC_EINVAL:
+ return -EINVAL;
+ case IONIC_RC_EPERM:
+ return -EPERM;
+ case IONIC_RC_ENOENT:
+ return -ENOENT;
+ case IONIC_RC_EAGAIN:
+ return -EAGAIN;
+ case IONIC_RC_ENOMEM:
+ return -ENOMEM;
+ case IONIC_RC_EFAULT:
+ return -EFAULT;
+ case IONIC_RC_EBUSY:
+ return -EBUSY;
+ case IONIC_RC_EEXIST:
+ return -EEXIST;
+ case IONIC_RC_ENOSPC:
+ return -ENOSPC;
+ case IONIC_RC_ERANGE:
+ return -ERANGE;
+ case IONIC_RC_BAD_ADDR:
+ return -EFAULT;
+ case IONIC_RC_EOPCODE:
+ case IONIC_RC_EINTR:
+ case IONIC_RC_DEV_CMD:
+ case IONIC_RC_ERROR:
+ case IONIC_RC_ERDMA:
+ case IONIC_RC_EIO:
+ default:
+ return -EIO;
+ }
+}
+
+static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
+{
+ switch (opcode) {
+ case IONIC_CMD_NOP:
+ return "IONIC_CMD_NOP";
+ case IONIC_CMD_INIT:
+ return "IONIC_CMD_INIT";
+ case IONIC_CMD_RESET:
+ return "IONIC_CMD_RESET";
+ case IONIC_CMD_IDENTIFY:
+ return "IONIC_CMD_IDENTIFY";
+ case IONIC_CMD_GETATTR:
+ return "IONIC_CMD_GETATTR";
+ case IONIC_CMD_SETATTR:
+ return "IONIC_CMD_SETATTR";
+ case IONIC_CMD_PORT_IDENTIFY:
+ return "IONIC_CMD_PORT_IDENTIFY";
+ case IONIC_CMD_PORT_INIT:
+ return "IONIC_CMD_PORT_INIT";
+ case IONIC_CMD_PORT_RESET:
+ return "IONIC_CMD_PORT_RESET";
+ case IONIC_CMD_PORT_GETATTR:
+ return "IONIC_CMD_PORT_GETATTR";
+ case IONIC_CMD_PORT_SETATTR:
+ return "IONIC_CMD_PORT_SETATTR";
+ case IONIC_CMD_LIF_INIT:
+ return "IONIC_CMD_LIF_INIT";
+ case IONIC_CMD_LIF_RESET:
+ return "IONIC_CMD_LIF_RESET";
+ case IONIC_CMD_LIF_IDENTIFY:
+ return "IONIC_CMD_LIF_IDENTIFY";
+ case IONIC_CMD_LIF_SETATTR:
+ return "IONIC_CMD_LIF_SETATTR";
+ case IONIC_CMD_LIF_GETATTR:
+ return "IONIC_CMD_LIF_GETATTR";
+ case IONIC_CMD_RX_MODE_SET:
+ return "IONIC_CMD_RX_MODE_SET";
+ case IONIC_CMD_RX_FILTER_ADD:
+ return "IONIC_CMD_RX_FILTER_ADD";
+ case IONIC_CMD_RX_FILTER_DEL:
+ return "IONIC_CMD_RX_FILTER_DEL";
+ case IONIC_CMD_Q_INIT:
+ return "IONIC_CMD_Q_INIT";
+ case IONIC_CMD_Q_CONTROL:
+ return "IONIC_CMD_Q_CONTROL";
+ case IONIC_CMD_RDMA_RESET_LIF:
+ return "IONIC_CMD_RDMA_RESET_LIF";
+ case IONIC_CMD_RDMA_CREATE_EQ:
+ return "IONIC_CMD_RDMA_CREATE_EQ";
+ case IONIC_CMD_RDMA_CREATE_CQ:
+ return "IONIC_CMD_RDMA_CREATE_CQ";
+ case IONIC_CMD_RDMA_CREATE_ADMINQ:
+ return "IONIC_CMD_RDMA_CREATE_ADMINQ";
+ case IONIC_CMD_FW_DOWNLOAD:
+ return "IONIC_CMD_FW_DOWNLOAD";
+ case IONIC_CMD_FW_CONTROL:
+ return "IONIC_CMD_FW_CONTROL";
+ default:
+ return "DEVCMD_UNKNOWN";
+ }
+}
+
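+/* Clear out any admin descriptors still pending after a timeout so
+ * their completion callbacks are never invoked with stale contexts.
+ */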
+static void ionic_adminq_flush(struct ionic_lif *lif)
+{
+ struct ionic_queue *adminq = &lif->adminqcq->q;
+
+ spin_lock(&lif->adminq_lock);
+
+ while (adminq->tail != adminq->head) {
+ memset(adminq->tail->desc, 0, sizeof(union ionic_adminq_cmd));
+ adminq->tail->cb = NULL;
+ adminq->tail->cb_arg = NULL;
+ adminq->tail = adminq->tail->next;
+ }
+ spin_unlock(&lif->adminq_lock);
+}
+
+static int ionic_adminq_check_err(struct ionic_lif *lif,
+ struct ionic_admin_ctx *ctx,
+ bool timeout)
+{
+ struct net_device *netdev = lif->netdev;
+ const char *opcode_str;
+ const char *status_str;
+ int err = 0;
+
+ if (ctx->comp.comp.status || timeout) {
+ opcode_str = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
+ status_str = ionic_error_to_str(ctx->comp.comp.status);
+ err = timeout ? -ETIMEDOUT :
+ ionic_error_to_errno(ctx->comp.comp.status);
+
+ netdev_err(netdev, "%s (%d) failed: %s (%d)\n",
+ opcode_str, ctx->cmd.cmd.opcode,
+ timeout ? "TIMEOUT" : status_str, err);
+
+ if (timeout)
+ ionic_adminq_flush(lif);
+ }
+
+ return err;
+}
+
+static void ionic_adminq_cb(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info, void *cb_arg)
+{
+ struct ionic_admin_ctx *ctx = cb_arg;
+ struct ionic_admin_comp *comp;
+ struct device *dev;
+
+ if (!ctx)
+ return;
+
+ comp = cq_info->cq_desc;
+ dev = &q->lif->netdev->dev;
+
+ memcpy(&ctx->comp, comp, sizeof(*comp));
+
+ dev_dbg(dev, "comp admin queue command:\n");
+ dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
+ &ctx->comp, sizeof(ctx->comp), true);
+
+ complete_all(&ctx->work);
+}
+
+static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
+{
+ struct ionic_queue *adminq = &lif->adminqcq->q;
+ int err = 0;
+
+ WARN_ON(in_interrupt());
+
+ spin_lock(&lif->adminq_lock);
+ if (!ionic_q_has_space(adminq, 1)) {
+ err = -ENOSPC;
+ goto err_out;
+ }
+
+ memcpy(adminq->head->desc, &ctx->cmd, sizeof(ctx->cmd));
+
+ dev_dbg(&lif->netdev->dev, "post admin queue command:\n");
+ dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
+ &ctx->cmd, sizeof(ctx->cmd), true);
+
+ ionic_q_post(adminq, true, ionic_adminq_cb, ctx);
+
+err_out:
+ spin_unlock(&lif->adminq_lock);
+
+ return err;
+}
+
+int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
+{
+ struct net_device *netdev = lif->netdev;
+ unsigned long remaining;
+ const char *name;
+ int err;
+
+ err = ionic_adminq_post(lif, ctx);
+ if (err) {
+ name = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
+ netdev_err(netdev, "Posting of %s (%d) failed: %d\n",
+ name, ctx->cmd.cmd.opcode, err);
+ return err;
+ }
+
+ remaining = wait_for_completion_timeout(&ctx->work,
+ HZ * (ulong)DEVCMD_TIMEOUT);
+ return ionic_adminq_check_err(lif, ctx, (remaining == 0));
+}
+
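+/* Common NAPI service routine: drain up to budget completions via cb,
+ * then return the credits to the interrupt block, unmasking only once
+ * polling is complete.
+ */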
+int ionic_napi(struct napi_struct *napi, int budget, ionic_cq_cb cb,
+ ionic_cq_done_cb done_cb, void *done_arg)
+{
+ struct ionic_qcq *qcq = napi_to_qcq(napi);
+ struct ionic_cq *cq = &qcq->cq;
+ u32 work_done, flags = 0;
+
+ work_done = ionic_cq_service(cq, budget, cb, done_cb, done_arg);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ flags |= IONIC_INTR_CRED_UNMASK;
+ DEBUG_STATS_INTR_REARM(cq->bound_intr);
+ }
+
+ if (work_done || flags) {
+ flags |= IONIC_INTR_CRED_RESET_COALESCE;
+ ionic_intr_credits(cq->lif->ionic->idev.intr_ctrl,
+ cq->bound_intr->index,
+ work_done, flags);
+ }
+
+ DEBUG_STATS_NAPI_POLL(qcq, work_done);
+
+ return work_done;
+}
+
+int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
+{
+ struct ionic_dev *idev = &ionic->idev;
+ unsigned long start_time;
+ unsigned long max_wait;
+ unsigned long duration;
+ int opcode;
+ int done;
+ int err;
+
+ WARN_ON(in_interrupt());
+
+ /* Wait for dev cmd to complete, retrying if we get EAGAIN,
+ * but don't wait any longer than max_seconds.
+ */
+ max_wait = jiffies + (max_seconds * HZ);
+try_again:
+ start_time = jiffies;
+ do {
+ done = ionic_dev_cmd_done(idev);
+ if (done)
+ break;
+ msleep(20);
+ } while (!done && time_before(jiffies, max_wait));
+ duration = jiffies - start_time;
+
+ opcode = ioread8(&idev->dev_cmd_regs->cmd.cmd.opcode);
+ dev_dbg(ionic->dev, "DEVCMD %s (%d) done=%d took %ld secs (%ld jiffies)\n",
+ ionic_opcode_to_str(opcode), opcode,
+ done, duration / HZ, duration);
+
+ if (!done && !time_before(jiffies, max_wait)) {
+ dev_warn(ionic->dev, "DEVCMD %s (%d) timeout after %ld secs\n",
+ ionic_opcode_to_str(opcode), opcode, max_seconds);
+ return -ETIMEDOUT;
+ }
+
+ err = ionic_dev_cmd_status(&ionic->idev);
+ if (err) {
+ if (err == IONIC_RC_EAGAIN && !time_after(jiffies, max_wait)) {
+ dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) retrying...\n",
+ ionic_opcode_to_str(opcode), opcode,
+ ionic_error_to_str(err), err);
+
+ msleep(1000);
+ iowrite32(0, &idev->dev_cmd_regs->done);
+ iowrite32(1, &idev->dev_cmd_regs->doorbell);
+ goto try_again;
+ }
+
+ dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) failed\n",
+ ionic_opcode_to_str(opcode), opcode,
+ ionic_error_to_str(err), err);
+
+ return ionic_error_to_errno(err);
+ }
+
+ return 0;
+}
+
+int ionic_setup(struct ionic *ionic)
+{
+ int err;
+
+ err = ionic_dev_setup(ionic);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int ionic_identify(struct ionic *ionic)
+{
+ struct ionic_identity *ident = &ionic->ident;
+ struct ionic_dev *idev = &ionic->idev;
+ size_t sz;
+ int err;
+
+ memset(ident, 0, sizeof(*ident));
+
+ ident->drv.os_type = cpu_to_le32(IONIC_OS_TYPE_LINUX);
+ strncpy(ident->drv.driver_ver_str, IONIC_DRV_VERSION,
+ sizeof(ident->drv.driver_ver_str) - 1);
+
+ mutex_lock(&ionic->dev_cmd_lock);
+
+ sz = min(sizeof(ident->drv), sizeof(idev->dev_cmd_regs->data));
+ memcpy_toio(&idev->dev_cmd_regs->data, &ident->drv, sz);
+
+ ionic_dev_cmd_identify(idev, IONIC_IDENTITY_VERSION_1);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ if (!err) {
+ sz = min(sizeof(ident->dev), sizeof(idev->dev_cmd_regs->data));
+ memcpy_fromio(&ident->dev, &idev->dev_cmd_regs->data, sz);
+ }
+
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ if (err)
+ return err;
+
+ ionic_debugfs_add_ident(ionic);
+
+ return 0;
+}
+
+int ionic_init(struct ionic *ionic)
+{
+ struct ionic_dev *idev = &ionic->idev;
+ int err;
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_init(idev);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ return err;
+}
+
+int ionic_reset(struct ionic *ionic)
+{
+ struct ionic_dev *idev = &ionic->idev;
+ int err;
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_reset(idev);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ return err;
+}
+
+int ionic_port_identify(struct ionic *ionic)
+{
+ struct ionic_identity *ident = &ionic->ident;
+ struct ionic_dev *idev = &ionic->idev;
+ size_t sz;
+ int err;
+
+ mutex_lock(&ionic->dev_cmd_lock);
+
+ ionic_dev_cmd_port_identify(idev);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ if (!err) {
+ sz = min(sizeof(ident->port), sizeof(idev->dev_cmd_regs->data));
+ memcpy_fromio(&ident->port, &idev->dev_cmd_regs->data, sz);
+ }
+
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ return err;
+}
+
+int ionic_port_init(struct ionic *ionic)
+{
+ struct ionic_identity *ident = &ionic->ident;
+ struct ionic_dev *idev = &ionic->idev;
+ size_t sz;
+ int err;
+
+ if (idev->port_info)
+ return 0;
+
+ idev->port_info_sz = ALIGN(sizeof(*idev->port_info), PAGE_SIZE);
+ idev->port_info = dma_alloc_coherent(ionic->dev, idev->port_info_sz,
+ &idev->port_info_pa,
+ GFP_KERNEL);
+ if (!idev->port_info) {
+ dev_err(ionic->dev, "Failed to allocate port info, aborting\n");
+ return -ENOMEM;
+ }
+
+ sz = min(sizeof(ident->port.config), sizeof(idev->dev_cmd_regs->data));
+
+ mutex_lock(&ionic->dev_cmd_lock);
+
+ memcpy_toio(&idev->dev_cmd_regs->data, &ident->port.config, sz);
+ ionic_dev_cmd_port_init(idev);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+
+ ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_UP);
+ (void)ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+
+ mutex_unlock(&ionic->dev_cmd_lock);
+ if (err) {
+ dev_err(ionic->dev, "Failed to init port\n");
+ dma_free_coherent(ionic->dev, idev->port_info_sz,
+ idev->port_info, idev->port_info_pa);
+ idev->port_info = NULL;
+ idev->port_info_pa = 0;
+ }
+
+ return err;
+}
+
+int ionic_port_reset(struct ionic *ionic)
+{
+ struct ionic_dev *idev = &ionic->idev;
+ int err;
+
+ if (!idev->port_info)
+ return 0;
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_port_reset(idev);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ dma_free_coherent(ionic->dev, idev->port_info_sz,
+ idev->port_info, idev->port_info_pa);
+
+ idev->port_info = NULL;
+ idev->port_info_pa = 0;
+
+ if (err)
+ dev_err(ionic->dev, "Failed to reset port\n");
+
+ return err;
+}
+
+static int __init ionic_init_module(void)
+{
+ pr_info("%s %s, ver %s\n",
+ IONIC_DRV_NAME, IONIC_DRV_DESCRIPTION, IONIC_DRV_VERSION);
+ ionic_debugfs_create();
+ return ionic_bus_register_driver();
+}
+
+static void __exit ionic_cleanup_module(void)
+{
+ ionic_bus_unregister_driver();
+ ionic_debugfs_destroy();
+
+ pr_info("%s removed\n", IONIC_DRV_NAME);
+}
+
+module_init(ionic_init_module);
+module_exit(ionic_cleanup_module);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_regs.h b/drivers/net/ethernet/pensando/ionic/ionic_regs.h
new file mode 100644
index 000000000000..03ee5a36472b
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_regs.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */
+/* Copyright (c) 2018-2019 Pensando Systems, Inc. All rights reserved. */
+
+#ifndef IONIC_REGS_H
+#define IONIC_REGS_H
+
+#include <linux/io.h>
+
+/** struct ionic_intr - interrupt control register set.
+ * @coal_init: coalesce timer initial value.
+ * @mask: interrupt mask value.
+ * @credits: interrupt credit count and return.
+ * @mask_assert: interrupt mask value on assert.
+ * @coal: coalesce timer time remaining.
+ */
+struct ionic_intr {
+ u32 coal_init;
+ u32 mask;
+ u32 credits;
+ u32 mask_assert;
+ u32 coal;
+ u32 rsvd[3];
+};
+
+#define IONIC_INTR_CTRL_REGS_MAX 2048
+#define IONIC_INTR_CTRL_COAL_MAX 0x3F
+
+/** enum ionic_intr_mask_vals - valid values for mask and mask_assert.
+ * @IONIC_INTR_MASK_CLEAR: unmask interrupt.
+ * @IONIC_INTR_MASK_SET: mask interrupt.
+ */
+enum ionic_intr_mask_vals {
+ IONIC_INTR_MASK_CLEAR = 0,
+ IONIC_INTR_MASK_SET = 1,
+};
+
+/** enum ionic_intr_credits_bits - bitwise composition of credits values.
+ * @IONIC_INTR_CRED_COUNT: bit mask of credit count, no shift needed.
+ * @IONIC_INTR_CRED_COUNT_SIGNED: bit mask of credit count, including sign bit.
+ * @IONIC_INTR_CRED_UNMASK: unmask the interrupt.
+ * @IONIC_INTR_CRED_RESET_COALESCE: reset the coalesce timer.
+ * @IONIC_INTR_CRED_REARM: unmask the interrupt and reset the coalesce timer.
+ */
+enum ionic_intr_credits_bits {
+ IONIC_INTR_CRED_COUNT = 0x7fffu,
+ IONIC_INTR_CRED_COUNT_SIGNED = 0xffffu,
+ IONIC_INTR_CRED_UNMASK = 0x10000u,
+ IONIC_INTR_CRED_RESET_COALESCE = 0x20000u,
+ IONIC_INTR_CRED_REARM = (IONIC_INTR_CRED_UNMASK |
+ IONIC_INTR_CRED_RESET_COALESCE),
+};
+
+static inline void ionic_intr_coal_init(struct ionic_intr __iomem *intr_ctrl,
+ int intr_idx, u32 coal)
+{
+ iowrite32(coal, &intr_ctrl[intr_idx].coal_init);
+}
+
+static inline void ionic_intr_mask(struct ionic_intr __iomem *intr_ctrl,
+ int intr_idx, u32 mask)
+{
+ iowrite32(mask, &intr_ctrl[intr_idx].mask);
+}
+
+static inline void ionic_intr_credits(struct ionic_intr __iomem *intr_ctrl,
+ int intr_idx, u32 cred, u32 flags)
+{
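+ /* a credit count above the 15-bit field would bleed into the
+ * flag bits; fall back to the count the device itself reports
+ */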
+ if (WARN_ON_ONCE(cred > IONIC_INTR_CRED_COUNT)) {
+ cred = ioread32(&intr_ctrl[intr_idx].credits);
+ cred &= IONIC_INTR_CRED_COUNT_SIGNED;
+ }
+
+ iowrite32(cred | flags, &intr_ctrl[intr_idx].credits);
+}
+
+static inline void ionic_intr_clean(struct ionic_intr __iomem *intr_ctrl,
+ int intr_idx)
+{
+ u32 cred;
+
+ cred = ioread32(&intr_ctrl[intr_idx].credits);
+ cred &= IONIC_INTR_CRED_COUNT_SIGNED;
+ cred |= IONIC_INTR_CRED_RESET_COALESCE;
+ iowrite32(cred, &intr_ctrl[intr_idx].credits);
+}
+
+static inline void ionic_intr_mask_assert(struct ionic_intr __iomem *intr_ctrl,
+ int intr_idx, u32 mask)
+{
+ iowrite32(mask, &intr_ctrl[intr_idx].mask_assert);
+}
+
+/** enum ionic_dbell_bits - bitwise composition of dbell values.
+ *
+ * @IONIC_DBELL_QID_MASK: unshifted mask of valid queue id bits.
+ * @IONIC_DBELL_QID_SHIFT: queue id shift amount in dbell value.
+ * @IONIC_DBELL_QID: macro to build QID component of dbell value.
+ *
+ * @IONIC_DBELL_RING_MASK: unshifted mask of valid ring bits.
+ * @IONIC_DBELL_RING_SHIFT: ring shift amount in dbell value.
+ * @IONIC_DBELL_RING: macro to build ring component of dbell value.
+ *
+ * @IONIC_DBELL_RING_0: ring zero dbell component value.
+ * @IONIC_DBELL_RING_1: ring one dbell component value.
+ * @IONIC_DBELL_RING_2: ring two dbell component value.
+ * @IONIC_DBELL_RING_3: ring three dbell component value.
+ *
+ * @IONIC_DBELL_INDEX_MASK: bit mask of valid index bits, no shift needed.
+ */
+enum ionic_dbell_bits {
+ IONIC_DBELL_QID_MASK = 0xffffff,
+ IONIC_DBELL_QID_SHIFT = 24,
+
+#define IONIC_DBELL_QID(n) \
+ (((u64)(n) & IONIC_DBELL_QID_MASK) << IONIC_DBELL_QID_SHIFT)
+
+ IONIC_DBELL_RING_MASK = 0x7,
+ IONIC_DBELL_RING_SHIFT = 16,
+
+#define IONIC_DBELL_RING(n) \
+ (((u64)(n) & IONIC_DBELL_RING_MASK) << IONIC_DBELL_RING_SHIFT)
+
+ IONIC_DBELL_RING_0 = 0,
+ IONIC_DBELL_RING_1 = IONIC_DBELL_RING(1),
+ IONIC_DBELL_RING_2 = IONIC_DBELL_RING(2),
+ IONIC_DBELL_RING_3 = IONIC_DBELL_RING(3),
+
+ IONIC_DBELL_INDEX_MASK = 0xffff,
+};
+
+static inline void ionic_dbell_ring(u64 __iomem *db_page, int qtype, u64 val)
+{
+ writeq(val, &db_page[qtype]);
+}
+
+#endif /* IONIC_REGS_H */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
new file mode 100644
index 000000000000..7a093f148ee5
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "ionic.h"
+#include "ionic_lif.h"
+#include "ionic_rx_filter.h"
+
+void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f)
+{
+ struct device *dev = lif->ionic->dev;
+
+ hlist_del(&f->by_id);
+ hlist_del(&f->by_hash);
+ devm_kfree(dev, f);
+}
+
+int ionic_rx_filter_del(struct ionic_lif *lif, struct ionic_rx_filter *f)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.rx_filter_del = {
+ .opcode = IONIC_CMD_RX_FILTER_DEL,
+ .filter_id = cpu_to_le32(f->filter_id),
+ },
+ };
+
+ return ionic_adminq_post_wait(lif, &ctx);
+}
+
+int ionic_rx_filters_init(struct ionic_lif *lif)
+{
+ unsigned int i;
+
+ spin_lock_init(&lif->rx_filters.lock);
+
+ for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
+ INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]);
+ INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]);
+ }
+
+ return 0;
+}
+
+void ionic_rx_filters_deinit(struct ionic_lif *lif)
+{
+ struct ionic_rx_filter *f;
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ unsigned int i;
+
+ for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
+ head = &lif->rx_filters.by_id[i];
+ hlist_for_each_entry_safe(f, tmp, head, by_id)
+ ionic_rx_filter_free(lif, f);
+ }
+}
+
+int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
+ u32 hash, struct ionic_admin_ctx *ctx)
+{
+ struct device *dev = lif->ionic->dev;
+ struct ionic_rx_filter_add_cmd *ac;
+ struct ionic_rx_filter *f;
+ struct hlist_head *head;
+ unsigned int key;
+
+ ac = &ctx->cmd.rx_filter_add;
+
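+ /* derive the hash key from the match-specific field */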
+ switch (le16_to_cpu(ac->match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ key = le16_to_cpu(ac->vlan.vlan);
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ key = *(u32 *)ac->mac.addr;
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC_VLAN:
+ key = le16_to_cpu(ac->mac_vlan.vlan);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ f = devm_kzalloc(dev, sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return -ENOMEM;
+
+ f->flow_id = flow_id;
+ f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id);
+ f->rxq_index = rxq_index;
+ memcpy(&f->cmd, ac, sizeof(f->cmd));
+
+ INIT_HLIST_NODE(&f->by_hash);
+ INIT_HLIST_NODE(&f->by_id);
+
+ spin_lock_bh(&lif->rx_filters.lock);
+
+ key = hash_32(key, IONIC_RX_FILTER_HASH_BITS);
+ head = &lif->rx_filters.by_hash[key];
+ hlist_add_head(&f->by_hash, head);
+
+ key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
+ head = &lif->rx_filters.by_id[key];
+ hlist_add_head(&f->by_id, head);
+
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ return 0;
+}
+
+struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid)
+{
+ struct ionic_rx_filter *f;
+ struct hlist_head *head;
+ unsigned int key;
+
+ key = hash_32(vid, IONIC_RX_FILTER_HASH_BITS);
+ head = &lif->rx_filters.by_hash[key];
+
+ hlist_for_each_entry(f, head, by_hash) {
+ if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_VLAN)
+ continue;
+ if (le16_to_cpu(f->cmd.vlan.vlan) == vid)
+ return f;
+ }
+
+ return NULL;
+}
+
+struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif,
+ const u8 *addr)
+{
+ struct ionic_rx_filter *f;
+ struct hlist_head *head;
+ unsigned int key;
+
+ key = hash_32(*(u32 *)addr, IONIC_RX_FILTER_HASH_BITS);
+ head = &lif->rx_filters.by_hash[key];
+
+ hlist_for_each_entry(f, head, by_hash) {
+ if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_MAC)
+ continue;
+ if (memcmp(addr, f->cmd.mac.addr, ETH_ALEN) == 0)
+ return f;
+ }
+
+ return NULL;
+}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
new file mode 100644
index 000000000000..b6aec9c19918
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#ifndef _IONIC_RX_FILTER_H_
+#define _IONIC_RX_FILTER_H_
+
+#define IONIC_RXQ_INDEX_ANY (0xFFFF)
+struct ionic_rx_filter {
+ u32 flow_id;
+ u32 filter_id;
+ u16 rxq_index;
+ struct ionic_rx_filter_add_cmd cmd;
+ struct hlist_node by_hash;
+ struct hlist_node by_id;
+};
+
+#define IONIC_RX_FILTER_HASH_BITS 10
+#define IONIC_RX_FILTER_HLISTS BIT(IONIC_RX_FILTER_HASH_BITS)
+#define IONIC_RX_FILTER_HLISTS_MASK (IONIC_RX_FILTER_HLISTS - 1)
+struct ionic_rx_filters {
+ spinlock_t lock; /* filter list lock */
+ struct hlist_head by_hash[IONIC_RX_FILTER_HLISTS]; /* by skb hash */
+ struct hlist_head by_id[IONIC_RX_FILTER_HLISTS]; /* by filter_id */
+};
+
+void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f);
+int ionic_rx_filter_del(struct ionic_lif *lif, struct ionic_rx_filter *f);
+int ionic_rx_filters_init(struct ionic_lif *lif);
+void ionic_rx_filters_deinit(struct ionic_lif *lif);
+int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
+ u32 hash, struct ionic_admin_ctx *ctx);
+struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid);
+struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, const u8 *addr);
+
+#endif /* _IONIC_RX_FILTER_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
new file mode 100644
index 000000000000..e2907884f843
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+
+#include "ionic.h"
+#include "ionic_lif.h"
+#include "ionic_stats.h"
+
+static const struct ionic_stat_desc ionic_lif_stats_desc[] = {
+ IONIC_LIF_STAT_DESC(tx_packets),
+ IONIC_LIF_STAT_DESC(tx_bytes),
+ IONIC_LIF_STAT_DESC(rx_packets),
+ IONIC_LIF_STAT_DESC(rx_bytes),
+ IONIC_LIF_STAT_DESC(tx_tso),
+ IONIC_LIF_STAT_DESC(tx_no_csum),
+ IONIC_LIF_STAT_DESC(tx_csum),
+ IONIC_LIF_STAT_DESC(rx_csum_none),
+ IONIC_LIF_STAT_DESC(rx_csum_complete),
+ IONIC_LIF_STAT_DESC(rx_csum_error),
+};
+
+static const struct ionic_stat_desc ionic_tx_stats_desc[] = {
+ IONIC_TX_STAT_DESC(pkts),
+ IONIC_TX_STAT_DESC(bytes),
+ IONIC_TX_STAT_DESC(clean),
+ IONIC_TX_STAT_DESC(dma_map_err),
+ IONIC_TX_STAT_DESC(linearize),
+ IONIC_TX_STAT_DESC(frags),
+};
+
+static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
+ IONIC_RX_STAT_DESC(pkts),
+ IONIC_RX_STAT_DESC(bytes),
+ IONIC_RX_STAT_DESC(dma_map_err),
+ IONIC_RX_STAT_DESC(alloc_err),
+ IONIC_RX_STAT_DESC(csum_none),
+ IONIC_RX_STAT_DESC(csum_complete),
+ IONIC_RX_STAT_DESC(csum_error),
+};
+
+static const struct ionic_stat_desc ionic_txq_stats_desc[] = {
+ IONIC_TX_Q_STAT_DESC(stop),
+ IONIC_TX_Q_STAT_DESC(wake),
+ IONIC_TX_Q_STAT_DESC(drop),
+ IONIC_TX_Q_STAT_DESC(dbell_count),
+};
+
+static const struct ionic_stat_desc ionic_dbg_cq_stats_desc[] = {
+ IONIC_CQ_STAT_DESC(compl_count),
+};
+
+static const struct ionic_stat_desc ionic_dbg_intr_stats_desc[] = {
+ IONIC_INTR_STAT_DESC(rearm_count),
+};
+
+static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = {
+ IONIC_NAPI_STAT_DESC(poll_count),
+};
+
+#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
+#define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc)
+#define IONIC_NUM_RX_STATS ARRAY_SIZE(ionic_rx_stats_desc)
+#define IONIC_NUM_TX_Q_STATS ARRAY_SIZE(ionic_txq_stats_desc)
+#define IONIC_NUM_DBG_CQ_STATS ARRAY_SIZE(ionic_dbg_cq_stats_desc)
+#define IONIC_NUM_DBG_INTR_STATS ARRAY_SIZE(ionic_dbg_intr_stats_desc)
+#define IONIC_NUM_DBG_NAPI_STATS ARRAY_SIZE(ionic_dbg_napi_stats_desc)
+
+#define MAX_Q(lif) ((lif)->netdev->real_num_tx_queues)
+
+static void ionic_get_lif_stats(struct ionic_lif *lif,
+ struct ionic_lif_sw_stats *stats)
+{
+ struct ionic_tx_stats *tstats;
+ struct ionic_rx_stats *rstats;
+ struct ionic_qcq *txqcq;
+ struct ionic_qcq *rxqcq;
+ int q_num;
+
+ memset(stats, 0, sizeof(*stats));
+
+ for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
+ txqcq = lif_to_txqcq(lif, q_num);
+ if (txqcq && txqcq->stats) {
+ tstats = &txqcq->stats->tx;
+ stats->tx_packets += tstats->pkts;
+ stats->tx_bytes += tstats->bytes;
+ stats->tx_tso += tstats->tso;
+ stats->tx_no_csum += tstats->no_csum;
+ stats->tx_csum += tstats->csum;
+ }
+
+ rxqcq = lif_to_rxqcq(lif, q_num);
+ if (rxqcq && rxqcq->stats) {
+ rstats = &rxqcq->stats->rx;
+ stats->rx_packets += rstats->pkts;
+ stats->rx_bytes += rstats->bytes;
+ stats->rx_csum_none += rstats->csum_none;
+ stats->rx_csum_complete += rstats->csum_complete;
+ stats->rx_csum_error += rstats->csum_error;
+ }
+ }
+}
+
+static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
+{
+ u64 total = 0;
+
+ /* lif stats */
+ total += IONIC_NUM_LIF_STATS;
+
+ /* tx stats */
+ total += MAX_Q(lif) * IONIC_NUM_TX_STATS;
+
+ /* rx stats */
+ total += MAX_Q(lif) * IONIC_NUM_RX_STATS;
+
+ if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ /* tx debug stats */
+ total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS +
+ IONIC_NUM_TX_Q_STATS +
+ IONIC_NUM_DBG_INTR_STATS +
+ IONIC_MAX_NUM_SG_CNTR);
+
+ /* rx debug stats */
+ total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS +
+ IONIC_NUM_DBG_INTR_STATS +
+ IONIC_NUM_DBG_NAPI_STATS +
+ IONIC_MAX_NUM_NAPI_CNTR);
+ }
+
+ return total;
+}
+
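+/* The string and value walks below must stay in lockstep: any change
+ * in ordering here must be mirrored in ionic_sw_stats_get_values()
+ * and accounted for in ionic_sw_stats_get_count().
+ */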
+static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
+{
+ int i, q_num;
+
+ for (i = 0; i < IONIC_NUM_LIF_STATS; i++) {
+ snprintf(*buf, ETH_GSTRING_LEN, "%s",
+ ionic_lif_stats_desc[i].name);
+ *buf += ETH_GSTRING_LEN;
+ }
+ for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
+ for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
+ snprintf(*buf, ETH_GSTRING_LEN, "tx_%d_%s",
+ q_num, ionic_tx_stats_desc[i].name);
+ *buf += ETH_GSTRING_LEN;
+ }
+
+ if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
+ snprintf(*buf, ETH_GSTRING_LEN,
+ "txq_%d_%s",
+ q_num,
+ ionic_txq_stats_desc[i].name);
+ *buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
+ snprintf(*buf, ETH_GSTRING_LEN,
+ "txq_%d_cq_%s",
+ q_num,
+ ionic_dbg_cq_stats_desc[i].name);
+ *buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
+ snprintf(*buf, ETH_GSTRING_LEN,
+ "txq_%d_intr_%s",
+ q_num,
+ ionic_dbg_intr_stats_desc[i].name);
+ *buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
+ snprintf(*buf, ETH_GSTRING_LEN,
+ "txq_%d_sg_cntr_%d",
+ q_num, i);
+ *buf += ETH_GSTRING_LEN;
+ }
+ }
+ }
+ for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
+ for (i = 0; i < IONIC_NUM_RX_STATS; i++) {
+ snprintf(*buf, ETH_GSTRING_LEN,
+ "rx_%d_%s",
+ q_num, ionic_rx_stats_desc[i].name);
+ *buf += ETH_GSTRING_LEN;
+ }
+
+ if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
+ snprintf(*buf, ETH_GSTRING_LEN,
+ "rxq_%d_cq_%s",
+ q_num,
+ ionic_dbg_cq_stats_desc[i].name);
+ *buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
+ snprintf(*buf, ETH_GSTRING_LEN,
+ "rxq_%d_intr_%s",
+ q_num,
+ ionic_dbg_intr_stats_desc[i].name);
+ *buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
+ snprintf(*buf, ETH_GSTRING_LEN,
+ "rxq_%d_napi_%s",
+ q_num,
+ ionic_dbg_napi_stats_desc[i].name);
+ *buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
+ snprintf(*buf, ETH_GSTRING_LEN,
+ "rxq_%d_napi_work_done_%d",
+ q_num, i);
+ *buf += ETH_GSTRING_LEN;
+ }
+ }
+ }
+}
+
+static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
+{
+ struct ionic_lif_sw_stats lif_stats;
+ struct ionic_qcq *txqcq, *rxqcq;
+ int i, q_num;
+
+ ionic_get_lif_stats(lif, &lif_stats);
+
+ for (i = 0; i < IONIC_NUM_LIF_STATS; i++) {
+ **buf = IONIC_READ_STAT64(&lif_stats, &ionic_lif_stats_desc[i]);
+ (*buf)++;
+ }
+
+ for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
+ txqcq = lif_to_txqcq(lif, q_num);
+
+ for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
+ **buf = IONIC_READ_STAT64(&txqcq->stats->tx,
+ &ionic_tx_stats_desc[i]);
+ (*buf)++;
+ }
+
+ if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
+ **buf = IONIC_READ_STAT64(&txqcq->q,
+ &ionic_txq_stats_desc[i]);
+ (*buf)++;
+ }
+ for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
+ **buf = IONIC_READ_STAT64(&txqcq->cq,
+ &ionic_dbg_cq_stats_desc[i]);
+ (*buf)++;
+ }
+ for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
+ **buf = IONIC_READ_STAT64(&txqcq->intr,
+ &ionic_dbg_intr_stats_desc[i]);
+ (*buf)++;
+ }
+ for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
+ **buf = txqcq->stats->tx.sg_cntr[i];
+ (*buf)++;
+ }
+ }
+ }
+
+ for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
+ rxqcq = lif_to_rxqcq(lif, q_num);
+
+ for (i = 0; i < IONIC_NUM_RX_STATS; i++) {
+ **buf = IONIC_READ_STAT64(&rxqcq->stats->rx,
+ &ionic_rx_stats_desc[i]);
+ (*buf)++;
+ }
+
+ if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
+ **buf = IONIC_READ_STAT64(&rxqcq->cq,
+ &ionic_dbg_cq_stats_desc[i]);
+ (*buf)++;
+ }
+ for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
+ **buf = IONIC_READ_STAT64(&rxqcq->intr,
+ &ionic_dbg_intr_stats_desc[i]);
+ (*buf)++;
+ }
+ for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
+ **buf = IONIC_READ_STAT64(&rxqcq->napi_stats,
+ &ionic_dbg_napi_stats_desc[i]);
+ (*buf)++;
+ }
+ for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
+ **buf = rxqcq->napi_stats.work_done_cntr[i];
+ (*buf)++;
+ }
+ }
+ }
+}
+
+const struct ionic_stats_group_intf ionic_stats_groups[] = {
+ /* SW Stats group */
+ {
+ .get_strings = ionic_sw_stats_get_strings,
+ .get_values = ionic_sw_stats_get_values,
+ .get_count = ionic_sw_stats_get_count,
+ },
+ /* Add more stat groups here */
+};
+
+const int ionic_num_stats_grps = ARRAY_SIZE(ionic_stats_groups);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.h b/drivers/net/ethernet/pensando/ionic/ionic_stats.h
new file mode 100644
index 000000000000..d2c1122a2c6e
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#ifndef _IONIC_STATS_H_
+#define _IONIC_STATS_H_
+
+#define IONIC_STAT_TO_OFFSET(type, stat_name) (offsetof(type, stat_name))
+
+#define IONIC_STAT_DESC(type, stat_name) { \
+ .name = #stat_name, \
+ .offset = IONIC_STAT_TO_OFFSET(type, stat_name) \
+}
+
+#define IONIC_LIF_STAT_DESC(stat_name) \
+ IONIC_STAT_DESC(struct ionic_lif_sw_stats, stat_name)
+
+#define IONIC_TX_STAT_DESC(stat_name) \
+ IONIC_STAT_DESC(struct ionic_tx_stats, stat_name)
+
+#define IONIC_RX_STAT_DESC(stat_name) \
+ IONIC_STAT_DESC(struct ionic_rx_stats, stat_name)
+
+#define IONIC_TX_Q_STAT_DESC(stat_name) \
+ IONIC_STAT_DESC(struct ionic_queue, stat_name)
+
+#define IONIC_CQ_STAT_DESC(stat_name) \
+ IONIC_STAT_DESC(struct ionic_cq, stat_name)
+
+#define IONIC_INTR_STAT_DESC(stat_name) \
+ IONIC_STAT_DESC(struct ionic_intr_info, stat_name)
+
+#define IONIC_NAPI_STAT_DESC(stat_name) \
+ IONIC_STAT_DESC(struct ionic_napi_stats, stat_name)
+
+/* Interface structure for a particular stats group */
+struct ionic_stats_group_intf {
+ void (*get_strings)(struct ionic_lif *lif, u8 **buf);
+ void (*get_values)(struct ionic_lif *lif, u64 **buf);
+ u64 (*get_count)(struct ionic_lif *lif);
+};
+
+extern const struct ionic_stats_group_intf ionic_stats_groups[];
+extern const int ionic_num_stats_grps;
+
+#define IONIC_READ_STAT64(base_ptr, desc_ptr) \
+ (*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
+
+struct ionic_stat_desc {
+ char name[ETH_GSTRING_LEN];
+ u64 offset;
+};
+
+#endif /* _IONIC_STATS_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
new file mode 100644
index 000000000000..ab6663d94f42
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -0,0 +1,925 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <net/ip6_checksum.h>
+
+#include "ionic.h"
+#include "ionic_lif.h"
+#include "ionic_txrx.h"
+
+static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info, void *cb_arg);
+
+static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
+ ionic_desc_cb cb_func, void *cb_arg)
+{
+ DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell);
+
+ ionic_q_post(q, ring_dbell, cb_func, cb_arg);
+}
+
+static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
+ ionic_desc_cb cb_func, void *cb_arg)
+{
+ ionic_q_post(q, ring_dbell, cb_func, cb_arg);
+
+ DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q));
+}
+
+static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
+{
+ return netdev_get_tx_queue(q->lif->netdev, q->index);
+}
+
+static void ionic_rx_recycle(struct ionic_queue *q, struct ionic_desc_info *desc_info,
+ struct sk_buff *skb)
+{
+ struct ionic_rxq_desc *old = desc_info->desc;
+ struct ionic_rxq_desc *new = q->head->desc;
+
+ new->addr = old->addr;
+ new->len = old->len;
+
+ ionic_rxq_post(q, true, ionic_rx_clean, skb);
+}
+
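+/* If the received frame fits under the lif's rx_copybreak threshold,
+ * copy it into a fresh small skb and recycle the original large
+ * buffer back onto the rx ring.
+ */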
+static bool ionic_rx_copybreak(struct ionic_queue *q, struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info, struct sk_buff **skb)
+{
+ struct ionic_rxq_comp *comp = cq_info->cq_desc;
+ struct ionic_rxq_desc *desc = desc_info->desc;
+ struct net_device *netdev = q->lif->netdev;
+ struct device *dev = q->lif->ionic->dev;
+ struct sk_buff *new_skb;
+ u16 clen, dlen;
+
+ clen = le16_to_cpu(comp->len);
+ dlen = le16_to_cpu(desc->len);
+ if (clen > q->lif->rx_copybreak) {
+ dma_unmap_single(dev, (dma_addr_t)le64_to_cpu(desc->addr),
+ dlen, DMA_FROM_DEVICE);
+ return false;
+ }
+
+ new_skb = netdev_alloc_skb_ip_align(netdev, clen);
+ if (!new_skb) {
+ dma_unmap_single(dev, (dma_addr_t)le64_to_cpu(desc->addr),
+ dlen, DMA_FROM_DEVICE);
+ return false;
+ }
+
+ dma_sync_single_for_cpu(dev, (dma_addr_t)le64_to_cpu(desc->addr),
+ clen, DMA_FROM_DEVICE);
+
+ memcpy(new_skb->data, (*skb)->data, clen);
+
+ ionic_rx_recycle(q, desc_info, *skb);
+ *skb = new_skb;
+
+ return true;
+}
+
+static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info, void *cb_arg)
+{
+ struct ionic_rxq_comp *comp = cq_info->cq_desc;
+ struct ionic_qcq *qcq = q_to_qcq(q);
+ struct sk_buff *skb = cb_arg;
+ struct ionic_rx_stats *stats;
+ struct net_device *netdev;
+
+ stats = q_to_rx_stats(q);
+ netdev = q->lif->netdev;
+
+ if (comp->status) {
+ ionic_rx_recycle(q, desc_info, skb);
+ return;
+ }
+
+ if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state))) {
+ /* no packet processing while resetting */
+ ionic_rx_recycle(q, desc_info, skb);
+ return;
+ }
+
+ stats->pkts++;
+ stats->bytes += le16_to_cpu(comp->len);
+
+ ionic_rx_copybreak(q, desc_info, cq_info, &skb);
+
+ skb_put(skb, le16_to_cpu(comp->len));
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ skb_record_rx_queue(skb, q->index);
+
+ if (netdev->features & NETIF_F_RXHASH) {
+ switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
+ case IONIC_PKT_TYPE_IPV4:
+ case IONIC_PKT_TYPE_IPV6:
+ skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
+ PKT_HASH_TYPE_L3);
+ break;
+ case IONIC_PKT_TYPE_IPV4_TCP:
+ case IONIC_PKT_TYPE_IPV6_TCP:
+ case IONIC_PKT_TYPE_IPV4_UDP:
+ case IONIC_PKT_TYPE_IPV6_UDP:
+ skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
+ PKT_HASH_TYPE_L4);
+ break;
+ }
+ }
+
+ if (netdev->features & NETIF_F_RXCSUM) {
+ if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = (__wsum)le16_to_cpu(comp->csum);
+ stats->csum_complete++;
+ }
+ } else {
+ stats->csum_none++;
+ }
+
+ if ((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
+ (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
+ (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD))
+ stats->csum_error++;
+
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(comp->vlan_tci));
+ }
+
+ napi_gro_receive(&qcq->napi, skb);
+}
+
+static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
+{
+ struct ionic_rxq_comp *comp = cq_info->cq_desc;
+ struct ionic_queue *q = cq->bound_q;
+ struct ionic_desc_info *desc_info;
+
+ if (!color_match(comp->pkt_type_color, cq->done_color))
+ return false;
+
+ /* check for empty queue */
+ if (q->tail->index == q->head->index)
+ return false;
+
+ desc_info = q->tail;
+ if (desc_info->index != le16_to_cpu(comp->comp_index))
+ return false;
+
+ q->tail = desc_info->next;
+
+ /* clean the related q entry; only one per cq completion */
+ ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);
+
+ desc_info->cb = NULL;
+ desc_info->cb_arg = NULL;
+
+ return true;
+}
+
+static u32 ionic_rx_walk_cq(struct ionic_cq *rxcq, u32 limit)
+{
+ u32 work_done = 0;
+
+ while (ionic_rx_service(rxcq, rxcq->tail)) {
+ if (rxcq->tail->last)
+ rxcq->done_color = !rxcq->done_color;
+ rxcq->tail = rxcq->tail->next;
+ DEBUG_STATS_CQE_CNT(rxcq);
+
+ if (++work_done >= limit)
+ break;
+ }
+
+ return work_done;
+}
+
+void ionic_rx_flush(struct ionic_cq *cq)
+{
+ struct ionic_dev *idev = &cq->lif->ionic->idev;
+ u32 work_done;
+
+ work_done = ionic_rx_walk_cq(cq, cq->num_descs);
+
+ if (work_done)
+ ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
+ work_done, IONIC_INTR_CRED_RESET_COALESCE);
+}
+
+static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q, unsigned int len,
+ dma_addr_t *dma_addr)
+{
+ struct ionic_lif *lif = q->lif;
+ struct ionic_rx_stats *stats;
+ struct net_device *netdev;
+ struct sk_buff *skb;
+ struct device *dev;
+
+ netdev = lif->netdev;
+ dev = lif->ionic->dev;
+ stats = q_to_rx_stats(q);
+ skb = netdev_alloc_skb_ip_align(netdev, len);
+ if (!skb) {
+ net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
+ netdev->name, q->name);
+ stats->alloc_err++;
+ return NULL;
+ }
+
+ *dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, *dma_addr)) {
+ dev_kfree_skb(skb);
+ net_warn_ratelimited("%s: DMA single map failed on %s!\n",
+ netdev->name, q->name);
+ stats->dma_map_err++;
+ return NULL;
+ }
+
+ return skb;
+}
+
+#define IONIC_RX_RING_DOORBELL_STRIDE ((1 << 2) - 1)
+
+void ionic_rx_fill(struct ionic_queue *q)
+{
+ struct net_device *netdev = q->lif->netdev;
+ struct ionic_rxq_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ bool ring_doorbell;
+ unsigned int len;
+ unsigned int i;
+
+ len = netdev->mtu + ETH_HLEN;
+
+ for (i = ionic_q_space_avail(q); i; i--) {
+ skb = ionic_rx_skb_alloc(q, len, &dma_addr);
+ if (!skb)
+ return;
+
+ desc = q->head->desc;
+ desc->addr = cpu_to_le64(dma_addr);
+ desc->len = cpu_to_le16(len);
+ desc->opcode = IONIC_RXQ_DESC_OPCODE_SIMPLE;
+
+ ring_doorbell = ((q->head->index + 1) &
+ IONIC_RX_RING_DOORBELL_STRIDE) == 0;
+
+ ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, skb);
+ }
+}
+
+static void ionic_rx_fill_cb(void *arg)
+{
+ ionic_rx_fill(arg);
+}
+
+void ionic_rx_empty(struct ionic_queue *q)
+{
+ struct device *dev = q->lif->ionic->dev;
+ struct ionic_desc_info *cur;
+ struct ionic_rxq_desc *desc;
+
+ for (cur = q->tail; cur != q->head; cur = cur->next) {
+ desc = cur->desc;
+ dma_unmap_single(dev, le64_to_cpu(desc->addr),
+ le16_to_cpu(desc->len), DMA_FROM_DEVICE);
+ dev_kfree_skb(cur->cb_arg);
+ cur->cb_arg = NULL;
+ }
+}
+
+int ionic_rx_napi(struct napi_struct *napi, int budget)
+{
+ struct ionic_qcq *qcq = napi_to_qcq(napi);
+ struct ionic_cq *rxcq = napi_to_cq(napi);
+ unsigned int qi = rxcq->bound_q->index;
+ struct ionic_dev *idev;
+ struct ionic_lif *lif;
+ struct ionic_cq *txcq;
+ u32 work_done = 0;
+ u32 flags = 0;
+
+ lif = rxcq->bound_q->lif;
+ idev = &lif->ionic->idev;
+ txcq = &lif->txqcqs[qi].qcq->cq;
+
+ ionic_tx_flush(txcq);
+
+ work_done = ionic_rx_walk_cq(rxcq, budget);
+
+ if (work_done)
+ ionic_rx_fill_cb(rxcq->bound_q);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ flags |= IONIC_INTR_CRED_UNMASK;
+ DEBUG_STATS_INTR_REARM(rxcq->bound_intr);
+ }
+
+ if (work_done || flags) {
+ flags |= IONIC_INTR_CRED_RESET_COALESCE;
+ ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
+ work_done, flags);
+ }
+
+ DEBUG_STATS_NAPI_POLL(qcq, work_done);
+
+ return work_done;
+}
+
+static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t len)
+{
+ struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ struct device *dev = q->lif->ionic->dev;
+ dma_addr_t dma_addr;
+
+ dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ net_warn_ratelimited("%s: DMA single map failed on %s!\n",
+ q->lif->netdev->name, q->name);
+ stats->dma_map_err++;
+ return 0;
+ }
+ return dma_addr;
+}
+
+static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *frag,
+ size_t offset, size_t len)
+{
+ struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ struct device *dev = q->lif->ionic->dev;
+ dma_addr_t dma_addr;
+
+ dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
+ q->lif->netdev->name, q->name);
+ stats->dma_map_err++;
+ }
+ return dma_addr;
+}
+
+static void ionic_tx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info, void *cb_arg)
+{
+ struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
+ struct ionic_txq_sg_elem *elem = sg_desc->elems;
+ struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ struct ionic_txq_desc *desc = desc_info->desc;
+ struct device *dev = q->lif->ionic->dev;
+ u8 opcode, flags, nsge;
+ u16 queue_index;
+ unsigned int i;
+ u64 addr;
+
+ decode_txq_desc_cmd(le64_to_cpu(desc->cmd),
+ &opcode, &flags, &nsge, &addr);
+
+ /* use unmap_single only if either this is not TSO,
+ * or this is the first descriptor of a TSO
+ */
+ if (opcode != IONIC_TXQ_DESC_OPCODE_TSO ||
+ flags & IONIC_TXQ_DESC_FLAG_TSO_SOT)
+ dma_unmap_single(dev, (dma_addr_t)addr,
+ le16_to_cpu(desc->len), DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, (dma_addr_t)addr,
+ le16_to_cpu(desc->len), DMA_TO_DEVICE);
+
+ for (i = 0; i < nsge; i++, elem++)
+ dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
+ le16_to_cpu(elem->len), DMA_TO_DEVICE);
+
+ if (cb_arg) {
+ struct sk_buff *skb = cb_arg;
+ u32 len = skb->len;
+
+ queue_index = skb_get_queue_mapping(skb);
+ if (unlikely(__netif_subqueue_stopped(q->lif->netdev,
+ queue_index))) {
+ netif_wake_subqueue(q->lif->netdev, queue_index);
+ q->wake++;
+ }
+ dev_kfree_skb_any(skb);
+ stats->clean++;
+ netdev_tx_completed_queue(q_to_ndq(q), 1, len);
+ }
+}
+
+void ionic_tx_flush(struct ionic_cq *cq)
+{
+ struct ionic_txq_comp *comp = cq->tail->cq_desc;
+ struct ionic_dev *idev = &cq->lif->ionic->idev;
+ struct ionic_queue *q = cq->bound_q;
+ struct ionic_desc_info *desc_info;
+ unsigned int work_done = 0;
+
+ /* walk the completed cq entries */
+ while (work_done < cq->num_descs &&
+ color_match(comp->color, cq->done_color)) {
+
+ /* clean the related q entries; there could be
+ * several q entries completed for each cq completion
+ */
+ do {
+ desc_info = q->tail;
+ q->tail = desc_info->next;
+ ionic_tx_clean(q, desc_info, cq->tail,
+ desc_info->cb_arg);
+ desc_info->cb = NULL;
+ desc_info->cb_arg = NULL;
+ } while (desc_info->index != le16_to_cpu(comp->comp_index));
+
+ if (cq->tail->last)
+ cq->done_color = !cq->done_color;
+
+ cq->tail = cq->tail->next;
+ comp = cq->tail->cq_desc;
+ DEBUG_STATS_CQE_CNT(cq);
+
+ work_done++;
+ }
+
+ if (work_done)
+ ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
+ work_done, 0);
+}
+
+static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
+{
+ int err;
+
+ err = skb_cow_head(skb, 0);
+ if (err)
+ return err;
+
+ if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
+ inner_ip_hdr(skb)->check = 0;
+ inner_tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
+ inner_ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
+ inner_tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
+ &inner_ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ return 0;
+}
+
+static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
+{
+ int err;
+
+ err = skb_cow_head(skb, 0);
+ if (err)
+ return err;
+
+ if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
+ ip_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ return 0;
+}
+
+static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
+ struct sk_buff *skb,
+ dma_addr_t addr, u8 nsge, u16 len,
+ unsigned int hdrlen, unsigned int mss,
+ bool outer_csum,
+ u16 vlan_tci, bool has_vlan,
+ bool start, bool done)
+{
+ u8 flags = 0;
+ u64 cmd;
+
+ flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
+ flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
+ flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
+ flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;
+
+ cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
+ desc->cmd = cpu_to_le64(cmd);
+ desc->len = cpu_to_le16(len);
+ desc->vlan_tci = cpu_to_le16(vlan_tci);
+ desc->hdr_len = cpu_to_le16(hdrlen);
+ desc->mss = cpu_to_le16(mss);
+
+ if (done) {
+ skb_tx_timestamp(skb);
+ netdev_tx_sent_queue(q_to_ndq(q), skb->len);
+ ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
+ } else {
+ ionic_txq_post(q, false, ionic_tx_clean, NULL);
+ }
+}
+
+static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
+ struct ionic_txq_sg_elem **elem)
+{
+ struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
+ struct ionic_txq_desc *desc = q->head->desc;
+
+ *elem = sg_desc->elems;
+ return desc;
+}
+
+static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
+{
+ struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ struct ionic_desc_info *abort = q->head;
+ struct device *dev = q->lif->ionic->dev;
+ struct ionic_desc_info *rewind = abort;
+ struct ionic_txq_sg_elem *elem;
+ struct ionic_txq_desc *desc;
+ unsigned int frag_left = 0;
+ unsigned int offset = 0;
+ unsigned int len_left;
+ dma_addr_t desc_addr;
+ unsigned int hdrlen;
+ unsigned int nfrags;
+ unsigned int seglen;
+ u64 total_bytes = 0;
+ u64 total_pkts = 0;
+ unsigned int left;
+ unsigned int len;
+ unsigned int mss;
+ skb_frag_t *frag;
+ bool start, done;
+ bool outer_csum;
+ bool has_vlan;
+ u16 desc_len;
+ u8 desc_nsge;
+ u16 vlan_tci;
+ bool encap;
+ int err;
+
+ mss = skb_shinfo(skb)->gso_size;
+ nfrags = skb_shinfo(skb)->nr_frags;
+ len_left = skb->len - skb_headlen(skb);
+ outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
+ has_vlan = !!skb_vlan_tag_present(skb);
+ vlan_tci = skb_vlan_tag_get(skb);
+ encap = skb->encapsulation;
+
+ /* Preload inner-most TCP csum field with IP pseudo hdr
+ * calculated with IP length set to zero. HW will later
+ * add in length to each TCP segment resulting from the TSO.
+ */
+
+ if (encap)
+ err = ionic_tx_tcp_inner_pseudo_csum(skb);
+ else
+ err = ionic_tx_tcp_pseudo_csum(skb);
+ if (err)
+ return err;
+
+ if (encap)
+ hdrlen = skb_inner_transport_header(skb) - skb->data +
+ inner_tcp_hdrlen(skb);
+ else
+ hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ seglen = hdrlen + mss;
+ left = skb_headlen(skb);
+
+ desc = ionic_tx_tso_next(q, &elem);
+ start = true;
+
+ /* Chop skb->data up into desc segments */
+
+ while (left > 0) {
+ len = min(seglen, left);
+ frag_left = seglen - len;
+ desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
+ if (dma_mapping_error(dev, desc_addr))
+ goto err_out_abort;
+ desc_len = len;
+ desc_nsge = 0;
+ left -= len;
+ offset += len;
+ if (nfrags > 0 && frag_left > 0)
+ continue;
+ done = (nfrags == 0 && left == 0);
+ ionic_tx_tso_post(q, desc, skb,
+ desc_addr, desc_nsge, desc_len,
+ hdrlen, mss,
+ outer_csum,
+ vlan_tci, has_vlan,
+ start, done);
+ total_pkts++;
+ total_bytes += start ? len : len + hdrlen;
+ desc = ionic_tx_tso_next(q, &elem);
+ start = false;
+ seglen = mss;
+ }
+
+ /* Chop skb frags into desc segments */
+
+ for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
+ offset = 0;
+ left = skb_frag_size(frag);
+ len_left -= left;
+ nfrags--;
+ stats->frags++;
+
+ while (left > 0) {
+ if (frag_left > 0) {
+ len = min(frag_left, left);
+ frag_left -= len;
+ elem->addr =
+ cpu_to_le64(ionic_tx_map_frag(q, frag,
+ offset, len));
+ if (dma_mapping_error(dev, elem->addr))
+ goto err_out_abort;
+ elem->len = cpu_to_le16(len);
+ elem++;
+ desc_nsge++;
+ left -= len;
+ offset += len;
+ if (nfrags > 0 && frag_left > 0)
+ continue;
+ done = (nfrags == 0 && left == 0);
+ ionic_tx_tso_post(q, desc, skb, desc_addr,
+ desc_nsge, desc_len,
+ hdrlen, mss, outer_csum,
+ vlan_tci, has_vlan,
+ start, done);
+ total_pkts++;
+ total_bytes += start ? len : len + hdrlen;
+ desc = ionic_tx_tso_next(q, &elem);
+ start = false;
+ } else {
+ len = min(mss, left);
+ frag_left = mss - len;
+ desc_addr = ionic_tx_map_frag(q, frag,
+ offset, len);
+ if (dma_mapping_error(dev, desc_addr))
+ goto err_out_abort;
+ desc_len = len;
+ desc_nsge = 0;
+ left -= len;
+ offset += len;
+ if (nfrags > 0 && frag_left > 0)
+ continue;
+ done = (nfrags == 0 && left == 0);
+ ionic_tx_tso_post(q, desc, skb, desc_addr,
+ desc_nsge, desc_len,
+ hdrlen, mss, outer_csum,
+ vlan_tci, has_vlan,
+ start, done);
+ total_pkts++;
+ total_bytes += start ? len : len + hdrlen;
+ desc = ionic_tx_tso_next(q, &elem);
+ start = false;
+ }
+ }
+ }
+
+ stats->pkts += total_pkts;
+ stats->bytes += total_bytes;
+ stats->tso++;
+
+ return 0;
+
+err_out_abort:
+ while (rewind->desc != q->head->desc) {
+ ionic_tx_clean(q, rewind, NULL, NULL);
+ rewind = rewind->next;
+ }
+ q->head = abort;
+
+ return -ENOMEM;
+}
+
+static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
+{
+ struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ struct ionic_txq_desc *desc = q->head->desc;
+ struct device *dev = q->lif->ionic->dev;
+ dma_addr_t dma_addr;
+ bool has_vlan;
+ u8 flags = 0;
+ bool encap;
+ u64 cmd;
+
+ has_vlan = !!skb_vlan_tag_present(skb);
+ encap = skb->encapsulation;
+
+ dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
+ if (dma_mapping_error(dev, dma_addr))
+ return -ENOMEM;
+
+ flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
+ flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
+
+ cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
+ flags, skb_shinfo(skb)->nr_frags, dma_addr);
+ desc->cmd = cpu_to_le64(cmd);
+ desc->len = cpu_to_le16(skb_headlen(skb));
+ desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+ desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
+ desc->csum_offset = cpu_to_le16(skb->csum_offset);
+
+ if (skb->csum_not_inet)
+ stats->crc32_csum++;
+ else
+ stats->csum++;
+
+ return 0;
+}
+
+static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
+{
+ struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ struct ionic_txq_desc *desc = q->head->desc;
+ struct device *dev = q->lif->ionic->dev;
+ dma_addr_t dma_addr;
+ bool has_vlan;
+ u8 flags = 0;
+ bool encap;
+ u64 cmd;
+
+ has_vlan = !!skb_vlan_tag_present(skb);
+ encap = skb->encapsulation;
+
+ dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
+ if (dma_mapping_error(dev, dma_addr))
+ return -ENOMEM;
+
+ flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
+ flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
+
+ cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
+ flags, skb_shinfo(skb)->nr_frags, dma_addr);
+ desc->cmd = cpu_to_le64(cmd);
+ desc->len = cpu_to_le16(skb_headlen(skb));
+ desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+
+ stats->no_csum++;
+
+ return 0;
+}
+
+static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
+{
+ struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
+ unsigned int len_left = skb->len - skb_headlen(skb);
+ struct ionic_txq_sg_elem *elem = sg_desc->elems;
+ struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ struct device *dev = q->lif->ionic->dev;
+ dma_addr_t dma_addr;
+ skb_frag_t *frag;
+ u16 len;
+
+ for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) {
+ len = skb_frag_size(frag);
+ elem->len = cpu_to_le16(len);
+ dma_addr = ionic_tx_map_frag(q, frag, 0, len);
+ if (dma_mapping_error(dev, dma_addr))
+ return -ENOMEM;
+ elem->addr = cpu_to_le64(dma_addr);
+ len_left -= len;
+ stats->frags++;
+ }
+
+ return 0;
+}
+
+static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
+{
+ struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ int err;
+
+ /* set up the initial descriptor */
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ err = ionic_tx_calc_csum(q, skb);
+ else
+ err = ionic_tx_calc_no_csum(q, skb);
+ if (err)
+ return err;
+
+ /* add frags */
+ err = ionic_tx_skb_frags(q, skb);
+ if (err)
+ return err;
+
+ skb_tx_timestamp(skb);
+ stats->pkts++;
+ stats->bytes += skb->len;
+
+ netdev_tx_sent_queue(q_to_ndq(q), skb->len);
+ ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
+
+ return 0;
+}
+
+static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
+{
+ struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ int err;
+
+ /* If TSO, need roundup(skb->len/mss) descs */
+ if (skb_is_gso(skb))
+ return (skb->len / skb_shinfo(skb)->gso_size) + 1;
+
+ /* If non-TSO, just need 1 desc and nr_frags sg elems */
+ if (skb_shinfo(skb)->nr_frags <= IONIC_TX_MAX_SG_ELEMS)
+ return 1;
+
+ /* Too many frags, so linearize */
+ err = skb_linearize(skb);
+ if (err)
+ return err;
+
+ stats->linearize++;
+
+ /* Need 1 desc and zero sg elems */
+ return 1;
+}
+
+static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
+{
+ int stopped = 0;
+
+ if (unlikely(!ionic_q_has_space(q, ndescs))) {
+ netif_stop_subqueue(q->lif->netdev, q->index);
+ q->stop++;
+ stopped = 1;
+
+ /* Might race with ionic_tx_clean, check again */
+ smp_rmb();
+ if (ionic_q_has_space(q, ndescs)) {
+ netif_wake_subqueue(q->lif->netdev, q->index);
+ stopped = 0;
+ }
+ }
+
+ return stopped;
+}
+
+netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ u16 queue_index = skb_get_queue_mapping(skb);
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_queue *q;
+ int ndescs;
+ int err;
+
+ if (unlikely(!test_bit(IONIC_LIF_UP, lif->state))) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (unlikely(!lif_to_txqcq(lif, queue_index)))
+ queue_index = 0;
+ q = lif_to_txq(lif, queue_index);
+
+ ndescs = ionic_tx_descs_needed(q, skb);
+ if (ndescs < 0)
+ goto err_out_drop;
+
+ if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
+ return NETDEV_TX_BUSY;
+
+ if (skb_is_gso(skb))
+ err = ionic_tx_tso(q, skb);
+ else
+ err = ionic_tx(q, skb);
+
+ if (err)
+ goto err_out_drop;
+
+ /* Stop the queue if there aren't descriptors for the next packet.
+ * Since our SG lists per descriptor take care of most of the possible
+ * fragmentation, we don't need to have many descriptors available.
+ */
+ ionic_maybe_stop_tx(q, 4);
+
+ return NETDEV_TX_OK;
+
+err_out_drop:
+ q->stop++;
+ q->drop++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
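
A note on the refill path above: IONIC_RX_RING_DOORBELL_STRIDE is a power-of-two-minus-one mask, so ionic_rx_fill rings the doorbell only when (head index + 1) is a multiple of four, batching device notifications rather than ringing once per posted buffer. A standalone sketch of the mask arithmetic (the stride mirrors the define above; the loop bound is arbitrary):

#include <stdio.h>

#define RX_DOORBELL_STRIDE ((1 << 2) - 1)	/* ring every 4th post */

int main(void)
{
	unsigned int index;

	for (index = 0; index < 8; index++) {
		int ring = ((index + 1) & RX_DOORBELL_STRIDE) == 0;

		printf("post %u -> %s\n", index, ring ? "ring" : "skip");
	}
	return 0;	/* prints "ring" at posts 3 and 7 */
}
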
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
new file mode 100644
index 000000000000..53775c62c85a
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+
+#ifndef _IONIC_TXRX_H_
+#define _IONIC_TXRX_H_
+
+void ionic_rx_flush(struct ionic_cq *cq);
+void ionic_tx_flush(struct ionic_cq *cq);
+
+void ionic_rx_fill(struct ionic_queue *q);
+void ionic_rx_empty(struct ionic_queue *q);
+int ionic_rx_napi(struct napi_struct *napi, int budget);
+netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+
+#endif /* _IONIC_TXRX_H_ */
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index cb2ea8facd8d..3ab24fdccd3b 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1345,7 +1345,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
info->key.u.ipv4.dst =
nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
- if (IN_MULTICAST(ntohl(info->key.u.ipv4.dst))) {
+ if (ipv4_is_multicast(info->key.u.ipv4.dst)) {
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE],
"Remote IPv4 address cannot be Multicast");
return -EINVAL;
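
The geneve change above is behavior-preserving: IN_MULTICAST() expects a host-order address, forcing callers to ntohl() first, while the kernel's ipv4_is_multicast() takes the __be32 directly and masks for the 224.0.0.0/4 class D range. A userspace sketch of the equivalent check; the helper name shadows the kernel one purely for illustration:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Same test the kernel helper performs, on a network-order address:
 * the class D range 224.0.0.0/4 has its top nibble equal to 0xe.
 */
static int ipv4_is_multicast(uint32_t addr_be)
{
	return (addr_be & htonl(0xf0000000)) == htonl(0xe0000000);
}

int main(void)
{
	struct in_addr a;

	inet_pton(AF_INET, "239.1.2.3", &a);
	printf("%d\n", ipv4_is_multicast(a.s_addr));	/* 1 */
	inet_pton(AF_INET, "10.0.0.1", &a);
	printf("%d\n", ipv4_is_multicast(a.s_addr));	/* 0 */
	return 0;
}
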
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 778d27d1fb15..ec23c166e67b 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -757,6 +757,7 @@ struct r8152 {
u32 msg_enable;
u32 tx_qlen;
u32 coalesce;
+ u32 advertising;
u32 rx_buf_sz;
u32 rx_copybreak;
u32 rx_pending;
@@ -790,6 +791,13 @@ enum tx_csum_stat {
TX_CSUM_NONE
};
+#define RTL_ADVERTISED_10_HALF BIT(0)
+#define RTL_ADVERTISED_10_FULL BIT(1)
+#define RTL_ADVERTISED_100_HALF BIT(2)
+#define RTL_ADVERTISED_100_FULL BIT(3)
+#define RTL_ADVERTISED_1000_HALF BIT(4)
+#define RTL_ADVERTISED_1000_FULL BIT(5)
+
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
* The RTL chips use a 64 element hash table based on the Ethernet CRC.
*/
@@ -3801,90 +3809,117 @@ static void rtl8153b_disable(struct r8152 *tp)
r8153b_aldps_en(tp, true);
}
-static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
+static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex,
+ u32 advertising)
{
- u16 bmcr, anar, gbcr;
enum spd_duplex speed_duplex;
+ u16 bmcr;
int ret = 0;
- anar = r8152_mdio_read(tp, MII_ADVERTISE);
- anar &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
- ADVERTISE_100HALF | ADVERTISE_100FULL);
- if (tp->mii.supports_gmii) {
- gbcr = r8152_mdio_read(tp, MII_CTRL1000);
- gbcr &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
- } else {
- gbcr = 0;
- }
-
if (autoneg == AUTONEG_DISABLE) {
- if (speed == SPEED_10) {
- bmcr = 0;
- anar |= ADVERTISE_10HALF | ADVERTISE_10FULL;
- speed_duplex = FORCE_10M_HALF;
- } else if (speed == SPEED_100) {
- bmcr = BMCR_SPEED100;
- anar |= ADVERTISE_100HALF | ADVERTISE_100FULL;
- speed_duplex = FORCE_100M_HALF;
- } else if (speed == SPEED_1000 && tp->mii.supports_gmii) {
- bmcr = BMCR_SPEED1000;
- gbcr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
- speed_duplex = NWAY_1000M_FULL;
- } else {
- ret = -EINVAL;
- goto out;
- }
+ if (duplex != DUPLEX_HALF && duplex != DUPLEX_FULL)
+ return -EINVAL;
- if (duplex == DUPLEX_FULL) {
- bmcr |= BMCR_FULLDPLX;
- if (speed != SPEED_1000)
- speed_duplex++;
- }
- } else {
- if (speed == SPEED_10) {
+ switch (speed) {
+ case SPEED_10:
+ bmcr = BMCR_SPEED10;
if (duplex == DUPLEX_FULL) {
- anar |= ADVERTISE_10HALF | ADVERTISE_10FULL;
- speed_duplex = NWAY_10M_FULL;
+ bmcr |= BMCR_FULLDPLX;
+ speed_duplex = FORCE_10M_FULL;
} else {
- anar |= ADVERTISE_10HALF;
- speed_duplex = NWAY_10M_HALF;
+ speed_duplex = FORCE_10M_HALF;
}
- } else if (speed == SPEED_100) {
+ break;
+ case SPEED_100:
+ bmcr = BMCR_SPEED100;
if (duplex == DUPLEX_FULL) {
- anar |= ADVERTISE_10HALF | ADVERTISE_10FULL;
- anar |= ADVERTISE_100HALF | ADVERTISE_100FULL;
- speed_duplex = NWAY_100M_FULL;
+ bmcr |= BMCR_FULLDPLX;
+ speed_duplex = FORCE_100M_FULL;
} else {
- anar |= ADVERTISE_10HALF;
- anar |= ADVERTISE_100HALF;
- speed_duplex = NWAY_100M_HALF;
+ speed_duplex = FORCE_100M_HALF;
}
- } else if (speed == SPEED_1000 && tp->mii.supports_gmii) {
- if (duplex == DUPLEX_FULL) {
- anar |= ADVERTISE_10HALF | ADVERTISE_10FULL;
- anar |= ADVERTISE_100HALF | ADVERTISE_100FULL;
- gbcr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
- } else {
- anar |= ADVERTISE_10HALF;
- anar |= ADVERTISE_100HALF;
- gbcr |= ADVERTISE_1000HALF;
+ break;
+ case SPEED_1000:
+ if (tp->mii.supports_gmii) {
+ bmcr = BMCR_SPEED1000 | BMCR_FULLDPLX;
+ speed_duplex = NWAY_1000M_FULL;
+ break;
}
- speed_duplex = NWAY_1000M_FULL;
- } else {
+ /* fall through */
+ default:
ret = -EINVAL;
goto out;
}
+ if (duplex == DUPLEX_FULL)
+ tp->mii.full_duplex = 1;
+ else
+ tp->mii.full_duplex = 0;
+
+ tp->mii.force_media = 1;
+ } else {
+ u16 anar, tmp1;
+ u32 support;
+
+ support = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL |
+ RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL;
+
+ if (tp->mii.supports_gmii)
+ support |= RTL_ADVERTISED_1000_FULL;
+
+ if (!(advertising & support))
+ return -EINVAL;
+
+ anar = r8152_mdio_read(tp, MII_ADVERTISE);
+ tmp1 = anar & ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL);
+ if (advertising & RTL_ADVERTISED_10_HALF) {
+ tmp1 |= ADVERTISE_10HALF;
+ speed_duplex = NWAY_10M_HALF;
+ }
+ if (advertising & RTL_ADVERTISED_10_FULL) {
+ tmp1 |= ADVERTISE_10FULL;
+ speed_duplex = NWAY_10M_FULL;
+ }
+
+ if (advertising & RTL_ADVERTISED_100_HALF) {
+ tmp1 |= ADVERTISE_100HALF;
+ speed_duplex = NWAY_100M_HALF;
+ }
+ if (advertising & RTL_ADVERTISED_100_FULL) {
+ tmp1 |= ADVERTISE_100FULL;
+ speed_duplex = NWAY_100M_FULL;
+ }
+
+ if (anar != tmp1) {
+ r8152_mdio_write(tp, MII_ADVERTISE, tmp1);
+ tp->mii.advertising = tmp1;
+ }
+
+ if (tp->mii.supports_gmii) {
+ u16 gbcr;
+
+ gbcr = r8152_mdio_read(tp, MII_CTRL1000);
+ tmp1 = gbcr & ~(ADVERTISE_1000FULL |
+ ADVERTISE_1000HALF);
+
+ if (advertising & RTL_ADVERTISED_1000_FULL) {
+ tmp1 |= ADVERTISE_1000FULL;
+ speed_duplex = NWAY_1000M_FULL;
+ }
+
+ if (gbcr != tmp1)
+ r8152_mdio_write(tp, MII_CTRL1000, tmp1);
+ }
+
bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
+
+ tp->mii.force_media = 0;
}
if (test_and_clear_bit(PHY_RESET, &tp->flags))
bmcr |= BMCR_RESET;
- if (tp->mii.supports_gmii)
- r8152_mdio_write(tp, MII_CTRL1000, gbcr);
-
- r8152_mdio_write(tp, MII_ADVERTISE, anar);
r8152_mdio_write(tp, MII_BMCR, bmcr);
switch (tp->version) {
@@ -4122,7 +4157,8 @@ static void rtl_hw_phy_work_func_t(struct work_struct *work)
tp->rtl_ops.hw_phy_cfg(tp);
- rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex);
+ rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex,
+ tp->advertising);
mutex_unlock(&tp->control);
@@ -4840,20 +4876,46 @@ static int rtl8152_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct r8152 *tp = netdev_priv(dev);
+ u32 advertising = 0;
int ret;
ret = usb_autopm_get_interface(tp->intf);
if (ret < 0)
goto out;
+ if (test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ cmd->link_modes.advertising))
+ advertising |= RTL_ADVERTISED_10_HALF;
+
+ if (test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ cmd->link_modes.advertising))
+ advertising |= RTL_ADVERTISED_10_FULL;
+
+ if (test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ cmd->link_modes.advertising))
+ advertising |= RTL_ADVERTISED_100_HALF;
+
+ if (test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ cmd->link_modes.advertising))
+ advertising |= RTL_ADVERTISED_100_FULL;
+
+ if (test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ cmd->link_modes.advertising))
+ advertising |= RTL_ADVERTISED_1000_HALF;
+
+ if (test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ cmd->link_modes.advertising))
+ advertising |= RTL_ADVERTISED_1000_FULL;
+
mutex_lock(&tp->control);
ret = rtl8152_set_speed(tp, cmd->base.autoneg, cmd->base.speed,
- cmd->base.duplex);
+ cmd->base.duplex, advertising);
if (!ret) {
tp->autoneg = cmd->base.autoneg;
tp->speed = cmd->base.speed;
tp->duplex = cmd->base.duplex;
+ tp->advertising = advertising;
}
mutex_unlock(&tp->control);
@@ -5568,7 +5630,13 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->mii.phy_id = R8152_PHY_ID;
tp->autoneg = AUTONEG_ENABLE;
- tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100;
+ tp->speed = SPEED_100;
+ tp->advertising = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL |
+ RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL;
+ if (tp->mii.supports_gmii) {
+ tp->speed = SPEED_1000;
+ tp->advertising |= RTL_ADVERTISED_1000_FULL;
+ }
tp->duplex = DUPLEX_FULL;
tp->rx_copybreak = RTL8152_RXFG_HEADSZ;
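
The six test_bit blocks added to rtl8152_set_link_ksettings translate ethtool link-mode bits into the driver's compact RTL_ADVERTISED_* bitmap one by one. The same mapping can be expressed table-driven, as several other drivers do; a sketch under that assumption, reusing the defines introduced above:

static const struct {
	int ethtool_bit;
	u32 rtl_bit;
} rtl_advert_map[] = {
	{ ETHTOOL_LINK_MODE_10baseT_Half_BIT,   RTL_ADVERTISED_10_HALF },
	{ ETHTOOL_LINK_MODE_10baseT_Full_BIT,   RTL_ADVERTISED_10_FULL },
	{ ETHTOOL_LINK_MODE_100baseT_Half_BIT,  RTL_ADVERTISED_100_HALF },
	{ ETHTOOL_LINK_MODE_100baseT_Full_BIT,  RTL_ADVERTISED_100_FULL },
	{ ETHTOOL_LINK_MODE_1000baseT_Half_BIT, RTL_ADVERTISED_1000_HALF },
	{ ETHTOOL_LINK_MODE_1000baseT_Full_BIT, RTL_ADVERTISED_1000_FULL },
};

static u32 rtl_advertising_from_ksettings(const struct ethtool_link_ksettings *cmd)
{
	u32 advertising = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rtl_advert_map); i++)
		if (test_bit(rtl_advert_map[i].ethtool_bit,
			     cmd->link_modes.advertising))
			advertising |= rtl_advert_map[i].rtl_bit;
	return advertising;
}
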
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index c2f056b5766d..8dd081051a79 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1162,6 +1162,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
+#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
+ MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
+
#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)
@@ -1225,6 +1228,10 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(e_switch_cap, \
mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)
+#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
+ MLX5_GET64(flow_table_eswitch_cap, \
+ (mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+
#define MLX5_CAP_ESW_MAX(mdev, cap) \
MLX5_GET(e_switch_cap, \
mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 52a56d741f79..3e80f03a387f 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -626,6 +626,11 @@ struct mlx5e_resources {
struct mlx5_sq_bfreg bfreg;
};
+enum mlx5_sw_icm_type {
+ MLX5_SW_ICM_TYPE_STEERING,
+ MLX5_SW_ICM_TYPE_HEADER_MODIFY,
+};
+
#define MLX5_MAX_RESERVED_GIDS 8
struct mlx5_rsvd_gids {
@@ -657,11 +662,15 @@ struct mlx5_clock {
struct mlx5_pps pps_info;
};
+struct mlx5_dm;
struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_geneve;
struct mlx5_hv_vhca;
+#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
+#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+
struct mlx5_core_dev {
struct device *device;
enum mlx5_coredev_type coredev_type;
@@ -695,6 +704,7 @@ struct mlx5_core_dev {
atomic_t num_qps;
u32 issi;
struct mlx5e_resources mlx5e_res;
+ struct mlx5_dm *dm;
struct mlx5_vxlan *vxlan;
struct mlx5_geneve *geneve;
struct {
@@ -1078,6 +1088,10 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
size_t *offsets);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
+int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+ u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id);
+int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+ u64 length, u16 uid, phys_addr_t addr, u32 obj_id);
#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
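
The new device-memory interface in driver.h is block-granular: MLX5_SW_ICM_BLOCK_SIZE() derives the allocation granularity from a device capability, and callers pass a byte length, an ICM type, and a uid (0 for kernel-owned). A hedged caller sketch built only from the declarations above; the round-trip body is illustrative:

/* Allocate one block of steering ICM and release it again.
 * Sketch only: assumes "dev" is a live mlx5_core_dev.
 */
static int example_sw_icm_round_trip(struct mlx5_core_dev *dev)
{
	u64 len = MLX5_SW_ICM_BLOCK_SIZE(dev);
	phys_addr_t addr;
	u32 obj_id;
	int err;

	err = mlx5_dm_sw_icm_alloc(dev, MLX5_SW_ICM_TYPE_STEERING,
				   len, 0, &addr, &obj_id);
	if (err)
		return err;

	return mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
				      len, 0, addr, obj_id);
}
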
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index 38a70d16d8d5..98e667b176ef 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -60,7 +60,6 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
u16 vport_num);
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type);
-u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw);
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw,
u16 vport_num, u32 sqn);
@@ -74,7 +73,14 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev);
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw);
u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
u16 vport_num);
+u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw);
#else /* CONFIG_MLX5_ESWITCH */
+
+static inline u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
+{
+ return MLX5_ESWITCH_NONE;
+}
+
static inline enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
{
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 97ec6be62ac4..724d276ea133 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -84,6 +84,8 @@ enum {
FDB_SLOW_PATH,
};
+struct mlx5_pkt_reformat;
+struct mlx5_modify_hdr;
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_namespace;
@@ -121,7 +123,7 @@ struct mlx5_flow_destination {
struct {
u16 num;
u16 vhca_id;
- u32 reformat_id;
+ struct mlx5_pkt_reformat *pkt_reformat;
u8 flags;
} vport;
};
@@ -195,8 +197,8 @@ enum {
struct mlx5_flow_act {
u32 action;
- u32 reformat_id;
- u32 modify_id;
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_pkt_reformat *pkt_reformat;
uintptr_t esp_id;
u32 flags;
struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
@@ -205,8 +207,6 @@ struct mlx5_flow_act {
#define MLX5_DECLARE_FLOW_ACT(name) \
struct mlx5_flow_act name = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
- .reformat_id = 0, \
- .modify_id = 0, \
.flags = 0, }
/* Single destination per rule.
@@ -236,19 +236,18 @@ u32 mlx5_fc_id(struct mlx5_fc *counter);
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
-int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
- u8 namespace, u8 num_actions,
- void *modify_actions, u32 *modify_header_id);
+struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+ u8 ns_type, u8 num_actions,
+ void *modify_actions);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
- u32 modify_header_id);
-
-int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
- int reformat_type,
- size_t size,
- void *reformat_data,
- enum mlx5_flow_namespace_type namespace,
- u32 *packet_reformat_id);
+ struct mlx5_modify_hdr *modify_hdr);
+
+struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
+ int reformat_type,
+ size_t size,
+ void *reformat_data,
+ enum mlx5_flow_namespace_type ns_type);
void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
- u32 packet_reformat_id);
+ struct mlx5_pkt_reformat *reformat);
#endif
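
The fs.h rework converts modify-header and packet-reformat handles from raw firmware ids into opaque struct pointers, so the software-steering backend can attach its own state to them. Schematically, a caller migrates as below; the ERR_PTR convention on the new allocator is inferred from its pointer return type rather than shown in this hunk, and the snippets are fragments, not a specific driver site:

/* Before: callers carried a bare firmware id (error paths trimmed). */
u32 reformat_id;

err = mlx5_packet_reformat_alloc(dev, MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
				 size, data, MLX5_FLOW_NAMESPACE_FDB,
				 &reformat_id);
if (err)
	return err;
flow_act.reformat_id = reformat_id;

/* After: callers hold an opaque handle and hand it back on free. */
struct mlx5_pkt_reformat *pkt_reformat;

pkt_reformat = mlx5_packet_reformat_alloc(dev, MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
					  size, data, MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(pkt_reformat))
	return PTR_ERR(pkt_reformat);
flow_act.pkt_reformat = pkt_reformat;
/* ... rule creation ... */
mlx5_packet_reformat_dealloc(dev, pkt_reformat);
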
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 66b60afd5e06..7d65c0578ac9 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -282,6 +282,7 @@ enum {
MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940,
MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942,
+ MLX5_CMD_OP_SYNC_STEERING = 0xb00,
MLX5_CMD_OP_FPGA_CREATE_QP = 0x960,
MLX5_CMD_OP_FPGA_MODIFY_QP = 0x961,
MLX5_CMD_OP_FPGA_QUERY_QP = 0x962,
@@ -485,7 +486,11 @@ union mlx5_ifc_gre_key_bits {
};
struct mlx5_ifc_fte_match_set_misc_bits {
- u8 reserved_at_0[0x8];
+ u8 gre_c_present[0x1];
+ u8 reserved_auto1[0x1];
+ u8 gre_k_present[0x1];
+ u8 gre_s_present[0x1];
+ u8 source_vhca_port[0x4];
u8 source_sqn[0x18];
u8 source_eswitch_owner_vhca_id[0x10];
@@ -565,12 +570,38 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
u8 metadata_reg_a[0x20];
- u8 reserved_at_1a0[0x60];
+ u8 metadata_reg_b[0x20];
+
+ u8 reserved_at_1c0[0x40];
};
struct mlx5_ifc_fte_match_set_misc3_bits {
- u8 reserved_at_0[0x120];
+ u8 inner_tcp_seq_num[0x20];
+
+ u8 outer_tcp_seq_num[0x20];
+
+ u8 inner_tcp_ack_num[0x20];
+
+ u8 outer_tcp_ack_num[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 outer_vxlan_gpe_vni[0x18];
+
+ u8 outer_vxlan_gpe_next_protocol[0x8];
+ u8 outer_vxlan_gpe_flags[0x8];
+ u8 reserved_at_b0[0x10];
+
+ u8 icmp_header_data[0x20];
+
+ u8 icmpv6_header_data[0x20];
+
+ u8 icmp_type[0x8];
+ u8 icmp_code[0x8];
+ u8 icmpv6_type[0x8];
+ u8 icmpv6_code[0x8];
+
u8 geneve_tlv_option_0_data[0x20];
+
u8 reserved_at_140[0xc0];
};
@@ -666,7 +697,15 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
- u8 reserved_at_e00[0x7200];
+ u8 reserved_at_e00[0x1200];
+
+ u8 sw_steering_nic_rx_action_drop_icm_address[0x40];
+
+ u8 sw_steering_nic_tx_action_drop_icm_address[0x40];
+
+ u8 sw_steering_nic_tx_action_allow_icm_address[0x40];
+
+ u8 reserved_at_20c0[0x5f40];
};
enum {
@@ -698,7 +737,17 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress;
- u8 reserved_at_800[0x7800];
+ u8 reserved_at_800[0x1000];
+
+ u8 sw_steering_fdb_action_drop_icm_address_rx[0x40];
+
+ u8 sw_steering_fdb_action_drop_icm_address_tx[0x40];
+
+ u8 sw_steering_uplink_icm_address_rx[0x40];
+
+ u8 sw_steering_uplink_icm_address_tx[0x40];
+
+ u8 reserved_at_1900[0x6700];
};
enum {
@@ -849,6 +898,25 @@ struct mlx5_ifc_roce_cap_bits {
u8 reserved_at_100[0x700];
};
+struct mlx5_ifc_sync_steering_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0xc0];
+};
+
+struct mlx5_ifc_sync_steering_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_device_mem_cap_bits {
u8 memic[0x1];
u8 reserved_at_1[0x1f];
@@ -1042,6 +1110,12 @@ enum {
};
enum {
+ MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
+ MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
+ MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
+};
+
+enum {
MLX5_UCTX_CAP_RAW_TX = 1UL << 0,
MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
};
@@ -1414,7 +1488,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_6c0[0x4];
u8 flex_parser_id_geneve_tlv_option_0[0x4];
- u8 reserved_at_6c8[0x28];
+ u8 flex_parser_id_icmp_dw1[0x4];
+ u8 flex_parser_id_icmp_dw0[0x4];
+ u8 flex_parser_id_icmpv6_dw1[0x4];
+ u8 flex_parser_id_icmpv6_dw0[0x4];
+ u8 flex_parser_id_outer_first_mpls_over_gre[0x4];
+ u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4];
+
+ u8 reserved_at_6e0[0x10];
u8 sf_base_id[0x10];
u8 reserved_at_700[0x80];
@@ -2652,6 +2733,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_debug_cap_bits debug_cap;
struct mlx5_ifc_fpga_cap_bits fpga_cap;
struct mlx5_ifc_tls_cap_bits tls_cap;
+ struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
u8 reserved_at_0[0x8000];
};
@@ -3255,7 +3337,11 @@ struct mlx5_ifc_esw_vport_context_bits {
u8 cvlan_pcp[0x3];
u8 cvlan_id[0xc];
- u8 reserved_at_60[0x7a0];
+ u8 reserved_at_60[0x720];
+
+ u8 sw_steering_vport_icm_address_rx[0x40];
+
+ u8 sw_steering_vport_icm_address_tx[0x40];
};
enum {
@@ -4941,23 +5027,98 @@ struct mlx5_ifc_query_hca_cap_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_function[0x1];
+ u8 reserved_at_41[0xf];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
};
-struct mlx5_ifc_query_flow_table_out_bits {
+struct mlx5_ifc_other_hca_cap_bits {
+ u8 roce[0x1];
+ u8 reserved_0[0x27f];
+};
+
+struct mlx5_ifc_query_other_hca_cap_out_bits {
u8 status[0x8];
- u8 reserved_at_8[0x18];
+ u8 reserved_0[0x18];
u8 syndrome[0x20];
- u8 reserved_at_40[0x80];
+ u8 reserved_1[0x40];
- u8 reserved_at_c0[0x8];
+ struct mlx5_ifc_other_hca_cap_bits other_capability;
+};
+
+struct mlx5_ifc_query_other_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_modify_other_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_other_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+ u8 field_select[0x20];
+
+ struct mlx5_ifc_other_hca_cap_bits other_capability;
+};
+
+struct mlx5_ifc_flow_table_context_bits {
+ u8 reformat_en[0x1];
+ u8 decap_en[0x1];
+ u8 sw_owner[0x1];
+ u8 termination_table[0x1];
+ u8 table_miss_action[0x4];
u8 level[0x8];
- u8 reserved_at_d0[0x8];
+ u8 reserved_at_10[0x8];
u8 log_size[0x8];
- u8 reserved_at_e0[0x120];
+ u8 reserved_at_20[0x8];
+ u8 table_miss_id[0x18];
+
+ u8 reserved_at_40[0x8];
+ u8 lag_master_next_table_id[0x18];
+
+ u8 reserved_at_60[0x60];
+
+ u8 sw_owner_icm_root_1[0x40];
+
+ u8 sw_owner_icm_root_0[0x40];
+
+};
+
+struct mlx5_ifc_query_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x80];
+
+ struct mlx5_ifc_flow_table_context_bits flow_table_context;
};
struct mlx5_ifc_query_flow_table_in_bits {
@@ -5227,7 +5388,7 @@ struct mlx5_ifc_alloc_packet_reformat_context_out_bits {
u8 reserved_at_60[0x20];
};
-enum {
+enum mlx5_reformat_ctx_type {
MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0,
MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1,
MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
@@ -5323,7 +5484,16 @@ enum {
MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16,
MLX5_ACTION_IN_FIELD_OUT_FIRST_VID = 0x17,
MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT = 0x47,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_A = 0x49,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_B = 0x50,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_0 = 0x51,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_1 = 0x52,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_2 = 0x53,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_3 = 0x54,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_4 = 0x55,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_5 = 0x56,
+ MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59,
+ MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B,
};
struct mlx5_ifc_alloc_modify_header_context_out_bits {
@@ -7371,35 +7541,26 @@ struct mlx5_ifc_create_mkey_in_bits {
u8 klm_pas_mtt[0][0x20];
};
+enum {
+ MLX5_FLOW_TABLE_TYPE_NIC_RX = 0x0,
+ MLX5_FLOW_TABLE_TYPE_NIC_TX = 0x1,
+ MLX5_FLOW_TABLE_TYPE_ESW_EGRESS_ACL = 0x2,
+ MLX5_FLOW_TABLE_TYPE_ESW_INGRESS_ACL = 0x3,
+ MLX5_FLOW_TABLE_TYPE_FDB = 0X4,
+ MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 0X5,
+ MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 0X6,
+};
+
struct mlx5_ifc_create_flow_table_out_bits {
u8 status[0x8];
- u8 reserved_at_8[0x18];
+ u8 icm_address_63_40[0x18];
u8 syndrome[0x20];
- u8 reserved_at_40[0x8];
+ u8 icm_address_39_32[0x8];
u8 table_id[0x18];
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_flow_table_context_bits {
- u8 reformat_en[0x1];
- u8 decap_en[0x1];
- u8 reserved_at_2[0x1];
- u8 termination_table[0x1];
- u8 table_miss_action[0x4];
- u8 level[0x8];
- u8 reserved_at_10[0x8];
- u8 log_size[0x8];
-
- u8 reserved_at_20[0x8];
- u8 table_miss_id[0x18];
-
- u8 reserved_at_40[0x8];
- u8 lag_master_next_table_id[0x18];
-
- u8 reserved_at_60[0xe0];
+ u8 icm_address_31_0[0x20];
};
struct mlx5_ifc_create_flow_table_in_bits {
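
A reading aid for the mlx5_ifc changes: every member u8 name[0xN] is an N-bit field, and the hex suffixes in reserved_at_X names are bit offsets, which is what the MLX5_SET/MLX5_GET accessor macros key off. A sketch of driving the new SYNC_STEERING command through that machinery; this mirrors the usual command-building pattern, though the real call site lives in the steering code:

static int example_sync_steering(struct mlx5_core_dev *mdev)
{
	u32 out[MLX5_ST_SZ_DW(sync_steering_out)] = {};
	u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};

	MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
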
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 13523b0a0642..460bc629d1a4 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -458,6 +458,13 @@ enum devlink_param_generic_id {
/* Maker of the board */
#define DEVLINK_INFO_VERSION_GENERIC_BOARD_MANUFACTURE "board.manufacture"
+/* Part number, identifier of asic design */
+#define DEVLINK_INFO_VERSION_GENERIC_ASIC_ID "asic.id"
+/* Revision of asic design */
+#define DEVLINK_INFO_VERSION_GENERIC_ASIC_REV "asic.rev"
+
+/* Overall FW version */
+#define DEVLINK_INFO_VERSION_GENERIC_FW "fw"
/* Control processor FW version */
#define DEVLINK_INFO_VERSION_GENERIC_FW_MGMT "fw.mgmt"
/* Data path microcode controlling high-speed packet processing */
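
The new generic devlink tags plug into a driver's ->info_get() callback: fixed identifiers such as the ASIC id and running firmware versions are reported under these well-known names so userspace sees consistent keys across vendors. A hedged sketch of such a callback; the version strings are placeholders:

static int example_devlink_info_get(struct devlink *devlink,
				    struct devlink_info_req *req,
				    struct netlink_ext_ack *extack)
{
	int err;

	err = devlink_info_version_fixed_put(req,
					     DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
					     "example-asic");
	if (err)
		return err;

	return devlink_info_version_running_put(req,
						DEVLINK_INFO_VERSION_GENERIC_FW,
						"1.0.0");
}
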
diff --git a/include/net/tls.h b/include/net/tls.h
index ec3c3ed2c6c3..c664e6dba0d1 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -275,16 +275,6 @@ struct tls_context {
struct proto *sk_proto;
void (*sk_destruct)(struct sock *sk);
- void (*sk_proto_close)(struct sock *sk, long timeout);
-
- int (*setsockopt)(struct sock *sk, int level,
- int optname, char __user *optval,
- unsigned int optlen);
- int (*getsockopt)(struct sock *sk, int level,
- int optname, char __user *optval,
- int __user *optlen);
- int (*hash)(struct sock *sk);
- void (*unhash)(struct sock *sk);
union tls_crypto_context crypto_send;
union tls_crypto_context crypto_recv;
@@ -376,13 +366,9 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
-int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
-void tls_device_free_resources_tx(struct sock *sk);
-void tls_device_init(void);
-void tls_device_cleanup(void);
int tls_tx_records(struct sock *sk, int flags);
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
@@ -659,7 +645,6 @@ int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
unsigned char *record_type);
void tls_register_device(struct tls_device *device);
void tls_unregister_device(struct tls_device *device);
-int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
@@ -672,9 +657,40 @@ int tls_sw_fallback_init(struct sock *sk,
struct tls_offload_context_tx *offload_ctx,
struct tls_crypto_info *crypto_info);
+#ifdef CONFIG_TLS_DEVICE
+void tls_device_init(void);
+void tls_device_cleanup(void);
+int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
+void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
-
void tls_device_offload_cleanup_rx(struct sock *sk);
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
+int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
+#else
+static inline void tls_device_init(void) {}
+static inline void tls_device_cleanup(void) {}
+static inline int
+tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void tls_device_free_resources_tx(struct sock *sk) {}
+
+static inline int
+tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
+static inline void
+tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}
+
+static inline int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
+{
+ return 0;
+}
+#endif
#endif /* _TLS_OFFLOAD_H */
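
Hiding the CONFIG_TLS_DEVICE declarations behind in-header stubs is what lets tls_main.c, later in this series, drop its own #ifdef blocks: with the option off, the stubs collapse to no-ops or -EOPNOTSUPP and the compiler discards the dead branches. The pattern in miniature, with hypothetical names:

/* header: one real declaration, one stub; callers stay ifdef-free */
#ifdef CONFIG_FOO_OFFLOAD
int foo_offload_start(struct sock *sk);
#else
static inline int foo_offload_start(struct sock *sk)
{
	return -EOPNOTSUPP;	/* caller falls back to the SW path */
}
#endif

/* call site compiles either way */
if (foo_offload_start(sk))
	foo_sw_start(sk);
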
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index dc1583a1fb8a..335283dbe9b3 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -391,7 +391,7 @@ static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
if (ipa->sa.sa_family == AF_INET6)
return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
else
- return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
+ return ipv4_is_multicast(ipa->sin.sin_addr.s_addr);
}
#else /* !IS_ENABLED(CONFIG_IPV6) */
@@ -403,7 +403,7 @@ static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
- return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
+ return ipv4_is_multicast(ipa->sin.sin_addr.s_addr);
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 2977137c28eb..1a5bf3fa4578 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -559,7 +559,7 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
ret = -EDESTADDRREQ;
break;
}
- if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)) ||
+ if (ipv4_is_multicast(sin->sin_addr.s_addr) ||
sin->sin_addr.s_addr == htonl(INADDR_BROADCAST)) {
ret = -EINVAL;
break;
@@ -593,7 +593,7 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
addr4 = sin6->sin6_addr.s6_addr32[3];
if (addr4 == htonl(INADDR_ANY) ||
addr4 == htonl(INADDR_BROADCAST) ||
- IN_MULTICAST(ntohl(addr4))) {
+ ipv4_is_multicast(addr4)) {
ret = -EPROTOTYPE;
break;
}
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 0f4398e7f2a7..6dbb763bc1fd 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -181,7 +181,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (addr_len < sizeof(struct sockaddr_in) ||
sin->sin_addr.s_addr == htonl(INADDR_ANY) ||
sin->sin_addr.s_addr == htonl(INADDR_BROADCAST) ||
- IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
+ ipv4_is_multicast(sin->sin_addr.s_addr))
return -EINVAL;
ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &v6addr);
binding_addr = &v6addr;
@@ -206,7 +206,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
addr4 = sin6->sin6_addr.s6_addr32[3];
if (addr4 == htonl(INADDR_ANY) ||
addr4 == htonl(INADDR_BROADCAST) ||
- IN_MULTICAST(ntohl(addr4)))
+ ipv4_is_multicast(addr4))
return -EINVAL;
}
/* The scope ID must be specified for link local address. */
diff --git a/net/rds/send.c b/net/rds/send.c
index 9ce552abf9e9..82dcd8b84fe7 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1144,7 +1144,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
case AF_INET:
if (usin->sin_addr.s_addr == htonl(INADDR_ANY) ||
usin->sin_addr.s_addr == htonl(INADDR_BROADCAST) ||
- IN_MULTICAST(ntohl(usin->sin_addr.s_addr))) {
+ ipv4_is_multicast(usin->sin_addr.s_addr)) {
ret = -EINVAL;
goto out;
}
@@ -1175,7 +1175,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
addr4 = sin6->sin6_addr.s6_addr32[3];
if (addr4 == htonl(INADDR_ANY) ||
addr4 == htonl(INADDR_BROADCAST) ||
- IN_MULTICAST(ntohl(addr4))) {
+ ipv4_is_multicast(addr4)) {
ret = -EINVAL;
goto out;
}
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index 810645b5c086..93b58fde99b7 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -299,7 +299,7 @@ static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
{
struct ethtool_link_ksettings ecmd;
int speed = SPEED_10;
- int port_rate = -1;
+ int port_rate;
int err;
err = __ethtool_get_link_ksettings(dev, &ecmd);
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index e188139f0464..41c106e45f01 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -159,12 +159,8 @@ static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
spin_lock_irqsave(&ctx->lock, flags);
info = ctx->retransmit_hint;
- if (info && !before(acked_seq, info->end_seq)) {
+ if (info && !before(acked_seq, info->end_seq))
ctx->retransmit_hint = NULL;
- list_del(&info->list);
- destroy_record(info);
- deleted_records++;
- }
list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
if (before(acked_seq, info->end_seq))
@@ -838,22 +834,18 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
struct net_device *netdev;
char *iv, *rec_seq;
struct sk_buff *skb;
- int rc = -EINVAL;
__be64 rcd_sn;
+ int rc;
if (!ctx)
- goto out;
+ return -EINVAL;
- if (ctx->priv_ctx_tx) {
- rc = -EEXIST;
- goto out;
- }
+ if (ctx->priv_ctx_tx)
+ return -EEXIST;
start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
- if (!start_marker_record) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!start_marker_record)
+ return -ENOMEM;
offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
if (!offload_ctx) {
@@ -939,17 +931,11 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
if (skb)
TCP_SKB_CB(skb)->eor = 1;
- /* We support starting offload on multiple sockets
- * concurrently, so we only need a read lock here.
- * This lock must precede get_netdev_for_sock to prevent races between
- * NETDEV_DOWN and setsockopt.
- */
- down_read(&device_offload_lock);
netdev = get_netdev_for_sock(sk);
if (!netdev) {
pr_err_ratelimited("%s: netdev not found\n", __func__);
rc = -EINVAL;
- goto release_lock;
+ goto disable_cad;
}
if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
@@ -960,10 +946,15 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
/* Avoid offloading if the device is down
* We don't want to offload new flows after
* the NETDEV_DOWN event
+ *
+ * device_offload_lock is taken in tls_device's NETDEV_DOWN
+ * handler thus protecting from the device going down before
+ * ctx was added to tls_device_list.
*/
+ down_read(&device_offload_lock);
if (!(netdev->flags & IFF_UP)) {
rc = -EINVAL;
- goto release_netdev;
+ goto release_lock;
}
ctx->priv_ctx_tx = offload_ctx;
@@ -971,9 +962,10 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
&ctx->crypto_send.info,
tcp_sk(sk)->write_seq);
if (rc)
- goto release_netdev;
+ goto release_lock;
tls_device_attach(ctx, sk, netdev);
+ up_read(&device_offload_lock);
/* following this assignment tls_is_sk_tx_device_offloaded
* will return true and the context might be accessed
@@ -981,13 +973,14 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
*/
smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
dev_put(netdev);
- up_read(&device_offload_lock);
- goto out;
-release_netdev:
- dev_put(netdev);
+ return 0;
+
release_lock:
up_read(&device_offload_lock);
+release_netdev:
+ dev_put(netdev);
+disable_cad:
clean_acked_data_disable(inet_csk(sk));
crypto_free_aead(offload_ctx->aead_send);
free_rec_seq:
@@ -999,7 +992,6 @@ free_offload_ctx:
ctx->priv_ctx_tx = NULL;
free_marker_record:
kfree(start_marker_record);
-out:
return rc;
}
@@ -1012,17 +1004,10 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
return -EOPNOTSUPP;
- /* We support starting offload on multiple sockets
- * concurrently, so we only need a read lock here.
- * This lock must precede get_netdev_for_sock to prevent races between
- * NETDEV_DOWN and setsockopt.
- */
- down_read(&device_offload_lock);
netdev = get_netdev_for_sock(sk);
if (!netdev) {
pr_err_ratelimited("%s: netdev not found\n", __func__);
- rc = -EINVAL;
- goto release_lock;
+ return -EINVAL;
}
if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
@@ -1033,16 +1018,21 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
/* Avoid offloading if the device is down
* We don't want to offload new flows after
* the NETDEV_DOWN event
+ *
+ * device_offload_lock is taken in tls_device's NETDEV_DOWN
+ * handler thus protecting from the device going down before
+ * ctx was added to tls_device_list.
*/
+ down_read(&device_offload_lock);
if (!(netdev->flags & IFF_UP)) {
rc = -EINVAL;
- goto release_netdev;
+ goto release_lock;
}
context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
if (!context) {
rc = -ENOMEM;
- goto release_netdev;
+ goto release_lock;
}
context->resync_nh_reset = 1;
@@ -1058,7 +1048,11 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
goto free_sw_resources;
tls_device_attach(ctx, sk, netdev);
- goto release_netdev;
+ up_read(&device_offload_lock);
+
+ dev_put(netdev);
+
+ return 0;
free_sw_resources:
up_read(&device_offload_lock);
@@ -1066,10 +1060,10 @@ free_sw_resources:
down_read(&device_offload_lock);
release_ctx:
ctx->priv_ctx_rx = NULL;
-release_netdev:
- dev_put(netdev);
release_lock:
up_read(&device_offload_lock);
+release_netdev:
+ dev_put(netdev);
return rc;
}
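
Both functions now unwind through labels ordered to mirror acquisition: the lock, taken last, is dropped first, then the netdev reference, then any earlier per-socket state. The shared shape, with label names taken from the diff (disable_cad exists only on the TX path):

	release_lock:
		up_read(&device_offload_lock);	/* taken last, dropped first */
	release_netdev:
		dev_put(netdev);		/* ref from get_netdev_for_sock() */
	disable_cad:				/* TX path only */
		clean_acked_data_disable(inet_csk(sk));
		/* ...free earlier allocations, then... */
		return rc;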
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 277f7c209fed..ac88877dcade 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -286,19 +286,14 @@ static void tls_sk_proto_cleanup(struct sock *sk,
kfree(ctx->tx.rec_seq);
kfree(ctx->tx.iv);
tls_sw_release_resources_tx(sk);
-#ifdef CONFIG_TLS_DEVICE
} else if (ctx->tx_conf == TLS_HW) {
tls_device_free_resources_tx(sk);
-#endif
}
if (ctx->rx_conf == TLS_SW)
tls_sw_release_resources_rx(sk);
-
-#ifdef CONFIG_TLS_DEVICE
- if (ctx->rx_conf == TLS_HW)
+ else if (ctx->rx_conf == TLS_HW)
tls_device_offload_cleanup_rx(sk);
-#endif
}
static void tls_sk_proto_close(struct sock *sk, long timeout)
@@ -331,7 +326,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
tls_sw_strparser_done(ctx);
if (ctx->rx_conf == TLS_SW)
tls_sw_free_ctx_rx(ctx);
- ctx->sk_proto_close(sk, timeout);
+ ctx->sk_proto->close(sk, timeout);
if (free_ctx)
tls_ctx_free(sk, ctx);
@@ -451,7 +446,8 @@ static int tls_getsockopt(struct sock *sk, int level, int optname,
struct tls_context *ctx = tls_get_ctx(sk);
if (level != SOL_TLS)
- return ctx->getsockopt(sk, level, optname, optval, optlen);
+ return ctx->sk_proto->getsockopt(sk, level,
+ optname, optval, optlen);
return do_tls_getsockopt(sk, optname, optval, optlen);
}
@@ -536,26 +532,18 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
}
if (tx) {
-#ifdef CONFIG_TLS_DEVICE
rc = tls_set_device_offload(sk, ctx);
conf = TLS_HW;
if (rc) {
-#else
- {
-#endif
rc = tls_set_sw_offload(sk, ctx, 1);
if (rc)
goto err_crypto_info;
conf = TLS_SW;
}
} else {
-#ifdef CONFIG_TLS_DEVICE
rc = tls_set_device_offload_rx(sk, ctx);
conf = TLS_HW;
if (rc) {
-#else
- {
-#endif
rc = tls_set_sw_offload(sk, ctx, 0);
if (rc)
goto err_crypto_info;
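
From userspace nothing changes: setsockopt(SOL_TLS, TLS_TX, ...) lands in do_tls_setsockopt_conf() above, which tries device offload first and falls back to software TLS on any failure. A minimal userland sketch of that entry point (the key material would come from a userspace TLS handshake; the fallback #define covers older uapi headers):

	#include <linux/tls.h>
	#include <netinet/tcp.h>
	#include <string.h>
	#include <sys/socket.h>

	#ifndef SOL_TLS
	#define SOL_TLS 282	/* not all libc headers define it */
	#endif

	static int enable_ktls_tx(int sock,
				  const unsigned char key[TLS_CIPHER_AES_GCM_128_KEY_SIZE],
				  const unsigned char iv[TLS_CIPHER_AES_GCM_128_IV_SIZE],
				  const unsigned char salt[TLS_CIPHER_AES_GCM_128_SALT_SIZE],
				  const unsigned char rec_seq[TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE])
	{
		struct tls12_crypto_info_aes_gcm_128 ci;

		/* install the TLS ULP on an established TCP socket */
		if (setsockopt(sock, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
			return -1;

		memset(&ci, 0, sizeof(ci));
		ci.info.version = TLS_1_2_VERSION;
		ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
		memcpy(ci.key, key, sizeof(ci.key));
		memcpy(ci.iv, iv, sizeof(ci.iv));
		memcpy(ci.salt, salt, sizeof(ci.salt));
		memcpy(ci.rec_seq, rec_seq, sizeof(ci.rec_seq));

		/* kernel tries tls_set_device_offload(), then TLS_SW */
		return setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
	}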
@@ -609,7 +597,8 @@ static int tls_setsockopt(struct sock *sk, int level, int optname,
struct tls_context *ctx = tls_get_ctx(sk);
if (level != SOL_TLS)
- return ctx->setsockopt(sk, level, optname, optval, optlen);
+ return ctx->sk_proto->setsockopt(sk, level, optname, optval,
+ optlen);
return do_tls_setsockopt(sk, optname, optval, optlen);
}
@@ -624,10 +613,7 @@ static struct tls_context *create_ctx(struct sock *sk)
return NULL;
rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
- ctx->setsockopt = sk->sk_prot->setsockopt;
- ctx->getsockopt = sk->sk_prot->getsockopt;
- ctx->sk_proto_close = sk->sk_prot->close;
- ctx->unhash = sk->sk_prot->unhash;
+ ctx->sk_proto = sk->sk_prot;
return ctx;
}
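
The four separately saved callbacks collapse into a single saved struct proto pointer, and every former ctx->callback(sk, ...) call becomes a delegation through ctx->sk_proto. In outline (taken from the hunks in this file):

	struct tls_context {
		/* ... */
		struct proto *sk_proto;	/* replaces the per-callback copies:
					 * setsockopt, getsockopt,
					 * sk_proto_close, hash, unhash
					 */
	};

	/* delegation is then uniform at every call site, e.g.: */
	static int tls_getsockopt(struct sock *sk, int level, int optname,
				  char __user *optval, int __user *optlen)
	{
		struct tls_context *ctx = tls_get_ctx(sk);

		if (level != SOL_TLS)
			return ctx->sk_proto->getsockopt(sk, level, optname,
							 optval, optlen);
		return do_tls_getsockopt(sk, optname, optval, optlen);
	}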
@@ -683,9 +669,6 @@ static int tls_hw_prot(struct sock *sk)
spin_unlock_bh(&device_spinlock);
tls_build_proto(sk);
- ctx->hash = sk->sk_prot->hash;
- ctx->unhash = sk->sk_prot->unhash;
- ctx->sk_proto_close = sk->sk_prot->close;
ctx->sk_destruct = sk->sk_destruct;
sk->sk_destruct = tls_hw_sk_destruct;
ctx->rx_conf = TLS_HW_RECORD;
@@ -717,7 +700,7 @@ static void tls_hw_unhash(struct sock *sk)
}
}
spin_unlock_bh(&device_spinlock);
- ctx->unhash(sk);
+ ctx->sk_proto->unhash(sk);
}
static int tls_hw_hash(struct sock *sk)
@@ -726,7 +709,7 @@ static int tls_hw_hash(struct sock *sk)
struct tls_device *dev;
int err;
- err = ctx->hash(sk);
+ err = ctx->sk_proto->hash(sk);
spin_lock_bh(&device_spinlock);
list_for_each_entry(dev, &device_list, dev_list) {
if (dev->hash) {
@@ -816,7 +799,6 @@ static int tls_init(struct sock *sk)
ctx->tx_conf = TLS_BASE;
ctx->rx_conf = TLS_BASE;
- ctx->sk_proto = sk->sk_prot;
update_sk_prot(sk, ctx);
out:
write_unlock_bh(&sk->sk_callback_lock);
@@ -828,12 +810,10 @@ static void tls_update(struct sock *sk, struct proto *p)
struct tls_context *ctx;
ctx = tls_get_ctx(sk);
- if (likely(ctx)) {
- ctx->sk_proto_close = p->close;
+ if (likely(ctx))
ctx->sk_proto = p;
- } else {
+ else
sk->sk_prot = p;
- }
}
static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
@@ -927,9 +907,7 @@ static int __init tls_register(void)
tls_sw_proto_ops = inet_stream_ops;
tls_sw_proto_ops.splice_read = tls_sw_splice_read;
-#ifdef CONFIG_TLS_DEVICE
tls_device_init();
-#endif
tcp_register_ulp(&tcp_tls_ulp_ops);
return 0;
@@ -938,9 +916,7 @@ static int __init tls_register(void)
static void __exit tls_unregister(void)
{
tcp_unregister_ulp(&tcp_tls_ulp_ops);
-#ifdef CONFIG_TLS_DEVICE
tls_device_cleanup();
-#endif
}
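
Dropping the CONFIG_TLS_DEVICE guards here and in the hunks above only compiles if the header supplies fallbacks when the option is off. The header change is not part of this section; a plausible set of stubs, assuming the usual kernel convention of static inline no-ops in include/net/tls.h, would be:

	#ifdef CONFIG_TLS_DEVICE
	void tls_device_init(void);
	void tls_device_cleanup(void);
	int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
	int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
	#else
	static inline void tls_device_init(void) {}
	static inline void tls_device_cleanup(void) {}

	static inline int
	tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
	{
		return -EOPNOTSUPP;	/* forces the TLS_SW fallback */
	}

	static inline int
	tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
	{
		return -EOPNOTSUPP;
	}
	#endif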
module_init(tls_register);
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 91d21b048a9b..c2b5e0d2ba1a 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1489,13 +1489,12 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
int pad, err = 0;
if (!ctx->decrypted) {
-#ifdef CONFIG_TLS_DEVICE
if (tls_ctx->rx_conf == TLS_HW) {
err = tls_device_decrypted(sk, skb);
if (err < 0)
return err;
}
-#endif
+
/* Still not decrypted after tls_device */
if (!ctx->decrypted) {
err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
@@ -2014,10 +2013,9 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
ret = -EINVAL;
goto read_failure;
}
-#ifdef CONFIG_TLS_DEVICE
+
tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
TCP_SKB_CB(skb)->seq + rxm->offset);
-#endif
return data_len + TLS_HEADER_SIZE;
read_failure:
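
With the #ifdefs gone, the RX decrypt path always consults the device layer first and falls back to software only when the record is still marked undecrypted. Condensing the first tls_sw.c hunk into one flow (the trailing decrypt_internal() arguments are cut off by the hunk and left elided):

	if (!ctx->decrypted) {
		if (tls_ctx->rx_conf == TLS_HW) {
			err = tls_device_decrypted(sk, skb);
			if (err < 0)
				return err;
			/* err == 0 can still leave ctx->decrypted unset,
			 * e.g. when the device handed this record back
			 * for software decryption.
			 */
		}

		/* Still not decrypted after tls_device */
		if (!ctx->decrypted)
			err = decrypt_internal(sk, skb, dest, NULL,
					       chunk, zc, /* ... */);
	}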
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 94cc0fa3e848..5bb70c692b1e 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -307,8 +307,13 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
spin_unlock_bh(&vvs->rx_lock);
- /* We send a credit update only when the space available seen
- * by the transmitter is less than VIRTIO_VSOCK_MAX_PKT_BUF_SIZE
+ /* To reduce the number of credit update messages,
+ * don't update credits as long as lots of space is available.
+ * Note: the limit chosen here is arbitrary. Setting the limit
+ * too high causes extra messages. Too low causes transmitter
+ * stalls. As stalls are in theory more expensive than extra
+ * messages, we set the limit to a high value. TODO: experiment
+ * with different values.
*/
if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
virtio_transport_send_credit_update(vsk,
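(The hunk is cut off mid-call here.) For context, a condensed sketch of the dequeue tail this comment sits in; the free_space computation is not visible in the hunk and is reconstructed on the assumption that it is the credit the transmitter still believes it has, i.e. the buffer size minus bytes consumed since the last update it was sent:

	u32 free_space;

	spin_lock_bh(&vvs->rx_lock);
	/* ... copy data to the user buffer, advancing vvs->fwd_cnt ... */
	free_space = vvs->buf_alloc - (vvs->fwd_cnt - vvs->last_fwd_cnt);
	spin_unlock_bh(&vvs->rx_lock);

	/* only top the transmitter up when it is close to stalling */
	if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
		virtio_transport_send_credit_update(vsk,
						    VIRTIO_VSOCK_TYPE_STREAM,
						    NULL);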