Diffstat (limited to 'drivers/infiniband/hw/mlx5')
-rw-r--r--  drivers/infiniband/hw/mlx5/Kconfig     |    2
-rw-r--r--  drivers/infiniband/hw/mlx5/Makefile    |    2
-rw-r--r--  drivers/infiniband/hw/mlx5/ah.c        |   11
-rw-r--r--  drivers/infiniband/hw/mlx5/cmd.c       |   27
-rw-r--r--  drivers/infiniband/hw/mlx5/cmd.h       |    2
-rw-r--r--  drivers/infiniband/hw/mlx5/cong.c      |    9
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c        |    2
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c      | 1119
-rw-r--r--  drivers/infiniband/hw/mlx5/flow.c      |  252
-rw-r--r--  drivers/infiniband/hw/mlx5/gsi.c       |    8
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c      |  608
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h   |   85
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c        |   34
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c       |    2
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c        |  294
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c       |   22
16 files changed, 2135 insertions, 344 deletions
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
index fb4d77be019b..0440966bc6ec 100644
--- a/drivers/infiniband/hw/mlx5/Kconfig
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -1,5 +1,5 @@
config MLX5_INFINIBAND
- tristate "Mellanox Connect-IB HCA support"
+ tristate "Mellanox 5th generation network adapters (ConnectX series) support"
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
depends on INFINIBAND_USER_ACCESS || INFINIBAND_USER_ACCESS=n
---help---
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index d42b922bede8..b8e4b15e2674 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -3,3 +3,5 @@ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o cong.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
+mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o
+mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += flow.o
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index e6bde32a83f3..ffd03bf1a71e 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -37,7 +37,6 @@ static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
struct rdma_ah_attr *ah_attr)
{
enum ib_gid_type gid_type;
- int err;
if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
@@ -53,18 +52,12 @@ static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
- err = mlx5_get_roce_gid_type(dev, ah_attr->port_num,
- ah_attr->grh.sgid_index,
- &gid_type);
- if (err)
- return ERR_PTR(err);
+ gid_type = ah_attr->grh.sgid_attr->gid_type;
memcpy(ah->av.rmac, ah_attr->roce.dmac,
sizeof(ah_attr->roce.dmac));
ah->av.udp_sport =
- mlx5_get_roce_udp_sport(dev,
- rdma_ah_get_port_num(ah_attr),
- rdma_ah_read_grh(ah_attr)->sgid_index);
+ mlx5_get_roce_udp_sport(dev, ah_attr->grh.sgid_attr);
ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0x7) << 1;
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
#define MLX5_ECN_ENABLED BIT(1)
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 188512bf46e6..c84fef9a8a08 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -32,6 +32,21 @@
#include "cmd.h"
+int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
+{
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
+ int err;
+
+ MLX5_SET(query_special_contexts_in, in, opcode,
+ MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *mkey = MLX5_GET(query_special_contexts_out, out,
+ dump_fill_mkey);
+ return err;
+}
+
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
{
u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
@@ -170,3 +185,15 @@ int mlx5_cmd_dealloc_memic(struct mlx5_memic *memic, u64 addr, u64 length)
return err;
}
+
+int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
+{
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
+ return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPCNT,
+ 0, 0);
+}
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index e7206c8a8011..88cbb1c41703 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -37,9 +37,11 @@
#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
+int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey);
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
void *out, int out_size);
+int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out);
int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
void *in, int in_size);
int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr,
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index 985fa2637390..7e4e358a4fd8 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -359,9 +359,6 @@ static ssize_t get_param(struct file *filp, char __user *buf, size_t count,
int ret;
char lbuf[11];
- if (*pos)
- return 0;
-
ret = mlx5_ib_get_cc_params(param->dev, param->port_num, offset, &var);
if (ret)
return ret;
@@ -370,11 +367,7 @@ static ssize_t get_param(struct file *filp, char __user *buf, size_t count,
if (ret < 0)
return ret;
- if (copy_to_user(buf, lbuf, ret))
- return -EFAULT;
-
- *pos += ret;
- return ret;
+ return simple_read_from_buffer(buf, count, pos, lbuf, ret);
}
static const struct file_operations dbg_cc_fops = {
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index ad39d64b8108..088205d7f1a1 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -1184,7 +1184,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
int err;
if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
- return -ENOSYS;
+ return -EOPNOTSUPP;
if (cq_period > MLX5_MAX_CQ_PERIOD)
return -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
new file mode 100644
index 000000000000..ac116d63e466
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -0,0 +1,1119 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/uverbs_types.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/ib_umem.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_ib.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
+struct devx_obj {
+ struct mlx5_core_dev *mdev;
+ u32 obj_id;
+ u32 dinlen; /* destroy inbox length */
+ u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
+};
+
+struct devx_umem {
+ struct mlx5_core_dev *mdev;
+ struct ib_umem *umem;
+ u32 page_offset;
+ int page_shift;
+ int ncont;
+ u32 dinlen;
+ u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
+};
+
+struct devx_umem_reg_cmd {
+ void *in;
+ u32 inlen;
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+};
+
+static struct mlx5_ib_ucontext *devx_ufile2uctx(struct ib_uverbs_file *file)
+{
+ return to_mucontext(ib_uverbs_get_ucontext(file));
+}
+
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+{
+ u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u64 general_obj_types;
+ void *hdr;
+ int err;
+
+ hdr = MLX5_ADDR_OF(create_uctx_in, in, hdr);
+
+ general_obj_types = MLX5_CAP_GEN_64(dev->mdev, general_obj_types);
+ if (!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UCTX) ||
+ !(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UMEM))
+ return -EINVAL;
+
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_UCTX);
+
+ err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ context->devx_uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ return 0;
+}
+
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, context->devx_uid);
+
+ mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+}
+
+bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
+{
+ struct devx_obj *devx_obj = obj;
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_DESTROY_TIR:
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
+ obj_id);
+ return true;
+
+ case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
+ table_id);
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+ u32 obj_id;
+
+ switch (opcode) {
+ case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
+ break;
+ case MLX5_CMD_OP_QUERY_MKEY:
+ obj_id = MLX5_GET(query_mkey_in, in, mkey_index);
+ break;
+ case MLX5_CMD_OP_QUERY_CQ:
+ obj_id = MLX5_GET(query_cq_in, in, cqn);
+ break;
+ case MLX5_CMD_OP_MODIFY_CQ:
+ obj_id = MLX5_GET(modify_cq_in, in, cqn);
+ break;
+ case MLX5_CMD_OP_QUERY_SQ:
+ obj_id = MLX5_GET(query_sq_in, in, sqn);
+ break;
+ case MLX5_CMD_OP_MODIFY_SQ:
+ obj_id = MLX5_GET(modify_sq_in, in, sqn);
+ break;
+ case MLX5_CMD_OP_QUERY_RQ:
+ obj_id = MLX5_GET(query_rq_in, in, rqn);
+ break;
+ case MLX5_CMD_OP_MODIFY_RQ:
+ obj_id = MLX5_GET(modify_rq_in, in, rqn);
+ break;
+ case MLX5_CMD_OP_QUERY_RMP:
+ obj_id = MLX5_GET(query_rmp_in, in, rmpn);
+ break;
+ case MLX5_CMD_OP_MODIFY_RMP:
+ obj_id = MLX5_GET(modify_rmp_in, in, rmpn);
+ break;
+ case MLX5_CMD_OP_QUERY_RQT:
+ obj_id = MLX5_GET(query_rqt_in, in, rqtn);
+ break;
+ case MLX5_CMD_OP_MODIFY_RQT:
+ obj_id = MLX5_GET(modify_rqt_in, in, rqtn);
+ break;
+ case MLX5_CMD_OP_QUERY_TIR:
+ obj_id = MLX5_GET(query_tir_in, in, tirn);
+ break;
+ case MLX5_CMD_OP_MODIFY_TIR:
+ obj_id = MLX5_GET(modify_tir_in, in, tirn);
+ break;
+ case MLX5_CMD_OP_QUERY_TIS:
+ obj_id = MLX5_GET(query_tis_in, in, tisn);
+ break;
+ case MLX5_CMD_OP_MODIFY_TIS:
+ obj_id = MLX5_GET(modify_tis_in, in, tisn);
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+ obj_id = MLX5_GET(query_flow_table_in, in, table_id);
+ break;
+ case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+ obj_id = MLX5_GET(modify_flow_table_in, in, table_id);
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+ obj_id = MLX5_GET(query_flow_group_in, in, group_id);
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ obj_id = MLX5_GET(query_fte_in, in, flow_index);
+ break;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ obj_id = MLX5_GET(set_fte_in, in, flow_index);
+ break;
+ case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ obj_id = MLX5_GET(query_q_counter_in, in, counter_set_id);
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+ obj_id = MLX5_GET(query_flow_counter_in, in, flow_counter_id);
+ break;
+ case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
+ obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
+ break;
+ case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+ obj_id = MLX5_GET(query_scheduling_element_in, in,
+ scheduling_element_id);
+ break;
+ case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+ obj_id = MLX5_GET(modify_scheduling_element_in, in,
+ scheduling_element_id);
+ break;
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
+ break;
+ case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ obj_id = MLX5_GET(query_l2_table_entry_in, in, table_index);
+ break;
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
+ break;
+ case MLX5_CMD_OP_QUERY_QP:
+ obj_id = MLX5_GET(query_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_RST2INIT_QP:
+ obj_id = MLX5_GET(rst2init_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ obj_id = MLX5_GET(init2rtr_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ obj_id = MLX5_GET(rtr2rts_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ obj_id = MLX5_GET(rts2rts_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ obj_id = MLX5_GET(sqerr2rts_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_2ERR_QP:
+ obj_id = MLX5_GET(qp_2err_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_2RST_QP:
+ obj_id = MLX5_GET(qp_2rst_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_QUERY_DCT:
+ obj_id = MLX5_GET(query_dct_in, in, dctn);
+ break;
+ case MLX5_CMD_OP_QUERY_XRQ:
+ obj_id = MLX5_GET(query_xrq_in, in, xrqn);
+ break;
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ obj_id = MLX5_GET(query_xrc_srq_in, in, xrc_srqn);
+ break;
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ obj_id = MLX5_GET(arm_xrc_srq_in, in, xrc_srqn);
+ break;
+ case MLX5_CMD_OP_QUERY_SRQ:
+ obj_id = MLX5_GET(query_srq_in, in, srqn);
+ break;
+ case MLX5_CMD_OP_ARM_RQ:
+ obj_id = MLX5_GET(arm_rq_in, in, srq_number);
+ break;
+ case MLX5_CMD_OP_DRAIN_DCT:
+ case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+ obj_id = MLX5_GET(drain_dct_in, in, dctn);
+ break;
+ case MLX5_CMD_OP_ARM_XRQ:
+ obj_id = MLX5_GET(arm_xrq_in, in, xrqn);
+ break;
+ default:
+ return false;
+ }
+
+ if (obj_id == obj->obj_id)
+ return true;
+
+ return false;
+}
+
+static bool devx_is_obj_create_cmd(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ case MLX5_CMD_OP_CREATE_MKEY:
+ case MLX5_CMD_OP_CREATE_CQ:
+ case MLX5_CMD_OP_ALLOC_PD:
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ case MLX5_CMD_OP_CREATE_RMP:
+ case MLX5_CMD_OP_CREATE_SQ:
+ case MLX5_CMD_OP_CREATE_RQ:
+ case MLX5_CMD_OP_CREATE_RQT:
+ case MLX5_CMD_OP_CREATE_TIR:
+ case MLX5_CMD_OP_CREATE_TIS:
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+ case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_CREATE_QP:
+ case MLX5_CMD_OP_CREATE_SRQ:
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ case MLX5_CMD_OP_CREATE_DCT:
+ case MLX5_CMD_OP_CREATE_XRQ:
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ return true;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ {
+ u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
+ if (op_mod == 0)
+ return true;
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool devx_is_obj_modify_cmd(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_MODIFY_CQ:
+ case MLX5_CMD_OP_MODIFY_RMP:
+ case MLX5_CMD_OP_MODIFY_SQ:
+ case MLX5_CMD_OP_MODIFY_RQ:
+ case MLX5_CMD_OP_MODIFY_RQT:
+ case MLX5_CMD_OP_MODIFY_TIR:
+ case MLX5_CMD_OP_MODIFY_TIS:
+ case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+ case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_RST2INIT_QP:
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ case MLX5_CMD_OP_2ERR_QP:
+ case MLX5_CMD_OP_2RST_QP:
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ case MLX5_CMD_OP_ARM_RQ:
+ case MLX5_CMD_OP_DRAIN_DCT:
+ case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+ case MLX5_CMD_OP_ARM_XRQ:
+ return true;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ {
+ u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
+
+ if (op_mod == 1)
+ return true;
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool devx_is_obj_query_cmd(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_QUERY_MKEY:
+ case MLX5_CMD_OP_QUERY_CQ:
+ case MLX5_CMD_OP_QUERY_RMP:
+ case MLX5_CMD_OP_QUERY_SQ:
+ case MLX5_CMD_OP_QUERY_RQ:
+ case MLX5_CMD_OP_QUERY_RQT:
+ case MLX5_CMD_OP_QUERY_TIR:
+ case MLX5_CMD_OP_QUERY_TIS:
+ case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+ case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+ case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
+ case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_QP:
+ case MLX5_CMD_OP_QUERY_SRQ:
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ case MLX5_CMD_OP_QUERY_DCT:
+ case MLX5_CMD_OP_QUERY_XRQ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool devx_is_general_cmd(void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_VPORT_STATE:
+ case MLX5_CMD_OP_QUERY_ADAPTER:
+ case MLX5_CMD_OP_QUERY_ISSI:
+ case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
+ case MLX5_CMD_OP_QUERY_VNIC_ENV:
+ case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+ case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
+ case MLX5_CMD_OP_NOP:
+ case MLX5_CMD_OP_QUERY_CONG_STATUS:
+ case MLX5_CMD_OP_QUERY_CONG_PARAMS:
+ case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ int user_vector;
+ int dev_eqn;
+ unsigned int irqn;
+ int err;
+
+ if (uverbs_copy_from(&user_vector, attrs,
+ MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
+ return -EFAULT;
+
+ c = devx_ufile2uctx(file);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
+ if (err < 0)
+ return err;
+
+ if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
+ &dev_eqn, sizeof(dev_eqn)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Security note:
+ * The hardware protection mechanism works like this: each device object that
+ * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
+ * the device specification manual) upon its creation. Then, upon a doorbell,
+ * hardware fetches the object context for which the doorbell was rung and
+ * validates that the UAR through which the DB was rung matches the UAR ID
+ * of the object.
+ * If they do not match, the doorbell is silently ignored by the hardware.
+ * Of course, the user cannot ring a doorbell on a UAR that was not mapped
+ * to it.
+ * Now, in devx, since the devx kernel code does not manipulate the QP/SQ/CQ
+ * command mailboxes (except to tag them with the UID), we expose the UAR ID
+ * to the user so it can embed it in these objects in the expected
+ * specification format. The only thing a user can do is harm itself by
+ * creating a QP/SQ/CQ with a UAR ID other than its own; in that case other
+ * users may ring a doorbell on its objects.
+ * The consequence is that another user can schedule a QP/SQ of the buggy
+ * user for execution (just insert it into the hardware schedule queue or
+ * arm its CQ for event generation); no further harm is expected.
+ */
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ u32 user_idx;
+ s32 dev_idx;
+
+ c = devx_ufile2uctx(file);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ if (uverbs_copy_from(&user_idx, attrs,
+ MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
+ return -EFAULT;
+
+ dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
+ if (dev_idx < 0)
+ return dev_idx;
+
+ if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
+ &dev_idx, sizeof(dev_idx)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ void *cmd_in = uverbs_attr_get_alloced_ptr(
+ attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
+ void *cmd_out;
+ int err;
+
+ c = devx_ufile2uctx(file);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ if (!c->devx_uid)
+ return -EPERM;
+
+ /* Only a whitelist of general HCA commands is allowed for this method. */
+ if (!devx_is_general_cmd(cmd_in))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ err = mlx5_cmd_exec(dev->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err)
+ return err;
+
+ return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
+ cmd_out_len);
+}
+
+static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
+ u32 *dinlen,
+ u32 *obj_id)
+{
+ u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
+ u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);
+
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ *dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);
+
+ switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
+ break;
+
+ case MLX5_CMD_OP_CREATE_MKEY:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
+ break;
+ case MLX5_CMD_OP_CREATE_CQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
+ break;
+ case MLX5_CMD_OP_ALLOC_PD:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ break;
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ break;
+ case MLX5_CMD_OP_CREATE_RMP:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ break;
+ case MLX5_CMD_OP_CREATE_SQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ break;
+ case MLX5_CMD_OP_CREATE_RQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ break;
+ case MLX5_CMD_OP_CREATE_RQT:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ break;
+ case MLX5_CMD_OP_CREATE_TIR:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ break;
+ case MLX5_CMD_OP_CREATE_TIS:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ break;
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ break;
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
+ *obj_id = MLX5_GET(create_flow_table_out, out, table_id);
+ MLX5_SET(destroy_flow_table_in, din, other_vport,
+ MLX5_GET(create_flow_table_in, in, other_vport));
+ MLX5_SET(destroy_flow_table_in, din, vport_number,
+ MLX5_GET(create_flow_table_in, in, vport_number));
+ MLX5_SET(destroy_flow_table_in, din, table_type,
+ MLX5_GET(create_flow_table_in, in, table_type));
+ MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+ break;
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
+ *obj_id = MLX5_GET(create_flow_group_out, out, group_id);
+ MLX5_SET(destroy_flow_group_in, din, other_vport,
+ MLX5_GET(create_flow_group_in, in, other_vport));
+ MLX5_SET(destroy_flow_group_in, din, vport_number,
+ MLX5_GET(create_flow_group_in, in, vport_number));
+ MLX5_SET(destroy_flow_group_in, din, table_type,
+ MLX5_GET(create_flow_group_in, in, table_type));
+ MLX5_SET(destroy_flow_group_in, din, table_id,
+ MLX5_GET(create_flow_group_in, in, table_id));
+ MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ break;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ *dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
+ *obj_id = MLX5_GET(set_fte_in, in, flow_index);
+ MLX5_SET(delete_fte_in, din, other_vport,
+ MLX5_GET(set_fte_in, in, other_vport));
+ MLX5_SET(delete_fte_in, din, vport_number,
+ MLX5_GET(set_fte_in, in, vport_number));
+ MLX5_SET(delete_fte_in, din, table_type,
+ MLX5_GET(set_fte_in, in, table_type));
+ MLX5_SET(delete_fte_in, din, table_id,
+ MLX5_GET(set_fte_in, in, table_id));
+ MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+ break;
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
+ break;
+ case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
+ break;
+ case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
+ break;
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ *dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
+ *obj_id = MLX5_GET(create_scheduling_element_out, out,
+ scheduling_element_id);
+ MLX5_SET(destroy_scheduling_element_in, din,
+ scheduling_hierarchy,
+ MLX5_GET(create_scheduling_element_in, in,
+ scheduling_hierarchy));
+ MLX5_SET(destroy_scheduling_element_in, din,
+ scheduling_element_id, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
+ break;
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ *dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
+ *obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
+ MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
+ break;
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ *dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
+ *obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
+ MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
+ break;
+ case MLX5_CMD_OP_CREATE_QP:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+ break;
+ case MLX5_CMD_OP_CREATE_SRQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
+ break;
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ break;
+ case MLX5_CMD_OP_CREATE_DCT:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ break;
+ case MLX5_CMD_OP_CREATE_XRQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
+ break;
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ *dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
+ MLX5_SET(detach_from_mcg_in, din, qpn,
+ MLX5_GET(attach_to_mcg_in, in, qpn));
+ memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
+ MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
+ MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
+ break;
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+ break;
+ default:
+ /* The entry must match one of the cases handled in devx_is_obj_create_cmd() */
+ WARN_ON(true);
+ break;
+ }
+}
+
+static int devx_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ struct devx_obj *obj = uobject->object;
+ int ret;
+
+ ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ kfree(obj);
+ return ret;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
+ void *cmd_out;
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
+ struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
+ struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ struct devx_obj *obj;
+ int err;
+
+ if (!c->devx_uid)
+ return -EPERM;
+
+ if (!devx_is_obj_create_cmd(cmd_in))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ err = mlx5_cmd_exec(dev->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err)
+ goto obj_free;
+
+ uobj->object = obj;
+ obj->mdev = dev->mdev;
+ devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, &obj->obj_id);
+ WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
+ if (err)
+ goto obj_free;
+
+ return 0;
+
+obj_free:
+ kfree(obj);
+ return err;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
+ struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
+ struct devx_obj *obj = uobj->object;
+ void *cmd_out;
+ int err;
+
+ if (!c->devx_uid)
+ return -EPERM;
+
+ if (!devx_is_obj_modify_cmd(cmd_in))
+ return -EINVAL;
+
+ if (!devx_is_valid_obj_id(obj, cmd_in))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ err = mlx5_cmd_exec(obj->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err)
+ return err;
+
+ return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
+ cmd_out, cmd_out_len);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
+ struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
+ struct devx_obj *obj = uobj->object;
+ void *cmd_out;
+ int err;
+
+ if (!c->devx_uid)
+ return -EPERM;
+
+ if (!devx_is_obj_query_cmd(cmd_in))
+ return -EINVAL;
+
+ if (!devx_is_valid_obj_id(obj, cmd_in))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ err = mlx5_cmd_exec(obj->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err)
+ return err;
+
+ return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
+ cmd_out, cmd_out_len);
+}
+
+static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
+ struct uverbs_attr_bundle *attrs,
+ struct devx_umem *obj)
+{
+ u64 addr;
+ size_t size;
+ u32 access;
+ int npages;
+ int err;
+ u32 page_mask;
+
+ if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
+ uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
+ return -EFAULT;
+
+ err = uverbs_get_flags32(&access, attrs,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
+ IB_ACCESS_SUPPORTED);
+ if (err)
+ return err;
+
+ err = ib_check_mr_access(access);
+ if (err)
+ return err;
+
+ obj->umem = ib_umem_get(ucontext, addr, size, access, 0);
+ if (IS_ERR(obj->umem))
+ return PTR_ERR(obj->umem);
+
+ mlx5_ib_cont_pages(obj->umem, obj->umem->address,
+ MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
+ &obj->page_shift, &obj->ncont, NULL);
+
+ if (!npages) {
+ ib_umem_release(obj->umem);
+ return -EINVAL;
+ }
+
+ page_mask = (1 << obj->page_shift) - 1;
+ obj->page_offset = obj->umem->address & page_mask;
+
+ return 0;
+}
+
+static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
+ struct devx_umem *obj,
+ struct devx_umem_reg_cmd *cmd)
+{
+ cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
+ (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
+ cmd->in = uverbs_zalloc(attrs, cmd->inlen);
+ return PTR_ERR_OR_ZERO(cmd->in);
+}
+
+static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
+ struct devx_umem *obj,
+ struct devx_umem_reg_cmd *cmd)
+{
+ void *umem;
+ __be64 *mtt;
+
+ umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
+ mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd->in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd->in, obj_type, MLX5_OBJ_TYPE_UMEM);
+ MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
+ MLX5_SET(umem, umem, log_page_size, obj->page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(umem, umem, page_offset, obj->page_offset);
+ mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
+ (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
+ MLX5_IB_MTT_READ);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct devx_umem_reg_cmd cmd;
+ struct devx_umem *obj;
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
+ u32 obj_id;
+ struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
+ struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ int err;
+
+ if (!c->devx_uid)
+ return -EPERM;
+
+ obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
+ if (err)
+ goto err_obj_free;
+
+ err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
+ if (err)
+ goto err_umem_release;
+
+ devx_umem_reg_cmd_build(dev, obj, &cmd);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd.in, uid, c->devx_uid);
+ err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
+ sizeof(cmd.out));
+ if (err)
+ goto err_umem_release;
+
+ obj->mdev = dev->mdev;
+ uobj->object = obj;
+ devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
+ if (err)
+ goto err_umem_destroy;
+
+ return 0;
+
+err_umem_destroy:
+ mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
+err_umem_release:
+ ib_umem_release(obj->umem);
+err_obj_free:
+ kfree(obj);
+ return err;
+}
+
+static int devx_umem_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why)
+{
+ struct devx_umem *obj = uobject->object;
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ int err;
+
+ err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+ if (ib_is_destroy_retryable(err, why, uobject))
+ return err;
+
+ ib_umem_release(obj->umem);
+ kfree(obj);
+ return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_UMEM_REG,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
+ MLX5_IB_OBJECT_DEVX_UMEM,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
+ enum ib_access_flags),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_DEVX_UMEM_DEREG,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
+ MLX5_IB_OBJECT_DEVX_UMEM,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_QUERY_EQN,
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_QUERY_UAR,
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OTHER,
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_CREATE,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_WRITE,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_QUERY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
+ UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
+
+DECLARE_UVERBS_OBJECT_TREE(devx_objects,
+ &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX),
+ &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ),
+ &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM));
+
+const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void)
+{
+ return &devx_objects;
+}
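
A rough userspace sketch of how the DEVX path above is meant to be driven (an illustration only, not part of this patch): the application opens the device with DEVX enabled, builds a command mailbox in the device specification layout, and issues it through the MLX5_IB_METHOD_DEVX_OTHER channel. The rdma-core wrappers mlx5dv_open_device()/mlx5dv_devx_general_cmd() and the MLX5DV_CONTEXT_FLAGS_DEVX flag are assumed here from rdma-core's DEVX support, and the PRM accessor macros (MLX5_SET/MLX5_ST_SZ_DW) are assumed to be available to the application (e.g., via a copy of mlx5_ifc.h). Only whitelisted opcodes (see devx_is_general_cmd()) are accepted by the kernel.

	/*
	 * Illustration only -- not part of this patch. Assumes rdma-core's
	 * mlx5dv DEVX wrappers and a userspace copy of the PRM layouts.
	 */
	#include <errno.h>
	#include <infiniband/verbs.h>
	#include <infiniband/mlx5dv.h>

	static int devx_query_hca_cap(struct ibv_device *ibdev)
	{
		struct mlx5dv_context_attr attr = {
			.flags = MLX5DV_CONTEXT_FLAGS_DEVX, /* requests a devx_uid */
		};
		uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
		uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {};
		struct ibv_context *ctx;
		int err;

		ctx = mlx5dv_open_device(ibdev, &attr);
		if (!ctx)
			return -errno;

		/* Whitelisted general command; the kernel stamps the uid. */
		MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
		err = mlx5dv_devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));

		ibv_close_device(ctx);
		return err;
	}
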
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
new file mode 100644
index 000000000000..1a29f47f836e
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/uverbs_types.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/ib_umem.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_ib.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
+ [MLX5_IB_FLOW_TYPE_NORMAL] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ .u.ptr = {
+ .len = sizeof(u16), /* data is priority */
+ .min_len = sizeof(u16),
+ }
+ },
+ [MLX5_IB_FLOW_TYPE_SNIFFER] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+ [MLX5_IB_FLOW_TYPE_ALL_DEFAULT] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+ [MLX5_IB_FLOW_TYPE_MC_DEFAULT] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+};
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_flow_handler *flow_handler;
+ struct mlx5_ib_flow_matcher *fs_matcher;
+ void *devx_obj;
+ int dest_id, dest_type;
+ void *cmd_in;
+ int inlen;
+ bool dest_devx, dest_qp;
+ struct ib_qp *qp = NULL;
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
+ struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
+
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+
+ dest_devx =
+ uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
+ dest_qp = uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
+
+ if ((dest_devx && dest_qp) || (!dest_devx && !dest_qp))
+ return -EINVAL;
+
+ if (dest_devx) {
+ devx_obj = uverbs_attr_get_obj(
+ attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
+ if (IS_ERR(devx_obj))
+ return PTR_ERR(devx_obj);
+
+ /* Verify that the given DEVX object is a flow
+ * steering destination.
+ */
+ if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type))
+ return -EINVAL;
+ } else {
+ struct mlx5_ib_qp *mqp;
+
+ qp = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
+ if (IS_ERR(qp))
+ return PTR_ERR(qp);
+
+ if (qp->qp_type != IB_QPT_RAW_PACKET)
+ return -EINVAL;
+
+ mqp = to_mqp(qp);
+ if (mqp->flags & MLX5_IB_QP_RSS)
+ dest_id = mqp->rss_qp.tirn;
+ else
+ dest_id = mqp->raw_packet_qp.rq.tirn;
+ dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ }
+
+ if (dev->rep)
+ return -ENOTSUPP;
+
+ cmd_in = uverbs_attr_get_alloced_ptr(
+ attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
+ inlen = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
+ fs_matcher = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
+ flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher, cmd_in, inlen,
+ dest_id, dest_type);
+ if (IS_ERR(flow_handler))
+ return PTR_ERR(flow_handler);
+
+ ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev);
+
+ return 0;
+}
+
+static int flow_matcher_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why)
+{
+ struct mlx5_ib_flow_matcher *obj = uobject->object;
+ int ret;
+
+ ret = ib_destroy_usecnt(&obj->usecnt, why, uobject);
+ if (ret)
+ return ret;
+
+ kfree(obj);
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE);
+ struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
+ struct mlx5_ib_flow_matcher *obj;
+ int err;
+
+ obj = kzalloc(sizeof(struct mlx5_ib_flow_matcher), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ obj->mask_len = uverbs_attr_get_len(
+ attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
+ err = uverbs_copy_from(&obj->matcher_mask,
+ attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
+ if (err)
+ goto end;
+
+ obj->flow_type = uverbs_attr_get_enum_id(
+ attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
+
+ if (obj->flow_type == MLX5_IB_FLOW_TYPE_NORMAL) {
+ err = uverbs_copy_from(&obj->priority,
+ attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
+ if (err)
+ goto end;
+ }
+
+ err = uverbs_copy_from(&obj->match_criteria_enable,
+ attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA);
+ if (err)
+ goto end;
+
+ uobj->object = obj;
+ obj->mdev = dev->mdev;
+ atomic_set(&obj->usecnt, 0);
+ return 0;
+
+end:
+ kfree(obj);
+ return err;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_CREATE_FLOW,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
+ UVERBS_OBJECT_FLOW,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE,
+ UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_MATCHER,
+ MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_QP,
+ UVERBS_OBJECT_QP,
+ UVERBS_ACCESS_READ),
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_READ));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_DESTROY_FLOW,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
+ UVERBS_OBJECT_FLOW,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(mlx5_ib_fs,
+ UVERBS_OBJECT_FLOW,
+ &UVERBS_METHOD(MLX5_IB_METHOD_CREATE_FLOW),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_FLOW_MATCHER_CREATE,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE,
+ MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK,
+ UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
+ UA_MANDATORY),
+ UVERBS_ATTR_ENUM_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
+ mlx5_ib_flow_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
+ UVERBS_ATTR_TYPE(u8),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_TYPE_ALLOC_IDR(flow_matcher_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));
+
+DECLARE_UVERBS_OBJECT_TREE(flow_objects,
+ &UVERBS_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER));
+
+int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
+{
+ int i = 0;
+
+ root[i++] = &flow_objects;
+ root[i++] = &mlx5_ib_fs;
+
+ return i;
+}
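
For context on the attribute layout used above (a sketch, not part of this patch): the MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK and MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE buffers are raw fte_match_param blobs in the device specification layout, composed with the same MLX5_SET()/MLX5_ADDR_OF() accessors that parse_flow_attr() in main.c uses. A minimal kernel-style example that matches TCP on the outer headers:

	/*
	 * Sketch only -- not part of this patch. Builds an fte_match_param
	 * mask/value pair (MLX5_ST_SZ_BYTES(fte_match_param) bytes each) in
	 * the device specification layout expected by the matcher and flow
	 * creation attributes.
	 */
	static void build_tcp_match(void *match_c, void *match_v,
				    u8 *match_criteria_enable)
	{
		void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers);
		void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers);

		/* Match exactly on the IP protocol field: TCP. */
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);

		/* Enable the outer_headers criteria group for this matcher. */
		*match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	}
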
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 79e6309460dc..4950df3f71b6 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -477,8 +477,8 @@ static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
return gsi->tx_qps[qp_index];
}
-int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
struct ib_qp *tx_qp;
@@ -522,8 +522,8 @@ err:
return ret;
}
-int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e52dd21519b4..c414f3809e5c 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -419,8 +419,8 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
&props->active_width);
- props->port_cap_flags |= IB_PORT_CM_SUP;
- props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;
+ props->port_cap_flags |= IB_PORT_CM_SUP;
+ props->ip_gids = true;
props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
roce_address_table_size);
@@ -510,12 +510,11 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
vlan_id, port_num);
}
-static int mlx5_ib_add_gid(const union ib_gid *gid,
- const struct ib_gid_attr *attr,
+static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
__always_unused void **context)
{
return set_roce_addr(to_mdev(attr->device), attr->port_num,
- attr->index, gid, attr);
+ attr->index, &attr->gid, attr);
}
static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
@@ -525,41 +524,15 @@ static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
attr->index, NULL, NULL);
}
-__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
- int index)
+__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
+ const struct ib_gid_attr *attr)
{
- struct ib_gid_attr attr;
- union ib_gid gid;
-
- if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
- return 0;
-
- dev_put(attr.ndev);
-
- if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
+ if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
return 0;
return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}
-int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
- int index, enum ib_gid_type *gid_type)
-{
- struct ib_gid_attr attr;
- union ib_gid gid;
- int ret;
-
- ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
- if (ret)
- return ret;
-
- dev_put(attr.ndev);
-
- *gid_type = attr.gid_type;
-
- return 0;
-}
-
static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
@@ -915,7 +888,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
sizeof(struct mlx5_wqe_raddr_seg)) /
sizeof(struct mlx5_wqe_data_seg);
- props->max_sge = min(max_rq_sg, max_sq_sg);
+ props->max_send_sge = max_sq_sg;
+ props->max_recv_sge = max_rq_sg;
props->max_sge_rd = MLX5_MAX_SGE_RD;
props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
@@ -1246,7 +1220,6 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
props->qkey_viol_cntr = rep->qkey_violation_counter;
props->subnet_timeout = rep->subnet_timeout;
props->init_type_reply = rep->init_type_reply;
- props->grh_required = rep->grh_required;
err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
if (err)
@@ -1585,31 +1558,26 @@ error:
return err;
}
-static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+static void deallocate_uars(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context)
{
struct mlx5_bfreg_info *bfregi;
- int err;
int i;
bfregi = &context->bfregi;
- for (i = 0; i < bfregi->num_sys_pages; i++) {
+ for (i = 0; i < bfregi->num_sys_pages; i++)
if (i < bfregi->num_static_sys_pages ||
- bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX) {
- err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
- if (err) {
- mlx5_ib_warn(dev, "failed to free uar %d, err=%d\n", i, err);
- return err;
- }
- }
- }
-
- return 0;
+ bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
+ mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
}
static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
{
int err;
+ if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
+ return 0;
+
err = mlx5_core_alloc_transport_domain(dev->mdev, tdn);
if (err)
return err;
@@ -1631,6 +1599,9 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
{
+ if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
+ return;
+
mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
@@ -1660,6 +1631,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
int err;
size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
max_cqe_version);
+ u32 dump_fill_mkey;
bool lib_uar_4k;
if (!dev->ib_active)
@@ -1676,8 +1648,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (err)
return ERR_PTR(err);
- if (req.flags)
- return ERR_PTR(-EINVAL);
+ if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
+ return ERR_PTR(-EOPNOTSUPP);
if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
return ERR_PTR(-EOPNOTSUPP);
@@ -1755,10 +1727,26 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif
- if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
- err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
+ err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
+ if (err)
+ goto out_uars;
+
+ if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
+ /* Block DEVX on InfiniBand ports due to SELinux restrictions */
+ if (mlx5_ib_port_link_layer(ibdev, 1) != IB_LINK_LAYER_ETHERNET) {
+ err = -EPERM;
+ goto out_td;
+ }
+
+ err = mlx5_ib_devx_create(dev, context);
if (err)
- goto out_uars;
+ goto out_td;
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
+ err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
+ if (err)
+ goto out_mdev;
}
INIT_LIST_HEAD(&context->vma_private_list);
@@ -1819,9 +1807,18 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.response_length += sizeof(resp.num_dyn_bfregs);
}
+ if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) {
+ if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
+ resp.dump_fill_mkey = dump_fill_mkey;
+ resp.comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
+ }
+ resp.response_length += sizeof(resp.dump_fill_mkey);
+ }
+
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
- goto out_td;
+ goto out_mdev;
bfregi->ver = ver;
bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
@@ -1831,9 +1828,11 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
return &context->ibucontext;
+out_mdev:
+ if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
+ mlx5_ib_devx_destroy(dev, context);
out_td:
- if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
- mlx5_ib_dealloc_transport_domain(dev, context->tdn);
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn);
out_uars:
deallocate_uars(dev, context);
@@ -1856,9 +1855,11 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
struct mlx5_bfreg_info *bfregi;
+ if (context->devx_uid)
+ mlx5_ib_devx_destroy(dev, context);
+
bfregi = &context->bfregi;
- if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
- mlx5_ib_dealloc_transport_domain(dev, context->tdn);
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn);
deallocate_uars(dev, context);
kfree(bfregi->sys_pages);
@@ -2040,7 +2041,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
struct mlx5_bfreg_info *bfregi = &context->bfregi;
int err;
unsigned long idx;
- phys_addr_t pfn, pa;
+ phys_addr_t pfn;
pgprot_t prot;
u32 bfreg_dyn_idx = 0;
u32 uar_index;
@@ -2131,8 +2132,6 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
goto err;
}
- pa = pfn << PAGE_SHIFT;
-
err = mlx5_ib_set_vma_data(vma, context);
if (err)
goto err;
@@ -2699,7 +2698,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
IPPROTO_GRE);
MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
- 0xffff);
+ ntohs(ib_spec->gre.mask.protocol));
MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
ntohs(ib_spec->gre.val.protocol));
@@ -2979,11 +2978,11 @@ static void counters_clear_description(struct ib_counters *counters)
static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
{
- struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
struct mlx5_ib_flow_handler *handler = container_of(flow_id,
struct mlx5_ib_flow_handler,
ibflow);
struct mlx5_ib_flow_handler *iter, *tmp;
+ struct mlx5_ib_dev *dev = handler->dev;
mutex_lock(&dev->flow_db->lock);
@@ -3001,6 +3000,8 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
counters_clear_description(handler->ibcounters);
mutex_unlock(&dev->flow_db->lock);
+ if (handler->flow_matcher)
+ atomic_dec(&handler->flow_matcher->usecnt);
kfree(handler);
return 0;
@@ -3021,6 +3022,26 @@ enum flow_table_type {
#define MLX5_FS_MAX_TYPES 6
#define MLX5_FS_MAX_ENTRIES BIT(16)
+
+static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
+ struct mlx5_ib_flow_prio *prio,
+ int priority,
+ int num_entries, int num_groups)
+{
+ struct mlx5_flow_table *ft;
+
+ ft = mlx5_create_auto_grouped_flow_table(ns, priority,
+ num_entries,
+ num_groups,
+ 0, 0);
+ if (IS_ERR(ft))
+ return ERR_CAST(ft);
+
+ prio->flow_table = ft;
+ prio->refcount = 0;
+ return prio;
+}
+
static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
struct ib_flow_attr *flow_attr,
enum flow_table_type ft_type)
@@ -3033,7 +3054,6 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
int num_entries;
int num_groups;
int priority;
- int err = 0;
max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
log_max_ft_size));
@@ -3083,21 +3103,10 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
return ERR_PTR(-ENOMEM);
ft = prio->flow_table;
- if (!ft) {
- ft = mlx5_create_auto_grouped_flow_table(ns, priority,
- num_entries,
- num_groups,
- 0, 0);
-
- if (!IS_ERR(ft)) {
- prio->refcount = 0;
- prio->flow_table = ft;
- } else {
- err = PTR_ERR(ft);
- }
- }
+ if (!ft)
+ return _get_prio(ns, prio, priority, num_entries, num_groups);
- return err ? ERR_PTR(err) : prio;
+ return prio;
}
static void set_underlay_qp(struct mlx5_ib_dev *dev,
@@ -3199,8 +3208,8 @@ static int flow_counters_set_data(struct ib_counters *ibcounters,
if (!mcounters->hw_cntrs_hndl) {
mcounters->hw_cntrs_hndl = mlx5_fc_create(
to_mdev(ibcounters->device)->mdev, false);
- if (!mcounters->hw_cntrs_hndl) {
- ret = -ENOMEM;
+ if (IS_ERR(mcounters->hw_cntrs_hndl)) {
+ ret = PTR_ERR(mcounters->hw_cntrs_hndl);
goto free;
}
hw_hndl = true;
@@ -3356,6 +3365,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
ft_prio->refcount++;
handler->prio = ft_prio;
+ handler->dev = dev;
ft_prio->flow_table = ft;
free:
@@ -3546,29 +3556,35 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
return ERR_PTR(-ENOMEM);
err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
- if (err) {
- kfree(ucmd);
- return ERR_PTR(err);
- }
+ if (err)
+ goto free_ucmd;
}
- if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
- return ERR_PTR(-ENOMEM);
+ if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
+ err = -ENOMEM;
+ goto free_ucmd;
+ }
if (domain != IB_FLOW_DOMAIN_USER ||
flow_attr->port > dev->num_ports ||
(flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
- IB_FLOW_ATTR_FLAGS_EGRESS)))
- return ERR_PTR(-EINVAL);
+ IB_FLOW_ATTR_FLAGS_EGRESS))) {
+ err = -EINVAL;
+ goto free_ucmd;
+ }
if (is_egress &&
(flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
- flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT))
- return ERR_PTR(-EINVAL);
+ flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
+ err = -EINVAL;
+ goto free_ucmd;
+ }
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
- if (!dst)
- return ERR_PTR(-ENOMEM);
+ if (!dst) {
+ err = -ENOMEM;
+ goto free_ucmd;
+ }
mutex_lock(&dev->flow_db->lock);
@@ -3637,8 +3653,191 @@ destroy_ft:
unlock:
mutex_unlock(&dev->flow_db->lock);
kfree(dst);
+free_ucmd:
kfree(ucmd);
- kfree(handler);
+ return ERR_PTR(err);
+}
+
+static struct mlx5_ib_flow_prio *_get_flow_table(struct mlx5_ib_dev *dev,
+ int priority, bool mcast)
+{
+ int max_table_size;
+ struct mlx5_flow_namespace *ns = NULL;
+ struct mlx5_ib_flow_prio *prio;
+
+ max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ log_max_ft_size));
+ if (max_table_size < MLX5_FS_MAX_ENTRIES)
+ return ERR_PTR(-ENOMEM);
+
+ if (mcast)
+ priority = MLX5_IB_FLOW_MCAST_PRIO;
+ else
+ priority = ib_prio_to_core_prio(priority, false);
+
+ ns = mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS);
+ if (!ns)
+ return ERR_PTR(-ENOTSUPP);
+
+ prio = &dev->flow_db->prios[priority];
+
+ if (prio->flow_table)
+ return prio;
+
+ return _get_prio(ns, prio, priority, MLX5_FS_MAX_ENTRIES,
+ MLX5_FS_MAX_TYPES);
+}
+
+static struct mlx5_ib_flow_handler *
+_create_raw_flow_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ struct mlx5_flow_destination *dst,
+ struct mlx5_ib_flow_matcher *fs_matcher,
+ void *cmd_in, int inlen)
+{
+ struct mlx5_ib_flow_handler *handler;
+ struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_table *ft = ft_prio->flow_table;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ handler = kzalloc(sizeof(*handler), GFP_KERNEL);
+ if (!handler || !spec) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ INIT_LIST_HEAD(&handler->list);
+
+ memcpy(spec->match_value, cmd_in, inlen);
+ memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
+ fs_matcher->mask_len);
+ spec->match_criteria_enable = fs_matcher->match_criteria_enable;
+
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ handler->rule = mlx5_add_flow_rules(ft, spec,
+ &flow_act, dst, 1);
+
+ if (IS_ERR(handler->rule)) {
+ err = PTR_ERR(handler->rule);
+ goto free;
+ }
+
+ ft_prio->refcount++;
+ handler->prio = ft_prio;
+ handler->dev = dev;
+ ft_prio->flow_table = ft;
+
+free:
+ if (err)
+ kfree(handler);
+ kvfree(spec);
+ return err ? ERR_PTR(err) : handler;
+}
+
+static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
+ void *match_v)
+{
+ void *match_c;
+ void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4;
+ void *dmac, *dmac_mask;
+ void *ipv4, *ipv4_mask;
+
+ if (!(fs_matcher->match_criteria_enable &
+ (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT)))
+ return false;
+
+ match_c = fs_matcher->matcher_mask.match_params;
+ match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+ match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+
+ dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
+ dmac_47_16);
+ dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
+ dmac_47_16);
+
+ if (is_multicast_ether_addr(dmac) &&
+ is_multicast_ether_addr(dmac_mask))
+ return true;
+
+ ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+
+ ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+
+ if (ipv4_is_multicast(*(__be32 *)(ipv4)) &&
+ ipv4_is_multicast(*(__be32 *)(ipv4_mask)))
+ return true;
+
+ return false;
+}
+
+struct mlx5_ib_flow_handler *
+mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_matcher *fs_matcher,
+ void *cmd_in, int inlen, int dest_id,
+ int dest_type)
+{
+ struct mlx5_flow_destination *dst;
+ struct mlx5_ib_flow_prio *ft_prio;
+ int priority = fs_matcher->priority;
+ struct mlx5_ib_flow_handler *handler;
+ bool mcast;
+ int err;
+
+ if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
+ return ERR_PTR(-ENOMEM);
+
+ dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+ if (!dst)
+ return ERR_PTR(-ENOMEM);
+
+ mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
+ mutex_lock(&dev->flow_db->lock);
+
+ ft_prio = _get_flow_table(dev, priority, mcast);
+ if (IS_ERR(ft_prio)) {
+ err = PTR_ERR(ft_prio);
+ goto unlock;
+ }
+
+ if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
+ dst->type = dest_type;
+ dst->tir_num = dest_id;
+ } else {
+ dst->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
+ dst->ft_num = dest_id;
+ }
+
+ handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, cmd_in,
+ inlen);
+
+ if (IS_ERR(handler)) {
+ err = PTR_ERR(handler);
+ goto destroy_ft;
+ }
+
+ mutex_unlock(&dev->flow_db->lock);
+ atomic_inc(&fs_matcher->usecnt);
+ handler->flow_matcher = fs_matcher;
+
+ kfree(dst);
+
+ return handler;
+
+destroy_ft:
+ put_flow_table(dev, ft_prio, false);
+unlock:
+ mutex_unlock(&dev->flow_db->lock);
+ kfree(dst);
+
return ERR_PTR(err);
}
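
The raw steering path added above splits the input in two parts: the matcher object carries the mask (match_criteria, i.e. which header bits participate in the comparison) and each rule carries the values (cmd_in copied into match_value), with the matcher refcounted through usecnt while rules reference it. As a rough standalone illustration of that mask/value relation (not driver code):

/* Illustration only: a field matches when the rule value and the packet
 * agree on every bit selected by the mask.
 */
static bool field_matches(unsigned int pkt, unsigned int rule_val,
			  unsigned int mask)
{
	return (pkt & mask) == (rule_val & mask);
}
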
@@ -3666,12 +3865,11 @@ mlx5_ib_create_flow_action_esp(struct ib_device *device,
u64 flags;
int err = 0;
- if (IS_UVERBS_COPY_ERR(uverbs_copy_from(&action_flags, attrs,
- MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS)))
- return ERR_PTR(-EFAULT);
-
- if (action_flags >= (MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1))
- return ERR_PTR(-EOPNOTSUPP);
+ err = uverbs_get_flags64(
+ &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
+ ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
+ if (err)
+ return ERR_PTR(err);
flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
@@ -4460,7 +4658,8 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr)
cancel_work_sync(&devr->ports[port].pkey_change_work);
}
-static u32 get_core_cap_flags(struct ib_device *ibdev)
+static u32 get_core_cap_flags(struct ib_device *ibdev,
+ struct mlx5_hca_vport_context *rep)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
@@ -4469,11 +4668,14 @@ static u32 get_core_cap_flags(struct ib_device *ibdev)
bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
u32 ret = 0;
+ if (rep->grh_required)
+ ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
+
if (ll == IB_LINK_LAYER_INFINIBAND)
- return RDMA_CORE_PORT_IBA_IB;
+ return ret | RDMA_CORE_PORT_IBA_IB;
if (raw_support)
- ret = RDMA_CORE_PORT_RAW_PACKET;
+ ret |= RDMA_CORE_PORT_RAW_PACKET;
if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
return ret;
@@ -4496,17 +4698,23 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr attr;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
+ struct mlx5_hca_vport_context rep = {0};
int err;
- immutable->core_cap_flags = get_core_cap_flags(ibdev);
-
err = ib_query_port(ibdev, port_num, &attr);
if (err)
return err;
+ if (ll == IB_LINK_LAYER_INFINIBAND) {
+ err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
+ &rep);
+ if (err)
+ return err;
+ }
+
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = get_core_cap_flags(ibdev);
+ immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
@@ -4604,7 +4812,7 @@ static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
}
}
-static int mlx5_enable_eth(struct mlx5_ib_dev *dev, u8 port_num)
+static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
{
int err;
@@ -4683,12 +4891,21 @@ static const struct mlx5_ib_counter extended_err_cnts[] = {
INIT_Q_COUNTER(req_cqe_flush_error),
};
+#define INIT_EXT_PPCNT_COUNTER(_name) \
+ { .name = #_name, .offset = \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_extended_cntrs_grp_data_layout._name##_high)}
+
+static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
+ INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
+};
+
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
int i;
for (i = 0; i < dev->num_ports; i++) {
- if (dev->port[i].cnts.set_id)
+ if (dev->port[i].cnts.set_id_valid)
mlx5_core_dealloc_q_counter(dev->mdev,
dev->port[i].cnts.set_id);
kfree(dev->port[i].cnts.names);
@@ -4718,7 +4935,10 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
num_counters += ARRAY_SIZE(cong_cnts);
}
-
+ if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
+ cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
+ num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
+ }
cnts->names = kcalloc(num_counters, sizeof(cnts->names), GFP_KERNEL);
if (!cnts->names)
return -ENOMEM;
@@ -4775,6 +4995,13 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
offsets[j] = cong_cnts[i].offset;
}
}
+
+ if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
+ for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) {
+ names[j] = ext_ppcnt_cnts[i].name;
+ offsets[j] = ext_ppcnt_cnts[i].offset;
+ }
+ }
}
static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
@@ -4820,7 +5047,8 @@ static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
return rdma_alloc_hw_stats_struct(port->cnts.names,
port->cnts.num_q_counters +
- port->cnts.num_cong_counters,
+ port->cnts.num_cong_counters +
+ port->cnts.num_ext_ppcnt_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -4853,6 +5081,34 @@ free:
return ret;
}
+static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_port *port,
+ struct rdma_hw_stats *stats)
+{
+ int offset = port->cnts.num_q_counters + port->cnts.num_cong_counters;
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ int ret, i;
+ void *out;
+
+ out = kvzalloc(sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ ret = mlx5_cmd_query_ext_ppcnt_counters(dev->mdev, out);
+ if (ret)
+ goto free;
+
+ for (i = 0; i < port->cnts.num_ext_ppcnt_counters; i++) {
+ stats->value[i + offset] =
+ be64_to_cpup((__be64 *)(out +
+ port->cnts.offsets[i + offset]));
+ }
+
+free:
+ kvfree(out);
+ return ret;
+}
+
static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u8 port_num, int index)
@@ -4866,13 +5122,21 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
if (!stats)
return -EINVAL;
- num_counters = port->cnts.num_q_counters + port->cnts.num_cong_counters;
+ num_counters = port->cnts.num_q_counters +
+ port->cnts.num_cong_counters +
+ port->cnts.num_ext_ppcnt_counters;
/* q_counters are per IB device, query the master mdev */
ret = mlx5_ib_query_q_counters(dev->mdev, port, stats);
if (ret)
return ret;
+ if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
+ ret = mlx5_ib_query_ext_ppcnt_counters(dev, port, stats);
+ if (ret)
+ return ret;
+ }
+
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
&mdev_port_num);
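
The counter hunks above append the extended PPCNT group behind the Q and congestion counters, so its first slot in the rdma_hw_stats value array sits at num_q_counters + num_cong_counters, which is also the offset computed in mlx5_ib_query_ext_ppcnt_counters(). A toy illustration of that slotting (types and names are illustrative only):

/* Illustration only: the three counter groups share one value[] array. */
struct cnt_layout {
	unsigned int num_q;
	unsigned int num_cong;
	unsigned int num_ext_ppcnt;
};

static unsigned int ext_ppcnt_base(const struct cnt_layout *l)
{
	return l->num_q + l->num_cong;	/* first extended PPCNT slot */
}
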
@@ -4899,11 +5163,6 @@ done:
return num_counters;
}
-static void mlx5_ib_free_rdma_netdev(struct net_device *netdev)
-{
- return mlx5_rdma_netdev_free(netdev);
-}
-
static struct net_device*
mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
u8 port_num,
@@ -4913,17 +5172,12 @@ mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
void (*setup)(struct net_device *))
{
struct net_device *netdev;
- struct rdma_netdev *rn;
if (type != RDMA_NETDEV_IPOIB)
return ERR_PTR(-EOPNOTSUPP);
netdev = mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
name, setup);
- if (likely(!IS_ERR_OR_NULL(netdev))) {
- rn = netdev_priv(netdev);
- rn->free_rdma_netdev = mlx5_ib_free_rdma_netdev;
- }
return netdev;
}
@@ -5121,8 +5375,8 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
spin_lock(&ibdev->port[port_num].mp.mpi_lock);
if (ibdev->port[port_num].mp.mpi) {
- mlx5_ib_warn(ibdev, "port %d already affiliated.\n",
- port_num + 1);
+ mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
+ port_num + 1);
spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
return false;
}
@@ -5257,45 +5511,47 @@ static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
mlx5_nic_vport_disable_roce(dev->mdev);
}
-ADD_UVERBS_ATTRIBUTES_SIMPLE(mlx5_ib_dm, UVERBS_OBJECT_DM,
- UVERBS_METHOD_DM_ALLOC,
- &UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
- UVERBS_ATTR_TYPE(u64),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
- UVERBS_ATTR_TYPE(u16),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
-
-ADD_UVERBS_ATTRIBUTES_SIMPLE(mlx5_ib_flow_action, UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
- &UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
- UVERBS_ATTR_TYPE(u64),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+ mlx5_ib_dm,
+ UVERBS_OBJECT_DM,
+ UVERBS_METHOD_DM_ALLOC,
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY));
+
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+ mlx5_ib_flow_action,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
+ enum mlx5_ib_uapi_flow_action_flags));
-#define NUM_TREES 2
static int populate_specs_root(struct mlx5_ib_dev *dev)
{
- const struct uverbs_object_tree_def *default_root[NUM_TREES + 1] = {
- uverbs_default_get_objects()};
- size_t num_trees = 1;
+ const struct uverbs_object_tree_def **trees = dev->driver_trees;
+ size_t num_trees = 0;
- if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
- !WARN_ON(num_trees >= ARRAY_SIZE(default_root)))
- default_root[num_trees++] = &mlx5_ib_flow_action;
+ if (mlx5_accel_ipsec_device_caps(dev->mdev) &
+ MLX5_ACCEL_IPSEC_CAP_DEVICE)
+ trees[num_trees++] = &mlx5_ib_flow_action;
- if (MLX5_CAP_DEV_MEM(dev->mdev, memic) &&
- !WARN_ON(num_trees >= ARRAY_SIZE(default_root)))
- default_root[num_trees++] = &mlx5_ib_dm;
+ if (MLX5_CAP_DEV_MEM(dev->mdev, memic))
+ trees[num_trees++] = &mlx5_ib_dm;
- dev->ib_dev.specs_root =
- uverbs_alloc_spec_tree(num_trees, default_root);
+ if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_UCTX)
+ trees[num_trees++] = mlx5_ib_get_devx_tree();
- return PTR_ERR_OR_ZERO(dev->ib_dev.specs_root);
-}
+ num_trees += mlx5_ib_get_flow_trees(trees + num_trees);
-static void depopulate_specs_root(struct mlx5_ib_dev *dev)
-{
- uverbs_free_spec_tree(dev->ib_dev.specs_root);
+ WARN_ON(num_trees >= ARRAY_SIZE(dev->driver_trees));
+ trees[num_trees] = NULL;
+ dev->ib_dev.driver_specs = trees;
+
+ return 0;
}
static int mlx5_ib_read_counters(struct ib_counters *counters,
@@ -5546,6 +5802,8 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
dev->ib_dev.modify_qp = mlx5_ib_modify_qp;
dev->ib_dev.query_qp = mlx5_ib_query_qp;
dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp;
+ dev->ib_dev.drain_sq = mlx5_ib_drain_sq;
+ dev->ib_dev.drain_rq = mlx5_ib_drain_rq;
dev->ib_dev.post_send = mlx5_ib_post_send;
dev->ib_dev.post_recv = mlx5_ib_post_recv;
dev->ib_dev.create_cq = mlx5_ib_create_cq;
@@ -5643,9 +5901,9 @@ int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
return 0;
}
-static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev,
- u8 port_num)
+static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
{
+ u8 port_num;
int i;
for (i = 0; i < dev->num_ports; i++) {
@@ -5668,6 +5926,8 @@ static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev,
(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
+ port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+
return mlx5_add_netdev_notifier(dev, port_num);
}
@@ -5684,14 +5944,12 @@ int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
enum rdma_link_layer ll;
int port_type_cap;
int err = 0;
- u8 port_num;
- port_num = mlx5_core_native_port_num(dev->mdev) - 1;
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if (ll == IB_LINK_LAYER_ETHERNET)
- err = mlx5_ib_stage_common_roce_init(dev, port_num);
+ err = mlx5_ib_stage_common_roce_init(dev);
return err;
}
@@ -5706,19 +5964,17 @@ static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
struct mlx5_core_dev *mdev = dev->mdev;
enum rdma_link_layer ll;
int port_type_cap;
- u8 port_num;
int err;
- port_num = mlx5_core_native_port_num(dev->mdev) - 1;
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if (ll == IB_LINK_LAYER_ETHERNET) {
- err = mlx5_ib_stage_common_roce_init(dev, port_num);
+ err = mlx5_ib_stage_common_roce_init(dev);
if (err)
return err;
- err = mlx5_enable_eth(dev, port_num);
+ err = mlx5_enable_eth(dev);
if (err)
goto cleanup;
}
@@ -5735,9 +5991,7 @@ static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
struct mlx5_core_dev *mdev = dev->mdev;
enum rdma_link_layer ll;
int port_type_cap;
- u8 port_num;
- port_num = mlx5_core_native_port_num(dev->mdev) - 1;
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
@@ -5836,11 +6090,6 @@ int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
return ib_register_device(&dev->ib_dev, NULL);
}
-static void mlx5_ib_stage_depopulate_specs(struct mlx5_ib_dev *dev)
-{
- depopulate_specs_root(dev);
-}
-
void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
{
destroy_umrc_res(dev);
@@ -5909,8 +6158,6 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
ib_dealloc_device((struct ib_device *)dev);
}
-static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num);
-
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile)
{
@@ -5977,7 +6224,7 @@ static const struct mlx5_ib_profile pf_profile = {
mlx5_ib_stage_pre_ib_reg_umr_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_SPECS,
mlx5_ib_stage_populate_specs,
- mlx5_ib_stage_depopulate_specs),
+ NULL),
STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
mlx5_ib_stage_ib_reg_init,
mlx5_ib_stage_ib_reg_cleanup),
@@ -6025,7 +6272,7 @@ static const struct mlx5_ib_profile nic_rep_profile = {
mlx5_ib_stage_pre_ib_reg_umr_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_SPECS,
mlx5_ib_stage_populate_specs,
- mlx5_ib_stage_depopulate_specs),
+ NULL),
STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
mlx5_ib_stage_ib_reg_init,
mlx5_ib_stage_ib_reg_cleanup),
@@ -6040,7 +6287,7 @@ static const struct mlx5_ib_profile nic_rep_profile = {
mlx5_ib_stage_rep_reg_cleanup),
};
-static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num)
+static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_multiport_info *mpi;
struct mlx5_ib_dev *dev;
@@ -6074,8 +6321,6 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num)
if (!bound) {
list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
- } else {
- mlx5_ib_dbg(dev, "bound port %u\n", port_num + 1);
}
mutex_unlock(&mlx5_ib_multiport_mutex);
@@ -6093,11 +6338,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
- if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET) {
- u8 port_num = mlx5_core_native_port_num(mdev) - 1;
-
- return mlx5_ib_add_slave_port(mdev, port_num);
- }
+ if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
+ return mlx5_ib_add_slave_port(mdev);
dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
if (!dev)
@@ -6107,7 +6349,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
MLX5_CAP_GEN(mdev, num_vhca_ports));
- if (MLX5_VPORT_MANAGER(mdev) &&
+ if (MLX5_ESWITCH_MANAGER(mdev) &&
mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index d89c8fe626f6..320d4dfe8c2f 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -46,6 +46,7 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
#define mlx5_ib_dbg(dev, format, arg...) \
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
@@ -78,12 +79,6 @@ enum {
MLX5_REQ_SCAT_DATA64_CQE = 0x22,
};
-enum mlx5_ib_latency_class {
- MLX5_IB_LATENCY_CLASS_LOW,
- MLX5_IB_LATENCY_CLASS_MEDIUM,
- MLX5_IB_LATENCY_CLASS_HIGH,
-};
-
enum mlx5_ib_mad_ifc_flags {
MLX5_MAD_IFC_IGNORE_MKEY = 1,
MLX5_MAD_IFC_IGNORE_BKEY = 2,
@@ -143,6 +138,7 @@ struct mlx5_ib_ucontext {
u64 lib_caps;
DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
+ u16 devx_uid;
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -176,6 +172,18 @@ struct mlx5_ib_flow_handler {
struct mlx5_ib_flow_prio *prio;
struct mlx5_flow_handle *rule;
struct ib_counters *ibcounters;
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_flow_matcher *flow_matcher;
+};
+
+struct mlx5_ib_flow_matcher {
+ struct mlx5_ib_match_params matcher_mask;
+ int mask_len;
+ enum mlx5_ib_flow_type flow_type;
+ u16 priority;
+ struct mlx5_core_dev *mdev;
+ atomic_t usecnt;
+ u8 match_criteria_enable;
};
struct mlx5_ib_flow_db {
@@ -461,7 +469,7 @@ struct mlx5_umr_wr {
u32 mkey;
};
-static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
+static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{
return container_of(wr, struct mlx5_umr_wr, wr);
}
@@ -665,6 +673,7 @@ struct mlx5_ib_counters {
size_t *offsets;
u32 num_q_counters;
u32 num_cong_counters;
+ u32 num_ext_ppcnt_counters;
u16 set_id;
bool set_id_valid;
};
@@ -851,6 +860,7 @@ to_mcounters(struct ib_counters *ibcntrs)
struct mlx5_ib_dev {
struct ib_device ib_dev;
+ const struct uverbs_object_tree_def *driver_trees[6];
struct mlx5_core_dev *mdev;
struct mlx5_roce roce[MLX5_MAX_PORTS];
int num_ports;
@@ -1004,8 +1014,8 @@ int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
-int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
@@ -1014,10 +1024,12 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
-int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+void mlx5_ib_drain_sq(struct ib_qp *qp);
+void mlx5_ib_drain_rq(struct ib_qp *qp);
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
void *buffer, u32 length,
@@ -1183,10 +1195,8 @@ int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
u64 guid, int type);
-__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
- int index);
-int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
- int index, enum ib_gid_type *gid_type);
+__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
+ const struct ib_gid_attr *attr);
void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
@@ -1200,10 +1210,10 @@ int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
-int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
@@ -1217,6 +1227,36 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
u8 port_num);
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context);
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context);
+const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
+struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
+ struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
+ void *cmd_in, int inlen, int dest_id, int dest_type);
+bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
+int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
+#else
+static inline int
+mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context) { return -EOPNOTSUPP; };
+static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context) {}
+static inline const struct uverbs_object_tree_def *
+mlx5_ib_get_devx_tree(void) { return NULL; }
+static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
+ int *dest_type)
+{
+ return false;
+}
+static inline int
+mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
+{
+ return 0;
+}
+#endif
static inline void init_query_mad(struct ib_smp *mad)
{
mad->base_version = 1;
@@ -1318,4 +1358,7 @@ static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
unsigned long mlx5_ib_get_xlt_emergency_page(void);
void mlx5_ib_put_xlt_emergency_page(void);
+int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi, u32 bfregn,
+ bool dyn_bfreg);
#endif /* MLX5_IB_H */
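
The header hunk above follows the usual kernel pattern for optional features: real prototypes under IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) and static inline stubs otherwise, so callers stay free of #ifdefs. A minimal made-up illustration of the same pattern (CONFIG_FOO and struct foo_dev are placeholders, not part of this driver):

#include <linux/errno.h>

struct foo_dev;

#if IS_ENABLED(CONFIG_FOO)
int foo_setup(struct foo_dev *fdev);
#else
static inline int foo_setup(struct foo_dev *fdev)
{
	return -EOPNOTSUPP;	/* feature compiled out */
}
#endif
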
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 90a9c461cedc..9fb1d9cb9401 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -271,16 +271,16 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
{
struct mlx5_cache_ent *ent = filp->private_data;
struct mlx5_ib_dev *dev = ent->dev;
- char lbuf[20];
+ char lbuf[20] = {0};
u32 var;
int err;
int c;
- if (copy_from_user(lbuf, buf, sizeof(lbuf)))
+ count = min(count, sizeof(lbuf) - 1);
+ if (copy_from_user(lbuf, buf, count))
return -EFAULT;
c = order2idx(dev, ent->order);
- lbuf[sizeof(lbuf) - 1] = 0;
if (sscanf(lbuf, "%u", &var) != 1)
return -EINVAL;
@@ -310,19 +310,11 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
char lbuf[20];
int err;
- if (*pos)
- return 0;
-
err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
if (err < 0)
return err;
- if (copy_to_user(buf, lbuf, err))
- return -EFAULT;
-
- *pos += err;
-
- return err;
+ return simple_read_from_buffer(buf, count, pos, lbuf, err);
}
static const struct file_operations size_fops = {
@@ -337,16 +329,16 @@ static ssize_t limit_write(struct file *filp, const char __user *buf,
{
struct mlx5_cache_ent *ent = filp->private_data;
struct mlx5_ib_dev *dev = ent->dev;
- char lbuf[20];
+ char lbuf[20] = {0};
u32 var;
int err;
int c;
- if (copy_from_user(lbuf, buf, sizeof(lbuf)))
+ count = min(count, sizeof(lbuf) - 1);
+ if (copy_from_user(lbuf, buf, count))
return -EFAULT;
c = order2idx(dev, ent->order);
- lbuf[sizeof(lbuf) - 1] = 0;
if (sscanf(lbuf, "%u", &var) != 1)
return -EINVAL;
@@ -372,19 +364,11 @@ static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
char lbuf[20];
int err;
- if (*pos)
- return 0;
-
err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
if (err < 0)
return err;
- if (copy_to_user(buf, lbuf, err))
- return -EFAULT;
-
- *pos += err;
-
- return err;
+ return simple_read_from_buffer(buf, count, pos, lbuf, err);
}
static const struct file_operations limit_fops = {
@@ -914,7 +898,7 @@ static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
struct mlx5_umr_wr *umrwr)
{
struct umr_common *umrc = &dev->umrc;
- struct ib_send_wr *bad;
+ const struct ib_send_wr *bad;
int err;
struct mlx5_ib_umr_context umr_context;
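
The mr.c hunks above bound the debugfs writes to the local buffer and convert the reads to simple_read_from_buffer(), which handles *pos and short reads. As a side note, the write side could also be collapsed into kstrtouint_from_user(); the sketch below is an alternative under that assumption, not what the patch does.

/* Alternative sketch (not the patch): parse a small debugfs write in one
 * call; kstrtouint_from_user() bounds the copy and rejects malformed input.
 */
#include <linux/fs.h>
#include <linux/kernel.h>

static ssize_t limit_write_sketch(struct file *filp, const char __user *buf,
				  size_t count, loff_t *pos)
{
	unsigned int var;
	int err;

	err = kstrtouint_from_user(buf, count, 0, &var);
	if (err)
		return err;

	/* ... apply "var" to the cache entry, as size_write()/limit_write() do */
	return count;
}
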
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index f1a87a690a4c..d216e0d2921d 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -488,7 +488,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
down_read(&ctx->umem_rwsem);
rbt_ib_umem_for_each_in_range(&ctx->umem_tree, 0, ULLONG_MAX,
- mr_leaf_free, imr);
+ mr_leaf_free, true, imr);
up_read(&ctx->umem_rwsem);
wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index a4f1f638509f..6cba2a02d11b 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -563,32 +563,21 @@ static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
}
static int alloc_bfreg(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi,
- enum mlx5_ib_latency_class lat)
+ struct mlx5_bfreg_info *bfregi)
{
- int bfregn = -EINVAL;
+ int bfregn = -ENOMEM;
mutex_lock(&bfregi->lock);
- switch (lat) {
- case MLX5_IB_LATENCY_CLASS_LOW:
+ if (bfregi->ver >= 2) {
+ bfregn = alloc_high_class_bfreg(dev, bfregi);
+ if (bfregn < 0)
+ bfregn = alloc_med_class_bfreg(dev, bfregi);
+ }
+
+ if (bfregn < 0) {
BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
bfregn = 0;
bfregi->count[bfregn]++;
- break;
-
- case MLX5_IB_LATENCY_CLASS_MEDIUM:
- if (bfregi->ver < 2)
- bfregn = -ENOMEM;
- else
- bfregn = alloc_med_class_bfreg(dev, bfregi);
- break;
-
- case MLX5_IB_LATENCY_CLASS_HIGH:
- if (bfregi->ver < 2)
- bfregn = -ENOMEM;
- else
- bfregn = alloc_high_class_bfreg(dev, bfregi);
- break;
}
mutex_unlock(&bfregi->lock);
@@ -641,13 +630,13 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
struct mlx5_ib_cq *recv_cq);
-static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi, int bfregn,
- bool dyn_bfreg)
+int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi, u32 bfregn,
+ bool dyn_bfreg)
{
- int bfregs_per_sys_page;
- int index_of_sys_page;
- int offset;
+ unsigned int bfregs_per_sys_page;
+ u32 index_of_sys_page;
+ u32 offset;
bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
MLX5_NON_FP_BFREGS_PER_UAR;
@@ -655,6 +644,10 @@ static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
if (dyn_bfreg) {
index_of_sys_page += bfregi->num_static_sys_pages;
+
+ if (index_of_sys_page >= bfregi->num_sys_pages)
+ return -EINVAL;
+
if (bfregn > bfregi->num_dyn_bfregs ||
bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) {
mlx5_ib_dbg(dev, "Invalid dynamic uar index\n");
@@ -819,21 +812,9 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
bfregn = MLX5_CROSS_CHANNEL_BFREG;
}
else {
- bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH);
- if (bfregn < 0) {
- mlx5_ib_dbg(dev, "failed to allocate low latency BFREG\n");
- mlx5_ib_dbg(dev, "reverting to medium latency\n");
- bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_MEDIUM);
- if (bfregn < 0) {
- mlx5_ib_dbg(dev, "failed to allocate medium latency BFREG\n");
- mlx5_ib_dbg(dev, "reverting to high latency\n");
- bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_LOW);
- if (bfregn < 0) {
- mlx5_ib_warn(dev, "bfreg allocation failed\n");
- return bfregn;
- }
- }
- }
+ bfregn = alloc_bfreg(dev, &context->bfregi);
+ if (bfregn < 0)
+ return bfregn;
}
mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
@@ -1626,7 +1607,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_resources *devr = &dev->devr;
int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
struct mlx5_core_dev *mdev = dev->mdev;
- struct mlx5_ib_create_qp_resp resp;
+ struct mlx5_ib_create_qp_resp resp = {};
struct mlx5_ib_cq *send_cq;
struct mlx5_ib_cq *recv_cq;
unsigned long flags;
@@ -2555,18 +2536,16 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
if (!(ah_flags & IB_AH_GRH))
return -EINVAL;
- err = mlx5_get_roce_gid_type(dev, port, grh->sgid_index,
- &gid_type);
- if (err)
- return err;
+
memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac));
if (qp->ibqp.qp_type == IB_QPT_RC ||
qp->ibqp.qp_type == IB_QPT_UC ||
qp->ibqp.qp_type == IB_QPT_XRC_INI ||
qp->ibqp.qp_type == IB_QPT_XRC_TGT)
- path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
- grh->sgid_index);
+ path->udp_sport =
+ mlx5_get_roce_udp_sport(dev, ah->grh.sgid_attr);
path->dci_cfi_prio_sl = (sl & 0x7) << 4;
+ gid_type = ah->grh.sgid_attr->gid_type;
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
} else {
@@ -3529,7 +3508,7 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
}
static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
- struct ib_send_wr *wr, void *qend,
+ const struct ib_send_wr *wr, void *qend,
struct mlx5_ib_qp *qp, int *size)
{
void *seg = eseg;
@@ -3582,7 +3561,7 @@ static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
}
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
@@ -3730,9 +3709,9 @@ static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
struct mlx5_wqe_umr_ctrl_seg *umr,
- struct ib_send_wr *wr, int atomic)
+ const struct ib_send_wr *wr, int atomic)
{
- struct mlx5_umr_wr *umrwr = umr_wr(wr);
+ const struct mlx5_umr_wr *umrwr = umr_wr(wr);
memset(umr, 0, sizeof(*umr));
@@ -3803,9 +3782,10 @@ static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
seg->status = MLX5_MKEY_STATUS_FREE;
}
-static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
+static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
+ const struct ib_send_wr *wr)
{
- struct mlx5_umr_wr *umrwr = umr_wr(wr);
+ const struct mlx5_umr_wr *umrwr = umr_wr(wr);
memset(seg, 0, sizeof(*seg));
if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
@@ -3854,7 +3834,7 @@ static void set_reg_umr_inline_seg(void *seg, struct mlx5_ib_qp *qp,
seg += mr_list_size;
}
-static __be32 send_ieth(struct ib_send_wr *wr)
+static __be32 send_ieth(const struct ib_send_wr *wr)
{
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
@@ -3886,7 +3866,7 @@ static u8 wq_sig(void *wqe)
return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}
-static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
+static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
void *wqe, int *sz)
{
struct mlx5_wqe_inline_seg *seg;
@@ -4032,7 +4012,7 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr,
return 0;
}
-static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
+static int set_sig_data_segment(const struct ib_sig_handover_wr *wr,
struct mlx5_ib_qp *qp, void **seg, int *size)
{
struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
@@ -4134,7 +4114,7 @@ static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
}
static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
- struct ib_sig_handover_wr *wr, u32 size,
+ const struct ib_sig_handover_wr *wr, u32 size,
u32 length, u32 pdn)
{
struct ib_mr *sig_mr = wr->sig_mr;
@@ -4165,10 +4145,10 @@ static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
}
-static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
- void **seg, int *size)
+static int set_sig_umr_wr(const struct ib_send_wr *send_wr,
+ struct mlx5_ib_qp *qp, void **seg, int *size)
{
- struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
+ const struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
u32 pdn = get_pd(qp)->pdn;
u32 xlt_size;
@@ -4243,7 +4223,7 @@ static int set_psv_wr(struct ib_sig_domain *domain,
}
static int set_reg_wr(struct mlx5_ib_qp *qp,
- struct ib_reg_wr *wr,
+ const struct ib_reg_wr *wr,
void **seg, int *size)
{
struct mlx5_ib_mr *mr = to_mmr(wr->mr);
@@ -4314,10 +4294,10 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
}
}
-static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
struct mlx5_wqe_ctrl_seg **ctrl,
- struct ib_send_wr *wr, unsigned *idx,
- int *size, int nreq)
+ const struct ib_send_wr *wr, unsigned *idx,
+ int *size, int nreq, bool send_signaled, bool solicited)
{
if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
return -ENOMEM;
@@ -4328,10 +4308,8 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
*(uint32_t *)(*seg + 8) = 0;
(*ctrl)->imm = send_ieth(wr);
(*ctrl)->fm_ce_se = qp->sq_signal_bits |
- (wr->send_flags & IB_SEND_SIGNALED ?
- MLX5_WQE_CTRL_CQ_UPDATE : 0) |
- (wr->send_flags & IB_SEND_SOLICITED ?
- MLX5_WQE_CTRL_SOLICITED : 0);
+ (send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
+ (solicited ? MLX5_WQE_CTRL_SOLICITED : 0);
*seg += sizeof(**ctrl);
*size = sizeof(**ctrl) / 16;
@@ -4339,6 +4317,16 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
return 0;
}
+static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl,
+ const struct ib_send_wr *wr, unsigned *idx,
+ int *size, int nreq)
+{
+ return __begin_wqe(qp, seg, ctrl, wr, idx, size, nreq,
+ wr->send_flags & IB_SEND_SIGNALED,
+ wr->send_flags & IB_SEND_SOLICITED);
+}
+
static void finish_wqe(struct mlx5_ib_qp *qp,
struct mlx5_wqe_ctrl_seg *ctrl,
u8 size, unsigned idx, u64 wr_id,
@@ -4360,9 +4348,8 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
qp->sq.w_list[idx].next = qp->sq.cur_post;
}
-
-int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr, bool drain)
{
struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
@@ -4393,7 +4380,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->sq.lock, flags);
- if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
@@ -4498,10 +4485,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
* SET_PSV WQEs are not signaled and solicited
* on error
*/
- wr->send_flags &= ~IB_SEND_SIGNALED;
- wr->send_flags |= IB_SEND_SOLICITED;
- err = begin_wqe(qp, &seg, &ctrl, wr,
- &idx, &size, nreq);
+ err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
+ &size, nreq, false, true);
if (err) {
mlx5_ib_warn(dev, "\n");
err = -ENOMEM;
@@ -4520,8 +4505,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
fence, MLX5_OPCODE_SET_PSV);
- err = begin_wqe(qp, &seg, &ctrl, wr,
- &idx, &size, nreq);
+ err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
+ &size, nreq, false, true);
if (err) {
mlx5_ib_warn(dev, "\n");
err = -ENOMEM;
@@ -4690,13 +4675,19 @@ out:
return err;
}
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ return _mlx5_ib_post_send(ibqp, wr, bad_wr, false);
+}
+
static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
sig->signature = calc_sig(sig, size);
}
-int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr, bool drain)
{
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_wqe_data_seg *scat;
@@ -4714,7 +4705,7 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qp->rq.lock, flags);
- if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
@@ -4776,6 +4767,12 @@ out:
return err;
}
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ return _mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
+}
+
static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
switch (mlx5_state) {
@@ -5365,7 +5362,9 @@ static int set_user_rq_size(struct mlx5_ib_dev *dev,
rwq->wqe_count = ucmd->rq_wqe_count;
rwq->wqe_shift = ucmd->rq_wqe_shift;
- rwq->buf_size = (rwq->wqe_count << rwq->wqe_shift);
+ if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size))
+ return -EINVAL;
+
rwq->log_rq_stride = rwq->wqe_shift;
rwq->log_rq_size = ilog2(rwq->wqe_count);
return 0;
@@ -5697,3 +5696,132 @@ out:
kvfree(in);
return err;
}
+
+struct mlx5_ib_drain_cqe {
+ struct ib_cqe cqe;
+ struct completion done;
+};
+
+static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
+ struct mlx5_ib_drain_cqe,
+ cqe);
+
+ complete(&cqe->done);
+}
+
+/* This function returns only once the drain WR has completed */
+static void handle_drain_completion(struct ib_cq *cq,
+ struct mlx5_ib_drain_cqe *sdrain,
+ struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ if (cq->poll_ctx == IB_POLL_DIRECT) {
+ while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
+ ib_process_cq_direct(cq, -1);
+ return;
+ }
+
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ struct mlx5_ib_cq *mcq = to_mcq(cq);
+ bool triggered = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ /* Make sure that the CQ handler won't run if it hasn't run yet */
+ if (!mcq->mcq.reset_notify_added)
+ mcq->mcq.reset_notify_added = 1;
+ else
+ triggered = true;
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ if (triggered) {
+ /* Wait for any scheduled/running task to finish */
+ switch (cq->poll_ctx) {
+ case IB_POLL_SOFTIRQ:
+ irq_poll_disable(&cq->iop);
+ irq_poll_enable(&cq->iop);
+ break;
+ case IB_POLL_WORKQUEUE:
+ cancel_work_sync(&cq->work);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ }
+
+ /* Run the CQ handler - this makes sure that the drain WR will
+ * be processed if it wasn't processed yet.
+ */
+ mcq->mcq.comp(&mcq->mcq);
+ }
+
+ wait_for_completion(&sdrain->done);
+}
+
+void mlx5_ib_drain_sq(struct ib_qp *qp)
+{
+ struct ib_cq *cq = qp->send_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct mlx5_ib_drain_cqe sdrain;
+ const struct ib_send_wr *bad_swr;
+ struct ib_rdma_wr swr = {
+ .wr = {
+ .next = NULL,
+ { .wr_cqe = &sdrain.cqe, },
+ .opcode = IB_WR_RDMA_WRITE,
+ },
+ };
+ int ret;
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ sdrain.cqe.done = mlx5_ib_drain_qp_done;
+ init_completion(&sdrain.done);
+
+ ret = _mlx5_ib_post_send(qp, &swr.wr, &bad_swr, true);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ handle_drain_completion(cq, &sdrain, dev);
+}
+
+void mlx5_ib_drain_rq(struct ib_qp *qp)
+{
+ struct ib_cq *cq = qp->recv_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct mlx5_ib_drain_cqe rdrain;
+ struct ib_recv_wr rwr = {};
+ const struct ib_recv_wr *bad_rwr;
+ int ret;
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ rwr.wr_cqe = &rdrain.cqe;
+ rdrain.cqe.done = mlx5_ib_drain_qp_done;
+ init_completion(&rdrain.done);
+
+ ret = _mlx5_ib_post_recv(qp, &rwr, &bad_rwr, true);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ handle_drain_completion(cq, &rdrain, dev);
+}
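
The drain_sq/drain_rq verbs added above are what the core ib_drain_qp()/ib_drain_sq()/ib_drain_rq() helpers dispatch to when a driver provides them: they post a marker WR after moving the QP to the error state and wait for its completion, including when the device is in internal error. A minimal consumer-side sketch (struct ulp_conn and the teardown flow are assumptions, not part of this patch):

/* Sketch only: a ULP draining a QP before teardown. ib_drain_qp() is the
 * core helper that lands in mlx5_ib_drain_sq()/mlx5_ib_drain_rq() here.
 */
#include <rdma/ib_verbs.h>

struct ulp_conn {
	struct ib_qp *qp;
	struct ib_cq *cq;
};

static void ulp_teardown(struct ulp_conn *conn)
{
	ib_drain_qp(conn->qp);		/* wait for posted WRs to flush */
	ib_destroy_qp(conn->qp);
	ib_free_cq(conn->cq);
}
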
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 0af7b7905550..d359fecf7a5b 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -266,18 +266,24 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
- if (desc_size == 0 || srq->msrq.max_gs > desc_size)
- return ERR_PTR(-EINVAL);
+ if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
+ err = -EINVAL;
+ goto err_srq;
+ }
desc_size = roundup_pow_of_two(desc_size);
desc_size = max_t(size_t, 32, desc_size);
- if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
- return ERR_PTR(-EINVAL);
+ if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
+ err = -EINVAL;
+ goto err_srq;
+ }
srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
sizeof(struct mlx5_wqe_data_seg);
srq->msrq.wqe_shift = ilog2(desc_size);
buf_size = srq->msrq.max * desc_size;
- if (buf_size < desc_size)
- return ERR_PTR(-EINVAL);
+ if (buf_size < desc_size) {
+ err = -EINVAL;
+ goto err_srq;
+ }
in.type = init_attr->srq_type;
if (pd->uobject)
@@ -440,8 +446,8 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
spin_unlock(&srq->lock);
}
-int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mlx5_ib_srq *srq = to_msrq(ibsrq);
struct mlx5_wqe_srq_next_seg *next;