-rw-r--r--  Documentation/infiniband/core_locking.rst | 8
-rw-r--r--  drivers/infiniband/Kconfig | 1
-rw-r--r--  drivers/infiniband/core/addr.c | 2
-rw-r--r--  drivers/infiniband/core/cache.c | 1
-rw-r--r--  drivers/infiniband/core/cma.c | 2
-rw-r--r--  drivers/infiniband/core/cma_configfs.c | 8
-rw-r--r--  drivers/infiniband/core/core_priv.h | 24
-rw-r--r--  drivers/infiniband/core/counters.c | 8
-rw-r--r--  drivers/infiniband/core/cq.c | 28
-rw-r--r--  drivers/infiniband/core/device.c | 129
-rw-r--r--  drivers/infiniband/core/fmr_pool.c | 13
-rw-r--r--  drivers/infiniband/core/iwpm_msg.c | 17
-rw-r--r--  drivers/infiniband/core/iwpm_util.c | 15
-rw-r--r--  drivers/infiniband/core/netlink.c | 63
-rw-r--r--  drivers/infiniband/core/nldev.c | 20
-rw-r--r--  drivers/infiniband/core/sa_query.c | 2
-rw-r--r--  drivers/infiniband/core/sysfs.c | 30
-rw-r--r--  drivers/infiniband/core/umem.c | 50
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 437
-rw-r--r--  drivers/infiniband/core/user_mad.c | 2
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 2
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 4
-rw-r--r--  drivers/infiniband/core/verbs.c | 1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 6
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 1
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 45
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c | 24
-rw-r--r--  drivers/infiniband/hw/efa/efa.h | 3
-rw-r--r--  drivers/infiniband/hw/efa/efa_com.c | 70
-rw-r--r--  drivers/infiniband/hw/efa/efa_com_cmd.c | 165
-rw-r--r--  drivers/infiniband/hw/efa/efa_com_cmd.h | 23
-rw-r--r--  drivers/infiniband/hw/efa/efa_main.c | 2
-rw-r--r--  drivers/infiniband/hw/efa/efa_verbs.c | 91
-rw-r--r--  drivers/infiniband/hw/hfi1/user_sdma.h | 6
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 17
-rw-r--r--  drivers/infiniband/hw/hns/Kconfig | 8
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_ah.c | 23
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cmd.c | 1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cq.c | 186
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h | 95
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.c | 252
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.h | 6
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 69
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 1065
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 7
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c | 3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c | 434
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c | 268
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_srq.c | 296
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 11
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c | 7
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 242
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 27
-rw-r--r--  drivers/infiniband/hw/mlx5/flow.c | 13
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 130
-rw-r--r--  drivers/infiniband/hw/mlx5/mem.c | 13
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 38
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 163
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 25
-rw-r--r--  drivers/infiniband/hw/mlx5/srq_cmd.c | 6
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 4
-rw-r--r--  drivers/infiniband/hw/qedr/main.c | 2
-rw-r--r--  drivers/infiniband/hw/qedr/qedr.h | 2
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 7
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c | 3
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_main.c | 10
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 9
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.h | 4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_param.h | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c | 6
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp_tx.c | 11
-rw-r--r--  drivers/infiniband/sw/siw/siw_verbs.c | 3
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 7
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 184
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 56
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 120
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 125
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rl.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c | 5
-rw-r--r--  fs/cifs/smbdirect.c | 10
-rw-r--r--  include/Kbuild | 6
-rw-r--r--  include/linux/mlx5/device.h | 9
-rw-r--r--  include/linux/mlx5/driver.h | 4
-rw-r--r--  include/linux/mlx5/fs.h | 1
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 56
-rw-r--r--  include/linux/qed/qed_rdma_if.h | 2
-rw-r--r--  include/rdma/ib.h | 2
-rw-r--r--  include/rdma/ib_umem_odp.h | 48
-rw-r--r--  include/rdma/ib_verbs.h | 78
-rw-r--r--  include/rdma/iw_portmap.h | 3
-rw-r--r--  include/rdma/opa_port_info.h | 2
-rw-r--r--  include/rdma/rdma_netlink.h | 10
-rw-r--r--  include/rdma/rdmavt_cq.h | 1
-rw-r--r--  include/rdma/signature.h | 2
-rw-r--r--  include/uapi/rdma/mlx5_user_ioctl_verbs.h | 1
-rw-r--r--  net/9p/trans_rdma.c | 6
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c | 8
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c | 13
112 files changed, 3308 insertions, 2420 deletions
diff --git a/Documentation/infiniband/core_locking.rst b/Documentation/infiniband/core_locking.rst
index f34669beb4fe..8f76a8a5a38f 100644
--- a/Documentation/infiniband/core_locking.rst
+++ b/Documentation/infiniband/core_locking.rst
@@ -29,10 +29,10 @@ Sleeping and interrupt context
The corresponding functions exported to upper level protocol
consumers:
- - ib_create_ah
- - ib_modify_ah
- - ib_query_ah
- - ib_destroy_ah
+ - rdma_create_ah
+ - rdma_modify_ah
+ - rdma_query_ah
+ - rdma_destroy_ah
- ib_post_send
- ib_post_recv
- ib_req_notify_cq
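
For reference, a consumer of the renamed AH API creates and destroys an address handle along these lines (a minimal sketch: pd, port_num and dlid come from the caller's context, and the sleepable flags assume the rdma_create_ah(pd, attr, flags) signature of this era):

    struct rdma_ah_attr ah_attr = {};
    struct ib_ah *ah;

    ah_attr.type = rdma_ah_find_type(pd->device, port_num);
    rdma_ah_set_dlid(&ah_attr, dlid);
    rdma_ah_set_port_num(&ah_attr, port_num);

    ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
    if (IS_ERR(ah))
            return PTR_ERR(ah);
    /* ... attach the AH to UD work requests ... */
    rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);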
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 85e103b147cc..b44b1c322ec8 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -55,6 +55,7 @@ config INFINIBAND_ON_DEMAND_PAGING
bool "InfiniBand on-demand paging support"
depends on INFINIBAND_USER_MEM
select MMU_NOTIFIER
+ select INTERVAL_TREE
default y
---help---
On demand paging support for the InfiniBand subsystem.
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 9b76a8fcdd24..1dd467bed8fc 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -183,7 +183,7 @@ static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
/* Repair the nlmsg header length */
nlmsg_end(skb, nlh);
- rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, GFP_KERNEL);
+ rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, GFP_KERNEL);
/* Make the request retry, so when we get the response from userspace
* we will have something.
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 18e476b3ced0..00fb3eacda19 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -810,6 +810,7 @@ static void release_gid_table(struct ib_device *device,
if (leak)
return;
+ mutex_destroy(&table->lock);
kfree(table->data_vec);
kfree(table);
}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index a68d0ccf67a4..0e3cf3461999 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3046,7 +3046,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
if (status)
pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
status);
- } else {
+ } else if (status) {
pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
}
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 3ec2c415bb70..8b0b5ae22e4c 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -342,12 +342,18 @@ static struct configfs_subsystem cma_subsys = {
int __init cma_configfs_init(void)
{
+ int ret;
+
config_group_init(&cma_subsys.su_group);
mutex_init(&cma_subsys.su_mutex);
- return configfs_register_subsystem(&cma_subsys);
+ ret = configfs_register_subsystem(&cma_subsys);
+ if (ret)
+ mutex_destroy(&cma_subsys.su_mutex);
+ return ret;
}
void __exit cma_configfs_exit(void)
{
configfs_unregister_subsystem(&cma_subsys);
+ mutex_destroy(&cma_subsys.su_mutex);
}
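
The same discipline recurs throughout this series: every mutex_init() gains a matching mutex_destroy() on both the failure-unwind and final-teardown paths, which keeps CONFIG_DEBUG_MUTEXES accounting accurate. The shape, as a minimal sketch (register_thing() is a hypothetical registration step):

    mutex_init(&obj->lock);
    ret = register_thing(obj);
    if (ret) {
            mutex_destroy(&obj->lock);      /* unwind the init on failure */
            return ret;
    }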
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index beee7b7e0d9a..3a8b0911c3bc 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -36,6 +36,8 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/cgroup_rdma.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <rdma/ib_verbs.h>
#include <rdma/opa_addr.h>
@@ -54,8 +56,26 @@ struct pkey_index_qp_list {
struct list_head qp_list;
};
+/**
+ * struct rdma_dev_net - rdma net namespace metadata for a net
+ * @nl_sock: Pointer to netlink socket
+ * @net: Pointer to owner net namespace
+ * @id: xarray id to identify the net namespace.
+ */
+struct rdma_dev_net {
+ struct sock *nl_sock;
+ possible_net_t net;
+ u32 id;
+};
+
extern const struct attribute_group ib_dev_attr_group;
extern bool ib_devices_shared_netns;
+extern unsigned int rdma_dev_net_id;
+
+static inline struct rdma_dev_net *rdma_net_to_dev_net(struct net *net)
+{
+ return net_generic(net, rdma_dev_net_id);
+}
int ib_device_register_sysfs(struct ib_device *device);
void ib_device_unregister_sysfs(struct ib_device *device);
@@ -179,7 +199,6 @@ void ib_mad_cleanup(void);
int ib_sa_init(void);
void ib_sa_cleanup(void);
-int rdma_nl_init(void);
void rdma_nl_exit(void);
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
@@ -365,4 +384,7 @@ void ib_port_unregister_module_stat(struct kobject *kobj);
int ib_device_set_netns_put(struct sk_buff *skb,
struct ib_device *dev, u32 ns_fd);
+
+int rdma_nl_net_init(struct rdma_dev_net *rnet);
+void rdma_nl_net_exit(struct rdma_dev_net *rnet);
#endif /* _CORE_PRIV_H */
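
struct rdma_dev_net is per-namespace state handed out by the netns infrastructure, keyed by rdma_dev_net_id. The registration in device.c presumably follows the standard pernet_operations pattern, matching the init/exit functions in the device.c hunks below (a sketch):

    static struct pernet_operations rdma_dev_net_ops = {
            .init = rdma_dev_init_net,
            .exit = rdma_dev_exit_net,
            .id = &rdma_dev_net_id,
            .size = sizeof(struct rdma_dev_net),
    };

    /* in ib_core_init() */
    ret = register_pernet_device(&rdma_dev_net_ops);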
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index af8c85d18e62..680ad27f497d 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -599,7 +599,7 @@ int rdma_counter_get_mode(struct ib_device *dev, u8 port,
void rdma_counter_init(struct ib_device *dev)
{
struct rdma_port_counter *port_counter;
- u32 port;
+ u32 port, i;
if (!dev->port_data)
return;
@@ -620,13 +620,12 @@ void rdma_counter_init(struct ib_device *dev)
return;
fail:
- rdma_for_each_port(dev, port) {
+ for (i = port; i >= rdma_start_port(dev); i--) {
port_counter = &dev->port_data[port].port_counter;
kfree(port_counter->hstats);
port_counter->hstats = NULL;
+ mutex_destroy(&port_counter->lock);
}
-
- return;
}
void rdma_counter_release(struct ib_device *dev)
@@ -637,5 +636,6 @@ void rdma_counter_release(struct ib_device *dev)
rdma_for_each_port(dev, port) {
port_counter = &dev->port_data[port].port_counter;
kfree(port_counter->hstats);
+ mutex_destroy(&port_counter->lock);
}
}
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index 7c599878ccf7..bbfded6d5d3d 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -253,6 +253,34 @@ out_free_cq:
EXPORT_SYMBOL(__ib_alloc_cq_user);
/**
+ * __ib_alloc_cq_any - allocate a completion queue
+ * @dev: device to allocate the CQ for
+ * @private: driver private data, accessible from cq->cq_context
+ * @nr_cqe: number of CQEs to allocate
+ * @poll_ctx: context to poll the CQ from
+ * @caller: module owner name
+ *
+ * Attempt to spread ULP Completion Queues over each device's interrupt
+ * vectors. A simple best-effort mechanism is used.
+ */
+struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
+ int nr_cqe, enum ib_poll_context poll_ctx,
+ const char *caller)
+{
+ static atomic_t counter;
+ int comp_vector = 0;
+
+ if (dev->num_comp_vectors > 1)
+ comp_vector =
+ atomic_inc_return(&counter) %
+ min_t(int, dev->num_comp_vectors, num_online_cpus());
+
+ return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
+ caller, NULL);
+}
+EXPORT_SYMBOL(__ib_alloc_cq_any);
+
+/**
* ib_free_cq_user - free a completion queue
* @cq: completion queue to free.
* @udata: User data or NULL for kernel object
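
__ib_alloc_cq_any() is meant to be reached through an ib_alloc_cq_any() wrapper that supplies KBUILD_MODNAME, analogous to the existing ib_alloc_cq(). Assuming that wrapper, a ULP that used to hard-code comp_vector 0 now writes (sketch; the private pointer and CQE count are illustrative):

    struct ib_cq *cq;

    cq = ib_alloc_cq_any(device, conn, 128, IB_POLL_SOFTIRQ);
    if (IS_ERR(cq))
            return PTR_ERR(cq);
    /* ... post work; completions arrive via struct ib_cqe callbacks ... */
    ib_free_cq(cq);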
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index ea8661a00651..99c4a55545cf 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -39,7 +39,6 @@
#include <linux/init.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
-#include <net/netns/generic.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/hashtable.h>
@@ -111,17 +110,7 @@ static void ib_client_put(struct ib_client *client)
*/
#define CLIENT_DATA_REGISTERED XA_MARK_1
-/**
- * struct rdma_dev_net - rdma net namespace metadata for a net
- * @net: Pointer to owner net namespace
- * @id: xarray id to identify the net namespace.
- */
-struct rdma_dev_net {
- possible_net_t net;
- u32 id;
-};
-
-static unsigned int rdma_dev_net_id;
+unsigned int rdma_dev_net_id;
/*
* A list of net namespaces is maintained in an xarray. This is necessary
@@ -514,6 +503,9 @@ static void ib_device_release(struct device *device)
rcu_head);
}
+ mutex_destroy(&dev->unregistration_lock);
+ mutex_destroy(&dev->compat_devs_mutex);
+
xa_destroy(&dev->compat_devs);
xa_destroy(&dev->client_data);
kfree_rcu(dev, rcu_head);
@@ -1060,7 +1052,7 @@ int rdma_compatdev_set(u8 enable)
static void rdma_dev_exit_net(struct net *net)
{
- struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
+ struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
struct ib_device *dev;
unsigned long index;
int ret;
@@ -1094,25 +1086,32 @@ static void rdma_dev_exit_net(struct net *net)
}
up_read(&devices_rwsem);
+ rdma_nl_net_exit(rnet);
xa_erase(&rdma_nets, rnet->id);
}
static __net_init int rdma_dev_init_net(struct net *net)
{
- struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
+ struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
unsigned long index;
struct ib_device *dev;
int ret;
+ write_pnet(&rnet->net, net);
+
+ ret = rdma_nl_net_init(rnet);
+ if (ret)
+ return ret;
+
/* No need to create any compat devices in default init_net. */
if (net_eq(net, &init_net))
return 0;
- write_pnet(&rnet->net, net);
-
ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
- if (ret)
+ if (ret) {
+ rdma_nl_net_exit(rnet);
return ret;
+ }
down_read(&devices_rwsem);
xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
@@ -1974,31 +1973,64 @@ void ib_dispatch_event(struct ib_event *event)
}
EXPORT_SYMBOL(ib_dispatch_event);
-/**
- * ib_query_port - Query IB port attributes
- * @device:Device to query
- * @port_num:Port number to query
- * @port_attr:Port attributes
- *
- * ib_query_port() returns the attributes of a port through the
- * @port_attr pointer.
- */
-int ib_query_port(struct ib_device *device,
- u8 port_num,
- struct ib_port_attr *port_attr)
+static int iw_query_port(struct ib_device *device,
+ u8 port_num,
+ struct ib_port_attr *port_attr)
{
- union ib_gid gid;
+ struct in_device *inetdev;
+ struct net_device *netdev;
int err;
- if (!rdma_is_port_valid(device, port_num))
- return -EINVAL;
+ memset(port_attr, 0, sizeof(*port_attr));
+
+ netdev = ib_device_get_netdev(device, port_num);
+ if (!netdev)
+ return -ENODEV;
+
+ dev_put(netdev);
+
+ port_attr->max_mtu = IB_MTU_4096;
+ port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
+
+ if (!netif_carrier_ok(netdev)) {
+ port_attr->state = IB_PORT_DOWN;
+ port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ } else {
+ inetdev = in_dev_get(netdev);
+
+ if (inetdev && inetdev->ifa_list) {
+ port_attr->state = IB_PORT_ACTIVE;
+ port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ in_dev_put(inetdev);
+ } else {
+ port_attr->state = IB_PORT_INIT;
+ port_attr->phys_state =
+ IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
+ }
+ }
+
+ err = device->ops.query_port(device, port_num, port_attr);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int __ib_query_port(struct ib_device *device,
+ u8 port_num,
+ struct ib_port_attr *port_attr)
+{
+ union ib_gid gid = {};
+ int err;
memset(port_attr, 0, sizeof(*port_attr));
+
err = device->ops.query_port(device, port_num, port_attr);
if (err || port_attr->subnet_prefix)
return err;
- if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
+ if (rdma_port_get_link_layer(device, port_num) !=
+ IB_LINK_LAYER_INFINIBAND)
return 0;
err = device->ops.query_gid(device, port_num, 0, &gid);
@@ -2008,6 +2040,28 @@ int ib_query_port(struct ib_device *device,
port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
return 0;
}
+
+/**
+ * ib_query_port - Query IB port attributes
+ * @device:Device to query
+ * @port_num:Port number to query
+ * @port_attr:Port attributes
+ *
+ * ib_query_port() returns the attributes of a port through the
+ * @port_attr pointer.
+ */
+int ib_query_port(struct ib_device *device,
+ u8 port_num,
+ struct ib_port_attr *port_attr)
+{
+ if (!rdma_is_port_valid(device, port_num))
+ return -EINVAL;
+
+ if (rdma_protocol_iwarp(device, port_num))
+ return iw_query_port(device, port_num, port_attr);
+ else
+ return __ib_query_port(device, port_num, port_attr);
+}
EXPORT_SYMBOL(ib_query_port);
static void add_ndev_hash(struct ib_port_data *pdata)
@@ -2562,6 +2616,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, get_vf_config);
SET_DEVICE_OP(dev_ops, get_vf_stats);
SET_DEVICE_OP(dev_ops, init_port);
+ SET_DEVICE_OP(dev_ops, invalidate_range);
SET_DEVICE_OP(dev_ops, iw_accept);
SET_DEVICE_OP(dev_ops, iw_add_ref);
SET_DEVICE_OP(dev_ops, iw_connect);
@@ -2660,12 +2715,6 @@ static int __init ib_core_init(void)
goto err_comp_unbound;
}
- ret = rdma_nl_init();
- if (ret) {
- pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
- goto err_sysfs;
- }
-
ret = addr_init();
if (ret) {
pr_warn("Could't init IB address resolution\n");
@@ -2711,8 +2760,6 @@ err_mad:
err_addr:
addr_cleanup();
err_ibnl:
- rdma_nl_exit();
-err_sysfs:
class_unregister(&ib_class);
err_comp_unbound:
destroy_workqueue(ib_comp_unbound_wq);
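
After this split, ib_query_port() dispatches on the port protocol: iWARP ports derive state, phys_state and MTU from the bound netdev, everything else goes through the verbs query path. Callers are unchanged, e.g.:

    struct ib_port_attr attr;
    int ret;

    ret = ib_query_port(device, port_num, &attr);
    if (ret)
            return ret;
    pr_info("state %d phys_state %d active_mtu %d\n",
            attr.state, attr.phys_state, attr.active_mtu);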
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 7d841b689a1e..e08aec427027 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -148,13 +148,6 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
hlist_del_init(&fmr->cache_node);
fmr->remap_count = 0;
list_add_tail(&fmr->fmr->list, &fmr_list);
-
-#ifdef DEBUG
- if (fmr->ref_count !=0) {
- pr_warn(PFX "Unmapping FMR 0x%08x with ref count %d\n",
- fmr, fmr->ref_count);
- }
-#endif
}
list_splice_init(&pool->dirty_list, &unmap_list);
@@ -496,12 +489,6 @@ void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
}
}
-#ifdef DEBUG
- if (fmr->ref_count < 0)
- pr_warn(PFX "FMR %p has ref count %d < 0\n",
- fmr, fmr->ref_count);
-#endif
-
spin_unlock_irqrestore(&pool->pool_lock, flags);
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 2452b0ddcf0d..46686990a827 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -112,7 +112,7 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
__func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);
- ret = rdma_nl_multicast(skb, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
+ ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
iwpm_user_pid = IWPM_PID_UNAVAILABLE;
@@ -124,8 +124,7 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
return ret;
pid_query_error:
pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
if (nlmsg_request)
iwpm_free_nlmsg_request(&nlmsg_request->kref);
return ret;
@@ -202,7 +201,7 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
nlmsg_end(skb, nlh);
nlmsg_request->req_buffer = pm_msg;
- ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
+ ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
iwpm_user_pid = IWPM_PID_UNDEFINED;
@@ -214,8 +213,7 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
add_mapping_error:
pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
add_mapping_error_nowarn:
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
if (nlmsg_request)
iwpm_free_nlmsg_request(&nlmsg_request->kref);
return ret;
@@ -297,7 +295,7 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
nlmsg_end(skb, nlh);
nlmsg_request->req_buffer = pm_msg;
- ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
+ ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
err_str = "Unable to send a nlmsg";
@@ -308,8 +306,7 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
query_mapping_error:
pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
query_mapping_error_nowarn:
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
if (nlmsg_request)
iwpm_free_nlmsg_request(&nlmsg_request->kref);
return ret;
@@ -364,7 +361,7 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
nlmsg_end(skb, nlh);
- ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
+ ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
iwpm_user_pid = IWPM_PID_UNDEFINED;
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 41929bb83739..13495b43dbc1 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -645,7 +645,7 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
nlmsg_end(skb, nlh);
- ret = rdma_nl_unicast(skb, iwpm_pid);
+ ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
if (ret) {
skb = NULL;
err_str = "Unable to send a nlmsg";
@@ -655,8 +655,7 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
return 0;
mapinfo_num_error:
pr_info("%s: %s\n", __func__, err_str);
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
return ret;
}
@@ -674,7 +673,7 @@ static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)
return -ENOMEM;
}
nlh->nlmsg_type = NLMSG_DONE;
- ret = rdma_nl_unicast(skb, iwpm_pid);
+ ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
if (ret)
pr_warn("%s Unable to send a nlmsg\n", __func__);
return ret;
@@ -778,8 +777,7 @@ send_mapping_info_unlock:
send_mapping_info_exit:
if (ret) {
pr_warn("%s: %s (ret = %d)\n", __func__, err_str, ret);
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
return ret;
}
send_nlmsg_done(skb, nl_client, iwpm_pid);
@@ -824,7 +822,7 @@ int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version)
goto hello_num_error;
nlmsg_end(skb, nlh);
- ret = rdma_nl_unicast(skb, iwpm_pid);
+ ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
if (ret) {
skb = NULL;
err_str = "Unable to send a nlmsg";
@@ -834,7 +832,6 @@ int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version)
return 0;
hello_num_error:
pr_info("%s: %s\n", __func__, err_str);
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
return ret;
}
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index eecfc0b377c9..81dbd5f41bed 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -36,20 +36,22 @@
#include <linux/export.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <net/sock.h>
#include <rdma/rdma_netlink.h>
#include <linux/module.h>
#include "core_priv.h"
static DEFINE_MUTEX(rdma_nl_mutex);
-static struct sock *nls;
static struct {
const struct rdma_nl_cbs *cb_table;
} rdma_nl_types[RDMA_NL_NUM_CLIENTS];
bool rdma_nl_chk_listeners(unsigned int group)
{
- return netlink_has_listeners(nls, group);
+ struct rdma_dev_net *rnet = rdma_net_to_dev_net(&init_net);
+
+ return netlink_has_listeners(rnet->nl_sock, group);
}
EXPORT_SYMBOL(rdma_nl_chk_listeners);
@@ -73,13 +75,21 @@ static bool is_nl_msg_valid(unsigned int type, unsigned int op)
return (op < max_num_ops[type]) ? true : false;
}
-static bool is_nl_valid(unsigned int type, unsigned int op)
+static bool
+is_nl_valid(const struct sk_buff *skb, unsigned int type, unsigned int op)
{
const struct rdma_nl_cbs *cb_table;
if (!is_nl_msg_valid(type, op))
return false;
+ /*
+ * Currently only NLDEV client is supporting netlink commands in
+ * non init_net net namespace.
+ */
+ if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
+ return false;
+
if (!rdma_nl_types[type].cb_table) {
mutex_unlock(&rdma_nl_mutex);
request_module("rdma-netlink-subsys-%d", type);
@@ -161,7 +171,7 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
unsigned int op = RDMA_NL_GET_OP(type);
const struct rdma_nl_cbs *cb_table;
- if (!is_nl_valid(index, op))
+ if (!is_nl_valid(skb, index, op))
return -EINVAL;
cb_table = rdma_nl_types[index].cb_table;
@@ -185,7 +195,7 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
.dump = cb_table[op].dump,
};
if (c.dump)
- return netlink_dump_start(nls, skb, nlh, &c);
+ return netlink_dump_start(skb->sk, skb, nlh, &c);
return -EINVAL;
}
@@ -258,52 +268,65 @@ static void rdma_nl_rcv(struct sk_buff *skb)
mutex_unlock(&rdma_nl_mutex);
}
-int rdma_nl_unicast(struct sk_buff *skb, u32 pid)
+int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
{
+ struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
int err;
- err = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
+ err = netlink_unicast(rnet->nl_sock, skb, pid, MSG_DONTWAIT);
return (err < 0) ? err : 0;
}
EXPORT_SYMBOL(rdma_nl_unicast);
-int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid)
+int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid)
{
+ struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
int err;
- err = netlink_unicast(nls, skb, pid, 0);
+ err = netlink_unicast(rnet->nl_sock, skb, pid, 0);
return (err < 0) ? err : 0;
}
EXPORT_SYMBOL(rdma_nl_unicast_wait);
-int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags)
+int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
+ unsigned int group, gfp_t flags)
{
- return nlmsg_multicast(nls, skb, 0, group, flags);
+ struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
+
+ return nlmsg_multicast(rnet->nl_sock, skb, 0, group, flags);
}
EXPORT_SYMBOL(rdma_nl_multicast);
-int __init rdma_nl_init(void)
+void rdma_nl_exit(void)
+{
+ int idx;
+
+ for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
+ WARN(rdma_nl_types[idx].cb_table,
+ "Netlink client %d wasn't released prior to unloading %s\n",
+ idx, KBUILD_MODNAME);
+}
+
+int rdma_nl_net_init(struct rdma_dev_net *rnet)
{
+ struct net *net = read_pnet(&rnet->net);
struct netlink_kernel_cfg cfg = {
.input = rdma_nl_rcv,
};
+ struct sock *nls;
- nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg);
+ nls = netlink_kernel_create(net, NETLINK_RDMA, &cfg);
if (!nls)
return -ENOMEM;
nls->sk_sndtimeo = 10 * HZ;
+ rnet->nl_sock = nls;
return 0;
}
-void rdma_nl_exit(void)
+void rdma_nl_net_exit(struct rdma_dev_net *rnet)
{
- int idx;
-
- for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
- rdma_nl_unregister(idx);
-
- netlink_kernel_release(nls);
+ netlink_kernel_release(rnet->nl_sock);
}
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_RDMA);
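
The send helpers now take the target namespace explicitly and look up the per-net socket via rdma_net_to_dev_net(). A typical sender checks for listeners first and, for the clients still restricted to init_net, passes &init_net directly (sketch, following the addr.c and sa_query.c call sites in this diff):

    if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
            ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS,
                                    GFP_KERNEL);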
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 020c26976558..7a7474000100 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -831,7 +831,7 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_end(msg, nlh);
ib_device_put(device);
- return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_free:
nlmsg_free(msg);
@@ -971,7 +971,7 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_end(msg, nlh);
ib_device_put(device);
- return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_free:
nlmsg_free(msg);
@@ -1073,7 +1073,7 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_end(msg, nlh);
ib_device_put(device);
- return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_free:
nlmsg_free(msg);
@@ -1250,7 +1250,7 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_end(msg, nlh);
ib_device_put(device);
- return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_free:
nlmsg_free(msg);
@@ -1595,7 +1595,7 @@ static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
put_device(data.cdev);
if (ibdev)
ib_device_put(ibdev);
- return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
out_data:
put_device(data.cdev);
@@ -1635,7 +1635,7 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
return err;
}
nlmsg_end(msg, nlh);
- return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
}
static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1733,7 +1733,7 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_end(msg, nlh);
ib_device_put(device);
- return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_fill:
rdma_counter_unbind_qpn(device, port, qpn, cntn);
@@ -1801,7 +1801,7 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_end(msg, nlh);
ib_device_put(device);
- return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_fill:
rdma_counter_bind_qpn(device, port, qpn, cntn);
@@ -1892,7 +1892,7 @@ static int stat_get_doit_default_counter(struct sk_buff *skb,
mutex_unlock(&stats->lock);
nlmsg_end(msg, nlh);
ib_device_put(device);
- return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_table:
nla_nest_cancel(msg, table_attr);
@@ -1964,7 +1964,7 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_end(msg, nlh);
ib_device_put(device);
- return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_msg:
nlmsg_free(msg);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7d8071c7e564..17fc2936c077 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -860,7 +860,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
/* Repair the nlmsg header length */
nlmsg_end(skb, nlh);
- return rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask);
+ return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
}
static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index b477295a96c2..7a50cedcef1f 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -289,6 +289,24 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
ib_width_enum_to_int(attr.active_width), speed);
}
+static const char *phys_state_to_str(enum ib_port_phys_state phys_state)
+{
+ static const char * phys_state_str[] = {
+ "<unknown>",
+ "Sleep",
+ "Polling",
+ "Disabled",
+ "PortConfigurationTraining",
+ "LinkUp",
+ "LinkErrorRecovery",
+ "Phy Test",
+ };
+
+ if (phys_state < ARRAY_SIZE(phys_state_str))
+ return phys_state_str[phys_state];
+ return "<unknown>";
+}
+
static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
char *buf)
{
@@ -300,16 +318,8 @@ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
if (ret)
return ret;
- switch (attr.phys_state) {
- case 1: return sprintf(buf, "1: Sleep\n");
- case 2: return sprintf(buf, "2: Polling\n");
- case 3: return sprintf(buf, "3: Disabled\n");
- case 4: return sprintf(buf, "4: PortConfigurationTraining\n");
- case 5: return sprintf(buf, "5: LinkUp\n");
- case 6: return sprintf(buf, "6: LinkErrorRecovery\n");
- case 7: return sprintf(buf, "7: Phy Test\n");
- default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
- }
+ return sprintf(buf, "%d: %s\n", attr.phys_state,
+ phys_state_to_str(attr.phys_state));
}
static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
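
The IB_PORT_PHYS_STATE_* constants replace raw numbers across the drivers below; judging by the values the old switch printed, the enum added to include/rdma/ib_verbs.h presumably looks like:

    enum ib_port_phys_state {
            IB_PORT_PHYS_STATE_SLEEP = 1,
            IB_PORT_PHYS_STATE_POLLING = 2,
            IB_PORT_PHYS_STATE_DISABLED = 3,
            IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
            IB_PORT_PHYS_STATE_LINK_UP = 5,
            IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
            IB_PORT_PHYS_STATE_PHY_TEST = 7,
    };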
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 56553668256f..37eb8643ec29 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -184,9 +184,6 @@ EXPORT_SYMBOL(ib_umem_find_best_pgsz);
/**
* ib_umem_get - Pin and DMA map userspace memory.
*
- * If access flags indicate ODP memory, avoid pinning. Instead, stores
- * the mm for future page fault handling in conjunction with MMU notifiers.
- *
* @udata: userspace context to pin memory for
* @addr: userspace virtual address to start at
* @size: length of region to pin
@@ -231,17 +228,12 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
if (!can_do_mlock())
return ERR_PTR(-EPERM);
- if (access & IB_ACCESS_ON_DEMAND) {
- umem = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
- if (!umem)
- return ERR_PTR(-ENOMEM);
- umem->is_odp = 1;
- } else {
- umem = kzalloc(sizeof(*umem), GFP_KERNEL);
- if (!umem)
- return ERR_PTR(-ENOMEM);
- }
+ if (access & IB_ACCESS_ON_DEMAND)
+ return ERR_PTR(-EOPNOTSUPP);
+ umem = kzalloc(sizeof(*umem), GFP_KERNEL);
+ if (!umem)
+ return ERR_PTR(-ENOMEM);
umem->context = context;
umem->length = size;
umem->address = addr;
@@ -249,18 +241,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
umem->owning_mm = mm = current->mm;
mmgrab(mm);
- if (access & IB_ACCESS_ON_DEMAND) {
- if (WARN_ON_ONCE(!context->invalidate_range)) {
- ret = -EINVAL;
- goto umem_kfree;
- }
-
- ret = ib_umem_odp_get(to_ib_umem_odp(umem), access);
- if (ret)
- goto umem_kfree;
- return umem;
- }
-
page_list = (struct page **) __get_free_page(GFP_KERNEL);
if (!page_list) {
ret = -ENOMEM;
@@ -346,15 +326,6 @@ umem_kfree:
}
EXPORT_SYMBOL(ib_umem_get);
-static void __ib_umem_release_tail(struct ib_umem *umem)
-{
- mmdrop(umem->owning_mm);
- if (umem->is_odp)
- kfree(to_ib_umem_odp(umem));
- else
- kfree(umem);
-}
-
/**
* ib_umem_release - release memory pinned with ib_umem_get
* @umem: umem struct to release
@@ -363,17 +334,14 @@ void ib_umem_release(struct ib_umem *umem)
{
if (!umem)
return;
-
- if (umem->is_odp) {
- ib_umem_odp_release(to_ib_umem_odp(umem));
- __ib_umem_release_tail(umem);
- return;
- }
+ if (umem->is_odp)
+ return ib_umem_odp_release(to_ib_umem_odp(umem));
__ib_umem_release(umem->context->device, umem, 1);
atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
- __ib_umem_release_tail(umem);
+ mmdrop(umem->owning_mm);
+ kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);
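
With ODP stripped out of ib_umem_get(), an ODP-aware driver is expected to branch to ib_umem_odp_get() itself, roughly as follows (sketch; the trailing dmasync argument of ib_umem_get() is assumed from the contemporaneous signature):

    if (access_flags & IB_ACCESS_ON_DEMAND) {
            odp = ib_umem_odp_get(udata, start, length, access_flags);
            if (IS_ERR(odp))
                    return PTR_ERR(odp);
            umem = &odp->umem;
    } else {
            umem = ib_umem_get(udata, start, length, access_flags, 0);
            if (IS_ERR(umem))
                    return PTR_ERR(umem);
    }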
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index c0e15db34680..bd01c77aeef0 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -39,44 +39,14 @@
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
-#include <linux/interval_tree_generic.h>
+#include <linux/interval_tree.h>
#include <linux/pagemap.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
-/*
- * The ib_umem list keeps track of memory regions for which the HW
- * device request to receive notification when the related memory
- * mapping is changed.
- *
- * ib_umem_lock protects the list.
- */
-
-static u64 node_start(struct umem_odp_node *n)
-{
- struct ib_umem_odp *umem_odp =
- container_of(n, struct ib_umem_odp, interval_tree);
-
- return ib_umem_start(umem_odp);
-}
-
-/* Note that the representation of the intervals in the interval tree
- * considers the ending point as contained in the interval, while the
- * function ib_umem_end returns the first address which is not contained
- * in the umem.
- */
-static u64 node_last(struct umem_odp_node *n)
-{
- struct ib_umem_odp *umem_odp =
- container_of(n, struct ib_umem_odp, interval_tree);
-
- return ib_umem_end(umem_odp) - 1;
-}
-
-INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
- node_start, node_last, static, rbt_ib_umem)
+#include "uverbs.h"
static void ib_umem_notifier_start_account(struct ib_umem_odp *umem_odp)
{
@@ -104,31 +74,34 @@ static void ib_umem_notifier_end_account(struct ib_umem_odp *umem_odp)
mutex_unlock(&umem_odp->umem_mutex);
}
-static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
- u64 start, u64 end, void *cookie)
-{
- /*
- * Increase the number of notifiers running, to
- * prevent any further fault handling on this MR.
- */
- ib_umem_notifier_start_account(umem_odp);
- complete_all(&umem_odp->notifier_completion);
- umem_odp->umem.context->invalidate_range(
- umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp));
- return 0;
-}
-
static void ib_umem_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
struct ib_ucontext_per_mm *per_mm =
container_of(mn, struct ib_ucontext_per_mm, mn);
+ struct rb_node *node;
down_read(&per_mm->umem_rwsem);
- if (per_mm->active)
- rbt_ib_umem_for_each_in_range(
- &per_mm->umem_tree, 0, ULLONG_MAX,
- ib_umem_notifier_release_trampoline, true, NULL);
+ if (!per_mm->active)
+ goto out;
+
+ for (node = rb_first_cached(&per_mm->umem_tree); node;
+ node = rb_next(node)) {
+ struct ib_umem_odp *umem_odp =
+ rb_entry(node, struct ib_umem_odp, interval_tree.rb);
+
+ /*
+ * Increase the number of notifiers running, to prevent any
+ * further fault handling on this MR.
+ */
+ ib_umem_notifier_start_account(umem_odp);
+ complete_all(&umem_odp->notifier_completion);
+ umem_odp->umem.context->device->ops.invalidate_range(
+ umem_odp, ib_umem_start(umem_odp),
+ ib_umem_end(umem_odp));
+ }
+
+out:
up_read(&per_mm->umem_rwsem);
}
@@ -136,7 +109,7 @@ static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
u64 start, u64 end, void *cookie)
{
ib_umem_notifier_start_account(item);
- item->umem.context->invalidate_range(item, start, end);
+ item->umem.context->device->ops.invalidate_range(item, start, end);
return 0;
}
@@ -200,27 +173,13 @@ static const struct mmu_notifier_ops ib_umem_notifiers = {
.invalidate_range_end = ib_umem_notifier_invalidate_range_end,
};
-static void add_umem_to_per_mm(struct ib_umem_odp *umem_odp)
-{
- struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
-
- down_write(&per_mm->umem_rwsem);
- if (likely(ib_umem_start(umem_odp) != ib_umem_end(umem_odp)))
- rbt_ib_umem_insert(&umem_odp->interval_tree,
- &per_mm->umem_tree);
- up_write(&per_mm->umem_rwsem);
-}
-
static void remove_umem_from_per_mm(struct ib_umem_odp *umem_odp)
{
struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
down_write(&per_mm->umem_rwsem);
- if (likely(ib_umem_start(umem_odp) != ib_umem_end(umem_odp)))
- rbt_ib_umem_remove(&umem_odp->interval_tree,
- &per_mm->umem_tree);
+ interval_tree_remove(&umem_odp->interval_tree, &per_mm->umem_tree);
complete_all(&umem_odp->notifier_completion);
-
up_write(&per_mm->umem_rwsem);
}
@@ -263,33 +222,23 @@ out_pid:
return ERR_PTR(ret);
}
-static int get_per_mm(struct ib_umem_odp *umem_odp)
+static struct ib_ucontext_per_mm *get_per_mm(struct ib_umem_odp *umem_odp)
{
struct ib_ucontext *ctx = umem_odp->umem.context;
struct ib_ucontext_per_mm *per_mm;
+ lockdep_assert_held(&ctx->per_mm_list_lock);
+
/*
* Generally speaking we expect only one or two per_mm in this list,
* so no reason to optimize this search today.
*/
- mutex_lock(&ctx->per_mm_list_lock);
list_for_each_entry(per_mm, &ctx->per_mm_list, ucontext_list) {
if (per_mm->mm == umem_odp->umem.owning_mm)
- goto found;
+ return per_mm;
}
- per_mm = alloc_per_mm(ctx, umem_odp->umem.owning_mm);
- if (IS_ERR(per_mm)) {
- mutex_unlock(&ctx->per_mm_list_lock);
- return PTR_ERR(per_mm);
- }
-
-found:
- umem_odp->per_mm = per_mm;
- per_mm->odp_mrs_count++;
- mutex_unlock(&ctx->per_mm_list_lock);
-
- return 0;
+ return alloc_per_mm(ctx, umem_odp->umem.owning_mm);
}
static void free_per_mm(struct rcu_head *rcu)
@@ -330,76 +279,218 @@ static void put_per_mm(struct ib_umem_odp *umem_odp)
mmu_notifier_call_srcu(&per_mm->rcu, free_per_mm);
}
-struct ib_umem_odp *ib_alloc_odp_umem(struct ib_umem_odp *root,
- unsigned long addr, size_t size)
+static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
+ struct ib_ucontext_per_mm *per_mm)
+{
+ struct ib_ucontext *ctx = umem_odp->umem.context;
+ int ret;
+
+ umem_odp->umem.is_odp = 1;
+ if (!umem_odp->is_implicit_odp) {
+ size_t page_size = 1UL << umem_odp->page_shift;
+ size_t pages;
+
+ umem_odp->interval_tree.start =
+ ALIGN_DOWN(umem_odp->umem.address, page_size);
+ if (check_add_overflow(umem_odp->umem.address,
+ (unsigned long)umem_odp->umem.length,
+ &umem_odp->interval_tree.last))
+ return -EOVERFLOW;
+ umem_odp->interval_tree.last =
+ ALIGN(umem_odp->interval_tree.last, page_size);
+ if (unlikely(umem_odp->interval_tree.last < page_size))
+ return -EOVERFLOW;
+
+ pages = (umem_odp->interval_tree.last -
+ umem_odp->interval_tree.start) >>
+ umem_odp->page_shift;
+ if (!pages)
+ return -EINVAL;
+
+ /*
+ * Note that the representation of the intervals in the
+ * interval tree considers the ending point as contained in
+ * the interval.
+ */
+ umem_odp->interval_tree.last--;
+
+ umem_odp->page_list = kvcalloc(
+ pages, sizeof(*umem_odp->page_list), GFP_KERNEL);
+ if (!umem_odp->page_list)
+ return -ENOMEM;
+
+ umem_odp->dma_list = kvcalloc(
+ pages, sizeof(*umem_odp->dma_list), GFP_KERNEL);
+ if (!umem_odp->dma_list) {
+ ret = -ENOMEM;
+ goto out_page_list;
+ }
+ }
+
+ mutex_lock(&ctx->per_mm_list_lock);
+ if (!per_mm) {
+ per_mm = get_per_mm(umem_odp);
+ if (IS_ERR(per_mm)) {
+ ret = PTR_ERR(per_mm);
+ goto out_unlock;
+ }
+ }
+ umem_odp->per_mm = per_mm;
+ per_mm->odp_mrs_count++;
+ mutex_unlock(&ctx->per_mm_list_lock);
+
+ mutex_init(&umem_odp->umem_mutex);
+ init_completion(&umem_odp->notifier_completion);
+
+ if (!umem_odp->is_implicit_odp) {
+ down_write(&per_mm->umem_rwsem);
+ interval_tree_insert(&umem_odp->interval_tree,
+ &per_mm->umem_tree);
+ up_write(&per_mm->umem_rwsem);
+ }
+ mmgrab(umem_odp->umem.owning_mm);
+
+ return 0;
+
+out_unlock:
+ mutex_unlock(&ctx->per_mm_list_lock);
+ kvfree(umem_odp->dma_list);
+out_page_list:
+ kvfree(umem_odp->page_list);
+ return ret;
+}
+
+/**
+ * ib_umem_odp_alloc_implicit - Allocate a parent implicit ODP umem
+ *
+ * Implicit ODP umems do not have a VA range and do not have any page lists.
+ * They exist only to hold the per_mm reference to help the driver create
+ * children umems.
+ *
+ * @udata: udata from the syscall being used to create the umem
+ * @access: ib_reg_mr access flags
+ */
+struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
+ int access)
+{
+ struct ib_ucontext *context =
+ container_of(udata, struct uverbs_attr_bundle, driver_udata)
+ ->context;
+ struct ib_umem *umem;
+ struct ib_umem_odp *umem_odp;
+ int ret;
+
+ if (access & IB_ACCESS_HUGETLB)
+ return ERR_PTR(-EINVAL);
+
+ if (!context)
+ return ERR_PTR(-EIO);
+ if (WARN_ON_ONCE(!context->device->ops.invalidate_range))
+ return ERR_PTR(-EINVAL);
+
+ umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
+ if (!umem_odp)
+ return ERR_PTR(-ENOMEM);
+ umem = &umem_odp->umem;
+ umem->context = context;
+ umem->writable = ib_access_writable(access);
+ umem->owning_mm = current->mm;
+ umem_odp->is_implicit_odp = 1;
+ umem_odp->page_shift = PAGE_SHIFT;
+
+ ret = ib_init_umem_odp(umem_odp, NULL);
+ if (ret) {
+ kfree(umem_odp);
+ return ERR_PTR(ret);
+ }
+ return umem_odp;
+}
+EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
+
+/**
+ * ib_umem_odp_alloc_child - Allocate a child ODP umem under an implicit
+ * parent ODP umem
+ *
+ * @root: The parent umem enclosing the child. This must be allocated using
+ * ib_alloc_implicit_odp_umem()
+ * @addr: The starting userspace VA
+ * @size: The length of the userspace VA
+ */
+struct ib_umem_odp *ib_umem_odp_alloc_child(struct ib_umem_odp *root,
+ unsigned long addr, size_t size)
{
- struct ib_ucontext_per_mm *per_mm = root->per_mm;
- struct ib_ucontext *ctx = per_mm->context;
+ /*
+ * Caller must ensure that root cannot be freed during the call to
+ * ib_alloc_odp_umem.
+ */
struct ib_umem_odp *odp_data;
struct ib_umem *umem;
- int pages = size >> PAGE_SHIFT;
int ret;
+ if (WARN_ON(!root->is_implicit_odp))
+ return ERR_PTR(-EINVAL);
+
odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
if (!odp_data)
return ERR_PTR(-ENOMEM);
umem = &odp_data->umem;
- umem->context = ctx;
+ umem->context = root->umem.context;
umem->length = size;
umem->address = addr;
- odp_data->page_shift = PAGE_SHIFT;
umem->writable = root->umem.writable;
- umem->is_odp = 1;
- odp_data->per_mm = per_mm;
- umem->owning_mm = per_mm->mm;
- mmgrab(umem->owning_mm);
-
- mutex_init(&odp_data->umem_mutex);
- init_completion(&odp_data->notifier_completion);
-
- odp_data->page_list =
- vzalloc(array_size(pages, sizeof(*odp_data->page_list)));
- if (!odp_data->page_list) {
- ret = -ENOMEM;
- goto out_odp_data;
- }
+ umem->owning_mm = root->umem.owning_mm;
+ odp_data->page_shift = PAGE_SHIFT;
- odp_data->dma_list =
- vzalloc(array_size(pages, sizeof(*odp_data->dma_list)));
- if (!odp_data->dma_list) {
- ret = -ENOMEM;
- goto out_page_list;
+ ret = ib_init_umem_odp(odp_data, root->per_mm);
+ if (ret) {
+ kfree(odp_data);
+ return ERR_PTR(ret);
}
-
- /*
- * Caller must ensure that the umem_odp that the per_mm came from
- * cannot be freed during the call to ib_alloc_odp_umem.
- */
- mutex_lock(&ctx->per_mm_list_lock);
- per_mm->odp_mrs_count++;
- mutex_unlock(&ctx->per_mm_list_lock);
- add_umem_to_per_mm(odp_data);
-
return odp_data;
-
-out_page_list:
- vfree(odp_data->page_list);
-out_odp_data:
- mmdrop(umem->owning_mm);
- kfree(odp_data);
- return ERR_PTR(ret);
}
-EXPORT_SYMBOL(ib_alloc_odp_umem);
+EXPORT_SYMBOL(ib_umem_odp_alloc_child);
-int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
+/**
+ * ib_umem_odp_get - Create a umem_odp for a userspace va
+ *
+ * @udata: userspace context to pin memory for
+ * @addr: userspace virtual address to start at
+ * @size: length of region to pin
+ * @access: IB_ACCESS_xxx flags for memory being pinned
+ *
+ * The driver should use when the access flags indicate ODP memory. It avoids
+ * pinning, instead, stores the mm for future page fault handling in
+ * conjunction with MMU notifiers.
+ */
+struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
+ size_t size, int access)
{
- struct ib_umem *umem = &umem_odp->umem;
- /*
- * NOTE: This must called in a process context where umem->owning_mm
- * == current->mm
- */
- struct mm_struct *mm = umem->owning_mm;
- int ret_val;
+ struct ib_umem_odp *umem_odp;
+ struct ib_ucontext *context;
+ struct mm_struct *mm;
+ int ret;
+
+ if (!udata)
+ return ERR_PTR(-EIO);
+
+ context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
+ ->context;
+ if (!context)
+ return ERR_PTR(-EIO);
+
+ if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)) ||
+ WARN_ON_ONCE(!context->device->ops.invalidate_range))
+ return ERR_PTR(-EINVAL);
+
+ umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
+ if (!umem_odp)
+ return ERR_PTR(-ENOMEM);
+
+ umem_odp->umem.context = context;
+ umem_odp->umem.length = size;
+ umem_odp->umem.address = addr;
+ umem_odp->umem.writable = ib_access_writable(access);
+ umem_odp->umem.owning_mm = mm = current->mm;
umem_odp->page_shift = PAGE_SHIFT;
if (access & IB_ACCESS_HUGETLB) {
@@ -410,46 +501,24 @@ int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
vma = find_vma(mm, ib_umem_start(umem_odp));
if (!vma || !is_vm_hugetlb_page(vma)) {
up_read(&mm->mmap_sem);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_free;
}
h = hstate_vma(vma);
umem_odp->page_shift = huge_page_shift(h);
up_read(&mm->mmap_sem);
}
- mutex_init(&umem_odp->umem_mutex);
-
- init_completion(&umem_odp->notifier_completion);
+ ret = ib_init_umem_odp(umem_odp, NULL);
+ if (ret)
+ goto err_free;
+ return umem_odp;
- if (ib_umem_odp_num_pages(umem_odp)) {
- umem_odp->page_list =
- vzalloc(array_size(sizeof(*umem_odp->page_list),
- ib_umem_odp_num_pages(umem_odp)));
- if (!umem_odp->page_list)
- return -ENOMEM;
-
- umem_odp->dma_list =
- vzalloc(array_size(sizeof(*umem_odp->dma_list),
- ib_umem_odp_num_pages(umem_odp)));
- if (!umem_odp->dma_list) {
- ret_val = -ENOMEM;
- goto out_page_list;
- }
- }
-
- ret_val = get_per_mm(umem_odp);
- if (ret_val)
- goto out_dma_list;
- add_umem_to_per_mm(umem_odp);
-
- return 0;
-
-out_dma_list:
- vfree(umem_odp->dma_list);
-out_page_list:
- vfree(umem_odp->page_list);
- return ret_val;
+err_free:
+ kfree(umem_odp);
+ return ERR_PTR(ret);
}
+EXPORT_SYMBOL(ib_umem_odp_get);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
@@ -459,14 +528,18 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
* It is the driver's responsibility to ensure, before calling us,
* that the hardware will not attempt to access the MR any more.
*/
- ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
- ib_umem_end(umem_odp));
-
- remove_umem_from_per_mm(umem_odp);
+ if (!umem_odp->is_implicit_odp) {
+ ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
+ ib_umem_end(umem_odp));
+ remove_umem_from_per_mm(umem_odp);
+ kvfree(umem_odp->dma_list);
+ kvfree(umem_odp->page_list);
+ }
put_per_mm(umem_odp);
- vfree(umem_odp->dma_list);
- vfree(umem_odp->page_list);
+ mmdrop(umem_odp->umem.owning_mm);
+ kfree(umem_odp);
}
+EXPORT_SYMBOL(ib_umem_odp_release);
/*
* Map for DMA and insert a single page into the on-demand paging page tables.
@@ -534,7 +607,7 @@ out:
if (remove_existing_mapping) {
ib_umem_notifier_start_account(umem_odp);
- context->invalidate_range(
+ dev->ops.invalidate_range(
umem_odp,
ib_umem_start(umem_odp) +
(page_index << umem_odp->page_shift),
@@ -761,35 +834,21 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
void *cookie)
{
int ret_val = 0;
- struct umem_odp_node *node, *next;
+ struct interval_tree_node *node, *next;
struct ib_umem_odp *umem;
if (unlikely(start == last))
return ret_val;
- for (node = rbt_ib_umem_iter_first(root, start, last - 1);
+ for (node = interval_tree_iter_first(root, start, last - 1);
node; node = next) {
/* TODO move the blockable decision up to the callback */
if (!blockable)
return -EAGAIN;
- next = rbt_ib_umem_iter_next(node, start, last - 1);
+ next = interval_tree_iter_next(node, start, last - 1);
umem = container_of(node, struct ib_umem_odp, interval_tree);
ret_val = cb(umem, start, last, cookie) || ret_val;
}
return ret_val;
}
-EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);
-
-struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
- u64 addr, u64 length)
-{
- struct umem_odp_node *node;
-
- node = rbt_ib_umem_iter_first(root, addr, addr + length - 1);
- if (node)
- return container_of(node, struct ib_umem_odp, interval_tree);
- return NULL;
-
-}
-EXPORT_SYMBOL(rbt_ib_umem_lookup);
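
The ODP entry points are now self-contained allocators returning ERR_PTR values, mirroring ib_umem_get(). A driver such as mlx5 would use them roughly like this (sketch; error checks on the child path elided):

    /* explicit ODP MR over a user VA range */
    odp = ib_umem_odp_get(udata, start, length, access_flags);
    if (IS_ERR(odp))
            return PTR_ERR(odp);

    /* implicit ODP: a parent with no VA range, children added on fault */
    root = ib_umem_odp_alloc_implicit(udata, access_flags);
    child = ib_umem_odp_alloc_child(root, addr, size);

    /* release unmaps DMA, frees the page/dma lists, drops the per_mm */
    ib_umem_odp_release(odp);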
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index ffdeaf6e0b68..d1407fa378e8 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1042,7 +1042,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
ib_unregister_mad_agent(file->agent[i]);
mutex_unlock(&file->port->file_mutex);
-
+ mutex_destroy(&file->mutex);
kfree(file);
return 0;
}
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 7ddd0e5bc6b3..8f4fd4fac159 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -275,8 +275,6 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
ret = ib_dev->ops.alloc_ucontext(ucontext, &attrs->driver_udata);
if (ret)
goto err_file;
- if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
- ucontext->invalidate_range = NULL;
rdma_restrack_uadd(&ucontext->res);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 11c13c1381cf..02b57240176c 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -120,6 +120,8 @@ static void ib_uverbs_release_dev(struct device *device)
uverbs_destroy_api(dev->uapi);
cleanup_srcu_struct(&dev->disassociate_srcu);
+ mutex_destroy(&dev->lists_mutex);
+ mutex_destroy(&dev->xrcd_tree_mutex);
kfree(dev);
}
@@ -212,6 +214,8 @@ void ib_uverbs_release_file(struct kref *ref)
if (file->disassociate_page)
__free_pages(file->disassociate_page, 0);
+ mutex_destroy(&file->umap_lock);
+ mutex_destroy(&file->ucontext_lock);
kfree(file);
}
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 92349bf37589..f974b6854224 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -2259,6 +2259,7 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
if (ret)
return ret;
}
+ mutex_destroy(&xrcd->tgt_qp_mutex);
return xrcd->device->ops.dealloc_xrcd(xrcd, udata);
}
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 098ab883733e..b4149dc9e824 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -220,10 +220,10 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
port_attr->state = IB_PORT_ACTIVE;
- port_attr->phys_state = 5;
+ port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
} else {
port_attr->state = IB_PORT_DOWN;
- port_attr->phys_state = 3;
+ port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
}
port_attr->max_mtu = IB_MTU_4096;
port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
@@ -1398,7 +1398,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
bnxt_qplib_destroy_srq(&rdev->qplib_res,
&srq->qplib_srq);
- goto exit;
+ goto fail;
}
}
if (nq)
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 029babe713f3..30a54f8aa42c 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1473,7 +1473,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
&rdev->active_width);
set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
- bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);
return 0;
free_sctx:
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index e775c1a1a450..dcf02ec02810 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -991,33 +991,8 @@ static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
static int iwch_query_port(struct ib_device *ibdev,
u8 port, struct ib_port_attr *props)
{
- struct iwch_dev *dev;
- struct net_device *netdev;
- struct in_device *inetdev;
-
pr_debug("%s ibdev %p\n", __func__, ibdev);
- dev = to_iwch_dev(ibdev);
- netdev = dev->rdev.port_info.lldevs[port-1];
-
- /* props being zeroed by the caller, avoid zeroing it here */
- props->max_mtu = IB_MTU_4096;
- props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
-
- if (!netif_carrier_ok(netdev))
- props->state = IB_PORT_DOWN;
- else {
- inetdev = in_dev_get(netdev);
- if (inetdev) {
- if (inetdev->ifa_list)
- props->state = IB_PORT_ACTIVE;
- else
- props->state = IB_PORT_INIT;
- in_dev_put(inetdev);
- } else
- props->state = IB_PORT_INIT;
- }
-
props->port_cap_flags =
IB_PORT_CM_SUP |
IB_PORT_SNMP_TUNNEL_SUP |
@@ -1273,8 +1248,24 @@ static const struct ib_device_ops iwch_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ucontext, iwch_ucontext, ibucontext),
};
+static int set_netdevs(struct ib_device *ib_dev, struct cxio_rdev *rdev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < rdev->port_info.nports; i++) {
+ ret = ib_device_set_netdev(ib_dev, rdev->port_info.lldevs[i],
+ i + 1);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
int iwch_register_device(struct iwch_dev *dev)
{
+ int err;
+
pr_debug("%s iwch_dev %p\n", __func__, dev);
memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
@@ -1315,6 +1306,10 @@ int iwch_register_device(struct iwch_dev *dev)
rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group);
ib_set_device_ops(&dev->ibdev, &iwch_dev_ops);
+ err = set_netdevs(&dev->ibdev, &dev->rdev);
+ if (err)
+ return err;
+
return ib_register_device(&dev->ibdev, "cxgb3_%d");
}
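
Once the netdevs are bound with ib_device_set_netdev(), the RDMA core can answer carrier-dependent queries from the netdev itself, which is why the open-coded in_dev_get()/netif_carrier_ok() block disappears from iwch_query_port (and from c4iw_query_port below). A sketch of the probe-time ordering with illustrative driver names; the key point is that the binding happens before ib_register_device():

    /* Sketch: bind one netdev per 1-based port, then register. */
    static int drv_register(struct drv_dev *dev)
    {
            int ret, i;

            for (i = 0; i < dev->nports; i++) {
                    ret = ib_device_set_netdev(&dev->ibdev,
                                               dev->ports[i], i + 1);
                    if (ret)
                            return ret;
            }
            return ib_register_device(&dev->ibdev, "drv_%d");
    }
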
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 5e59c5708729..d373ac0fe2cb 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -305,32 +305,8 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
static int c4iw_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
- struct c4iw_dev *dev;
- struct net_device *netdev;
- struct in_device *inetdev;
-
pr_debug("ibdev %p\n", ibdev);
- dev = to_c4iw_dev(ibdev);
- netdev = dev->rdev.lldi.ports[port-1];
- /* props being zeroed by the caller, avoid zeroing it here */
- props->max_mtu = IB_MTU_4096;
- props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
-
- if (!netif_carrier_ok(netdev))
- props->state = IB_PORT_DOWN;
- else {
- inetdev = in_dev_get(netdev);
- if (inetdev) {
- if (inetdev->ifa_list)
- props->state = IB_PORT_ACTIVE;
- else
- props->state = IB_PORT_INIT;
- in_dev_put(inetdev);
- } else
- props->state = IB_PORT_INIT;
- }
-
props->port_cap_flags =
IB_PORT_CM_SUP |
IB_PORT_SNMP_TUNNEL_SUP |
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index 119f8efec564..2283e432693e 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -156,5 +156,8 @@ int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_udata *udata);
enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
u8 port_num);
+struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num);
+int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u8 port_num, int index);
#endif /* _EFA_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index 2cb42484b0f8..3c412bc5b94f 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -109,17 +109,19 @@ static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)
} while (time_is_after_jiffies(exp_time));
if (read_resp->req_id != mmio_read->seq_num) {
- ibdev_err(edev->efa_dev,
- "Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n",
- mmio_read->seq_num, offset, read_resp->req_id,
- read_resp->reg_off);
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n",
+ mmio_read->seq_num, offset, read_resp->req_id,
+ read_resp->reg_off);
err = EFA_MMIO_READ_INVALID;
goto out;
}
if (read_resp->reg_off != offset) {
- ibdev_err(edev->efa_dev,
- "Reading register failed: wrong offset provided\n");
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Reading register failed: wrong offset provided\n");
err = EFA_MMIO_READ_INVALID;
goto out;
}
@@ -293,9 +295,10 @@ static struct efa_comp_ctx *efa_com_get_comp_ctx(struct efa_com_admin_queue *aq,
u16 ctx_id = cmd_id & (aq->depth - 1);
if (aq->comp_ctx[ctx_id].occupied && capture) {
- ibdev_err(aq->efa_dev,
- "Completion context for command_id %#x is occupied\n",
- cmd_id);
+ ibdev_err_ratelimited(
+ aq->efa_dev,
+ "Completion context for command_id %#x is occupied\n",
+ cmd_id);
return NULL;
}
@@ -401,7 +404,7 @@ static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue
spin_lock(&aq->sq.lock);
if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) {
- ibdev_err(aq->efa_dev, "Admin queue is closed\n");
+ ibdev_err_ratelimited(aq->efa_dev, "Admin queue is closed\n");
spin_unlock(&aq->sq.lock);
return ERR_PTR(-ENODEV);
}
@@ -519,8 +522,9 @@ static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_c
break;
if (time_is_before_jiffies(timeout)) {
- ibdev_err(aq->efa_dev,
- "Wait for completion (polling) timeout\n");
+ ibdev_err_ratelimited(
+ aq->efa_dev,
+ "Wait for completion (polling) timeout\n");
/* EFA didn't have any completion */
atomic64_inc(&aq->stats.no_completion);
@@ -561,17 +565,19 @@ static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *com
atomic64_inc(&aq->stats.no_completion);
if (comp_ctx->status == EFA_CMD_COMPLETED)
- ibdev_err(aq->efa_dev,
- "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
- efa_com_cmd_str(comp_ctx->cmd_opcode),
- comp_ctx->cmd_opcode, comp_ctx->status,
- comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
+ ibdev_err_ratelimited(
+ aq->efa_dev,
+ "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
+ efa_com_cmd_str(comp_ctx->cmd_opcode),
+ comp_ctx->cmd_opcode, comp_ctx->status,
+ comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
else
- ibdev_err(aq->efa_dev,
- "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
- efa_com_cmd_str(comp_ctx->cmd_opcode),
- comp_ctx->cmd_opcode, comp_ctx->status,
- comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
+ ibdev_err_ratelimited(
+ aq->efa_dev,
+ "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
+ efa_com_cmd_str(comp_ctx->cmd_opcode),
+ comp_ctx->cmd_opcode, comp_ctx->status,
+ comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
err = -ETIME;
@@ -633,10 +639,11 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
cmd->aq_common_descriptor.opcode);
comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size);
if (IS_ERR(comp_ctx)) {
- ibdev_err(aq->efa_dev,
- "Failed to submit command %s (opcode %u) err %ld\n",
- efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
- cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx));
+ ibdev_err_ratelimited(
+ aq->efa_dev,
+ "Failed to submit command %s (opcode %u) err %ld\n",
+ efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
+ cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx));
up(&aq->avail_cmds);
return PTR_ERR(comp_ctx);
@@ -644,11 +651,12 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
err = efa_com_wait_and_process_admin_cq(comp_ctx, aq);
if (err)
- ibdev_err(aq->efa_dev,
- "Failed to process command %s (opcode %u) comp_status %d err %d\n",
- efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
- cmd->aq_common_descriptor.opcode,
- comp_ctx->comp_status, err);
+ ibdev_err_ratelimited(
+ aq->efa_dev,
+ "Failed to process command %s (opcode %u) comp_status %d err %d\n",
+ efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
+ cmd->aq_common_descriptor.opcode, comp_ctx->comp_status,
+ err);
up(&aq->avail_cmds);
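
Every conversion in this file swaps ibdev_err() for ibdev_err_ratelimited() on paths that repeat quickly when the device is wedged, since each failed admin command logs again. The ratelimited variant drops output beyond the default limit, roughly 10 messages per 5 seconds, instead of flooding the log. A minimal sketch of the idea, with a hypothetical poll helper:

    static int drv_wait_ready(struct ib_device *ibdev, struct drv_queue *q)
    {
            int err;

            /* One line per retry would flood the log on a dead device;
             * the _ratelimited variant caps the rate.
             */
            while ((err = drv_poll_one(q)) == -EAGAIN)
                    ibdev_err_ratelimited(ibdev, "queue stalled, retrying\n");
            return err;
    }
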
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index 62345d8abf3c..501dce89f275 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -44,7 +44,8 @@ int efa_com_create_qp(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (err) {
- ibdev_err(edev->efa_dev, "Failed to create qp [%d]\n", err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to create qp [%d]\n", err);
return err;
}
@@ -82,9 +83,10 @@ int efa_com_modify_qp(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&resp,
sizeof(resp));
if (err) {
- ibdev_err(edev->efa_dev,
- "Failed to modify qp-%u modify_mask[%#x] [%d]\n",
- cmd.qp_handle, cmd.modify_mask, err);
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Failed to modify qp-%u modify_mask[%#x] [%d]\n",
+ cmd.qp_handle, cmd.modify_mask, err);
return err;
}
@@ -109,8 +111,9 @@ int efa_com_query_qp(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&resp,
sizeof(resp));
if (err) {
- ibdev_err(edev->efa_dev, "Failed to query qp-%u [%d]\n",
- cmd.qp_handle, err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to query qp-%u [%d]\n",
+ cmd.qp_handle, err);
return err;
}
@@ -139,8 +142,9 @@ int efa_com_destroy_qp(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (err) {
- ibdev_err(edev->efa_dev, "Failed to destroy qp-%u [%d]\n",
- qp_cmd.qp_handle, err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to destroy qp-%u [%d]\n",
+ qp_cmd.qp_handle, err);
return err;
}
@@ -173,7 +177,8 @@ int efa_com_create_cq(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (err) {
- ibdev_err(edev->efa_dev, "Failed to create cq[%d]\n", err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to create cq[%d]\n", err);
return err;
}
@@ -201,8 +206,9 @@ int efa_com_destroy_cq(struct efa_com_dev *edev,
sizeof(destroy_resp));
if (err) {
- ibdev_err(edev->efa_dev, "Failed to destroy CQ-%u [%d]\n",
- params->cq_idx, err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to destroy CQ-%u [%d]\n",
+ params->cq_idx, err);
return err;
}
@@ -250,7 +256,8 @@ int efa_com_register_mr(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (err) {
- ibdev_err(edev->efa_dev, "Failed to register mr [%d]\n", err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to register mr [%d]\n", err);
return err;
}
@@ -277,9 +284,9 @@ int efa_com_dereg_mr(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (err) {
- ibdev_err(edev->efa_dev,
- "Failed to de-register mr(lkey-%u) [%d]\n",
- mr_cmd.l_key, err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to de-register mr(lkey-%u) [%d]\n",
+ mr_cmd.l_key, err);
return err;
}
@@ -306,8 +313,9 @@ int efa_com_create_ah(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (err) {
- ibdev_err(edev->efa_dev, "Failed to create ah for %pI6 [%d]\n",
- ah_cmd.dest_addr, err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to create ah for %pI6 [%d]\n",
+ ah_cmd.dest_addr, err);
return err;
}
@@ -334,8 +342,9 @@ int efa_com_destroy_ah(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (err) {
- ibdev_err(edev->efa_dev, "Failed to destroy ah-%d pd-%d [%d]\n",
- ah_cmd.ah, ah_cmd.pd, err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to destroy ah-%d pd-%d [%d]\n",
+ ah_cmd.ah, ah_cmd.pd, err);
return err;
}
@@ -367,8 +376,9 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev,
int err;
if (!efa_com_check_supported_feature_id(edev, feature_id)) {
- ibdev_err(edev->efa_dev, "Feature %d isn't supported\n",
- feature_id);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Feature %d isn't supported\n",
+ feature_id);
return -EOPNOTSUPP;
}
@@ -396,9 +406,10 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev,
sizeof(*get_resp));
if (err) {
- ibdev_err(edev->efa_dev,
- "Failed to submit get_feature command %d [%d]\n",
- feature_id, err);
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Failed to submit get_feature command %d [%d]\n",
+ feature_id, err);
return err;
}
@@ -421,8 +432,9 @@ int efa_com_get_network_attr(struct efa_com_dev *edev,
err = efa_com_get_feature(edev, &resp,
EFA_ADMIN_NETWORK_ATTR);
if (err) {
- ibdev_err(edev->efa_dev,
- "Failed to get network attributes %d\n", err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to get network attributes %d\n",
+ err);
return err;
}
@@ -441,8 +453,9 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
err = efa_com_get_feature(edev, &resp, EFA_ADMIN_DEVICE_ATTR);
if (err) {
- ibdev_err(edev->efa_dev, "Failed to get device attributes %d\n",
- err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to get device attributes %d\n",
+ err);
return err;
}
@@ -456,9 +469,10 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
result->db_bar = resp.u.device_attr.db_bar;
if (result->admin_api_version < 1) {
- ibdev_err(edev->efa_dev,
- "Failed to get device attr api version [%u < 1]\n",
- result->admin_api_version);
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Failed to get device attr api version [%u < 1]\n",
+ result->admin_api_version);
return -EINVAL;
}
@@ -466,8 +480,9 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
err = efa_com_get_feature(edev, &resp,
EFA_ADMIN_QUEUE_ATTR);
if (err) {
- ibdev_err(edev->efa_dev,
- "Failed to get network attributes %d\n", err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to get network attributes %d\n",
+ err);
return err;
}
@@ -497,7 +512,8 @@ int efa_com_get_hw_hints(struct efa_com_dev *edev,
err = efa_com_get_feature(edev, &resp, EFA_ADMIN_HW_HINTS);
if (err) {
- ibdev_err(edev->efa_dev, "Failed to get hw hints %d\n", err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to get hw hints %d\n", err);
return err;
}
@@ -520,8 +536,9 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev,
int err;
if (!efa_com_check_supported_feature_id(edev, feature_id)) {
- ibdev_err(edev->efa_dev, "Feature %d isn't supported\n",
- feature_id);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Feature %d isn't supported\n",
+ feature_id);
return -EOPNOTSUPP;
}
@@ -545,9 +562,10 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev,
sizeof(*set_resp));
if (err) {
- ibdev_err(edev->efa_dev,
- "Failed to submit set_feature command %d error: %d\n",
- feature_id, err);
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Failed to submit set_feature command %d error: %d\n",
+ feature_id, err);
return err;
}
@@ -574,8 +592,9 @@ int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups)
err = efa_com_get_feature(edev, &get_resp, EFA_ADMIN_AENQ_CONFIG);
if (err) {
- ibdev_err(edev->efa_dev, "Failed to get aenq attributes: %d\n",
- err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to get aenq attributes: %d\n",
+ err);
return err;
}
@@ -585,9 +604,10 @@ int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups)
get_resp.u.aenq.enabled_groups);
if ((get_resp.u.aenq.supported_groups & groups) != groups) {
- ibdev_err(edev->efa_dev,
- "Trying to set unsupported aenq groups[%#x] supported[%#x]\n",
- groups, get_resp.u.aenq.supported_groups);
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Trying to set unsupported aenq groups[%#x] supported[%#x]\n",
+ groups, get_resp.u.aenq.supported_groups);
return -EOPNOTSUPP;
}
@@ -595,8 +615,9 @@ int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups)
err = efa_com_set_feature(edev, &set_resp, &cmd,
EFA_ADMIN_AENQ_CONFIG);
if (err) {
- ibdev_err(edev->efa_dev, "Failed to set aenq attributes: %d\n",
- err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to set aenq attributes: %d\n",
+ err);
return err;
}
@@ -619,7 +640,8 @@ int efa_com_alloc_pd(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&resp,
sizeof(resp));
if (err) {
- ibdev_err(edev->efa_dev, "Failed to allocate pd[%d]\n", err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to allocate pd[%d]\n", err);
return err;
}
@@ -645,8 +667,9 @@ int efa_com_dealloc_pd(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&resp,
sizeof(resp));
if (err) {
- ibdev_err(edev->efa_dev, "Failed to deallocate pd-%u [%d]\n",
- cmd.pd, err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to deallocate pd-%u [%d]\n",
+ cmd.pd, err);
return err;
}
@@ -669,7 +692,8 @@ int efa_com_alloc_uar(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&resp,
sizeof(resp));
if (err) {
- ibdev_err(edev->efa_dev, "Failed to allocate uar[%d]\n", err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to allocate uar[%d]\n", err);
return err;
}
@@ -695,10 +719,47 @@ int efa_com_dealloc_uar(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&resp,
sizeof(resp));
if (err) {
- ibdev_err(edev->efa_dev, "Failed to deallocate uar-%u [%d]\n",
- cmd.uar, err);
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to deallocate uar-%u [%d]\n",
+ cmd.uar, err);
return err;
}
return 0;
}
+
+int efa_com_get_stats(struct efa_com_dev *edev,
+ struct efa_com_get_stats_params *params,
+ union efa_com_get_stats_result *result)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_aq_get_stats_cmd cmd = {};
+ struct efa_admin_acq_get_stats_resp resp;
+ int err;
+
+ cmd.aq_common_descriptor.opcode = EFA_ADMIN_GET_STATS;
+ cmd.type = params->type;
+ cmd.scope = params->scope;
+ cmd.scope_modifier = params->scope_modifier;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err) {
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Failed to get stats type-%u scope-%u.%u [%d]\n",
+ cmd.type, cmd.scope, cmd.scope_modifier, err);
+ return err;
+ }
+
+ result->basic_stats.tx_bytes = resp.basic_stats.tx_bytes;
+ result->basic_stats.tx_pkts = resp.basic_stats.tx_pkts;
+ result->basic_stats.rx_bytes = resp.basic_stats.rx_bytes;
+ result->basic_stats.rx_pkts = resp.basic_stats.rx_pkts;
+ result->basic_stats.rx_drops = resp.basic_stats.rx_drops;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index a1174380462c..7f6c13052f49 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -225,6 +225,26 @@ struct efa_com_dealloc_uar_params {
u16 uarn;
};
+struct efa_com_get_stats_params {
+ /* see enum efa_admin_get_stats_type */
+ u8 type;
+ /* see enum efa_admin_get_stats_scope */
+ u8 scope;
+ u16 scope_modifier;
+};
+
+struct efa_com_basic_stats {
+ u64 tx_bytes;
+ u64 tx_pkts;
+ u64 rx_bytes;
+ u64 rx_pkts;
+ u64 rx_drops;
+};
+
+union efa_com_get_stats_result {
+ struct efa_com_basic_stats basic_stats;
+};
+
void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low);
int efa_com_create_qp(struct efa_com_dev *edev,
struct efa_com_create_qp_params *params,
@@ -266,5 +286,8 @@ int efa_com_alloc_uar(struct efa_com_dev *edev,
struct efa_com_alloc_uar_result *result);
int efa_com_dealloc_uar(struct efa_com_dev *edev,
struct efa_com_dealloc_uar_params *params);
+int efa_com_get_stats(struct efa_com_dev *edev,
+ struct efa_com_get_stats_params *params,
+ union efa_com_get_stats_result *result);
#endif /* _EFA_COM_CMD_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index dd1c6d49466f..83858f7e83d0 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -201,6 +201,7 @@ static const struct ib_device_ops efa_dev_ops = {
.driver_id = RDMA_DRIVER_EFA,
.uverbs_abi_ver = EFA_UVERBS_ABI_VERSION,
+ .alloc_hw_stats = efa_alloc_hw_stats,
.alloc_pd = efa_alloc_pd,
.alloc_ucontext = efa_alloc_ucontext,
.create_ah = efa_create_ah,
@@ -212,6 +213,7 @@ static const struct ib_device_ops efa_dev_ops = {
.destroy_ah = efa_destroy_ah,
.destroy_cq = efa_destroy_cq,
.destroy_qp = efa_destroy_qp,
+ .get_hw_stats = efa_get_hw_stats,
.get_link_layer = efa_port_link_layer,
.get_port_immutable = efa_get_port_immutable,
.mmap = efa_mmap,
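
These two ops are the standard RDMA counter hookup: .alloc_hw_stats runs once to allocate a struct rdma_hw_stats carrying the counter names and lifespan, and .get_hw_stats runs on each sysfs read, refilling stats->value[] and returning the number of valid entries (or a negative errno). A condensed sketch of the pair for a hypothetical driver with two counters:

    static const char * const drv_stats_names[] = {
            "tx_pkts",
            "rx_pkts",
    };

    static struct rdma_hw_stats *drv_alloc_hw_stats(struct ib_device *ibdev,
                                                    u8 port_num)
    {
            return rdma_alloc_hw_stats_struct(drv_stats_names,
                                              ARRAY_SIZE(drv_stats_names),
                                              RDMA_HW_STATS_DEFAULT_LIFESPAN);
    }

    static int drv_get_hw_stats(struct ib_device *ibdev,
                                struct rdma_hw_stats *stats,
                                u8 port_num, int index)
    {
            struct drv_dev *dev = to_drv_dev(ibdev);  /* hypothetical */

            stats->value[0] = atomic64_read(&dev->tx_pkts);
            stats->value[1] = atomic64_read(&dev->rx_pkts);
            return ARRAY_SIZE(drv_stats_names);
    }
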
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index df77bc312a25..4edae89e8e3c 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -41,6 +41,33 @@ static inline u64 get_mmap_key(const struct efa_mmap_entry *efa)
((u64)efa->mmap_page << PAGE_SHIFT);
}
+#define EFA_DEFINE_STATS(op) \
+ op(EFA_TX_BYTES, "tx_bytes") \
+ op(EFA_TX_PKTS, "tx_pkts") \
+ op(EFA_RX_BYTES, "rx_bytes") \
+ op(EFA_RX_PKTS, "rx_pkts") \
+ op(EFA_RX_DROPS, "rx_drops") \
+ op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
+ op(EFA_COMPLETED_CMDS, "completed_cmds") \
+ op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
+ op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
+ op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
+ op(EFA_CREATE_QP_ERR, "create_qp_err") \
+ op(EFA_REG_MR_ERR, "reg_mr_err") \
+ op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
+ op(EFA_CREATE_AH_ERR, "create_ah_err")
+
+#define EFA_STATS_ENUM(ename, name) ename,
+#define EFA_STATS_STR(ename, name) [ename] = name,
+
+enum efa_hw_stats {
+ EFA_DEFINE_STATS(EFA_STATS_ENUM)
+};
+
+static const char *const efa_stats_names[] = {
+ EFA_DEFINE_STATS(EFA_STATS_STR)
+};
+
#define EFA_CHUNK_PAYLOAD_SHIFT 12
#define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT)
#define EFA_CHUNK_PAYLOAD_PTR_SIZE 8
@@ -121,7 +148,7 @@ static inline struct efa_ah *to_eah(struct ib_ah *ibah)
}
#define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \
- sizeof(((typeof(x) *)0)->fld) <= (sz))
+ FIELD_SIZEOF(typeof(x), fld) <= (sz))
#define is_reserved_cleared(reserved) \
!memchr_inv(reserved, 0, sizeof(reserved))
@@ -306,7 +333,7 @@ int efa_query_port(struct ib_device *ibdev, u8 port,
props->lmc = 1;
props->state = IB_PORT_ACTIVE;
- props->phys_state = 5;
+ props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
props->gid_tbl_len = 1;
props->pkey_tbl_len = 1;
props->active_speed = IB_SPEED_EDR;
@@ -1473,14 +1500,12 @@ int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);
- if (mr->umem) {
- params.l_key = mr->ibmr.lkey;
- err = efa_com_dereg_mr(&dev->edev, &params);
- if (err)
- return err;
- }
- ib_umem_release(mr->umem);
+ params.l_key = mr->ibmr.lkey;
+ err = efa_com_dereg_mr(&dev->edev, &params);
+ if (err)
+ return err;
+ ib_umem_release(mr->umem);
kfree(mr);
return 0;
@@ -1727,6 +1752,54 @@ void efa_destroy_ah(struct ib_ah *ibah, u32 flags)
efa_ah_destroy(dev, ah);
}
+struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
+{
+ return rdma_alloc_hw_stats_struct(efa_stats_names,
+ ARRAY_SIZE(efa_stats_names),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u8 port_num, int index)
+{
+ struct efa_com_get_stats_params params = {};
+ union efa_com_get_stats_result result;
+ struct efa_dev *dev = to_edev(ibdev);
+ struct efa_com_basic_stats *bs;
+ struct efa_com_stats_admin *as;
+ struct efa_stats *s;
+ int err;
+
+ params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;
+ params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
+
+ err = efa_com_get_stats(&dev->edev, &params, &result);
+ if (err)
+ return err;
+
+ bs = &result.basic_stats;
+ stats->value[EFA_TX_BYTES] = bs->tx_bytes;
+ stats->value[EFA_TX_PKTS] = bs->tx_pkts;
+ stats->value[EFA_RX_BYTES] = bs->rx_bytes;
+ stats->value[EFA_RX_PKTS] = bs->rx_pkts;
+ stats->value[EFA_RX_DROPS] = bs->rx_drops;
+
+ as = &dev->edev.aq.stats;
+ stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
+ stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
+ stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
+
+ s = &dev->stats;
+ stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
+ stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->sw_stats.alloc_pd_err);
+ stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->sw_stats.create_qp_err);
+ stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->sw_stats.reg_mr_err);
+ stats->value[EFA_ALLOC_UCONTEXT_ERR] = atomic64_read(&s->sw_stats.alloc_ucontext_err);
+ stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->sw_stats.create_ah_err);
+
+ return ARRAY_SIZE(efa_stats_names);
+}
+
enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
u8 port_num)
{
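
EFA_DEFINE_STATS above is an X-macro: one list is expanded twice, once through EFA_STATS_ENUM to produce enum efa_hw_stats and once through EFA_STATS_STR to produce efa_stats_names[], so an index and its label cannot drift apart when a counter is added. The same technique in a self-contained userspace form:

    #include <stdio.h>

    #define DEFINE_COLORS(op)        \
            op(COLOR_RED,   "red")   \
            op(COLOR_GREEN, "green") \
            op(COLOR_BLUE,  "blue")

    #define COLOR_ENUM(ename, name) ename,
    #define COLOR_STR(ename, name) [ename] = name,

    enum color { DEFINE_COLORS(COLOR_ENUM) COLOR_MAX };

    static const char * const color_names[] = { DEFINE_COLORS(COLOR_STR) };

    int main(void)
    {
            /* Extending DEFINE_COLORS updates both tables at once. */
            for (int i = 0; i < COLOR_MAX; i++)
                    printf("%d = %s\n", i, color_names[i]);
            return 0;
    }
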
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index 4d8510b0fc38..9972e0e6545e 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -110,12 +110,6 @@ enum pkt_q_sdma_state {
SDMA_PKT_Q_DEFERRED,
};
-/*
- * Maximum retry attempts to submit a TX request
- * before putting the process to sleep.
- */
-#define MAX_DEFER_RETRY_COUNT 1
-
#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
#define SDMA_DBG(req, fmt, ...) \
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 646f61545ed6..9f53f63b1453 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -874,16 +874,17 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
else
pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
- if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
- pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
pbc = create_pbc(ppd,
pbc,
qp->srate_mbps,
vl,
plen);
- /* Update HCRC based on packet opcode */
- pbc = update_hcrc(ps->opcode, pbc);
+ if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
+ pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
+ else
+ /* Update HCRC based on packet opcode */
+ pbc = update_hcrc(ps->opcode, pbc);
}
tx->wqe = qp->s_wqe;
ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc);
@@ -1030,12 +1031,12 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
else
pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
+ pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
- pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
-
- /* Update HCRC based on packet opcode */
- pbc = update_hcrc(ps->opcode, pbc);
+ else
+ /* Update HCRC based on packet opcode */
+ pbc = update_hcrc(ps->opcode, pbc);
}
if (cb)
iowait_pio_inc(&priv->s_iowait);
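
Both hfi1 hunks fix the same ordering problem: the fault-injection hook deliberately corrupts the PBC to put a bad packet on the wire, but the old code then ran update_hcrc() unconditionally (and, in the DMA path, even built the PBC after corrupting it), repairing the HCRC and defeating the injected fault. The fixed flow, reduced to its skeleton:

    pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
    if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
            pbc = hfi1_fault_tx(qp, ps->opcode, pbc);  /* keep it broken */
    else
            pbc = update_hcrc(ps->opcode, pbc);        /* finalize HCRC */
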
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index 54782197c717..d602b698b57e 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -8,8 +8,6 @@ config INFINIBAND_HNS
is used in Hisilicon Hip06 and more further ICT SoC based on
platform device.
- To compile HIP06 or HIP08 driver as module, choose M here.
-
config INFINIBAND_HNS_HIP06
tristate "Hisilicon Hip06 Family RoCE support"
depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
@@ -17,15 +15,9 @@ config INFINIBAND_HNS_HIP06
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
Hip07 SoC. These RoCE engines are platform devices.
- To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
- module will be called hns-roce-hw-v1
-
config INFINIBAND_HNS_HIP08
tristate "Hisilicon Hip08 Family RoCE support"
depends on INFINIBAND_HNS && PCI && HNS3
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
The RoCE engine is a PCI device.
-
- To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
- module will be called hns-roce-hw-v2.
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index cdd2ac24fc2a..90e08c0c332d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -66,11 +66,9 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
HNS_ROCE_VLAN_SL_SHIFT;
}
- ah->av.port_pd = cpu_to_le32(to_hr_pd(ibah->pd)->pdn |
- (rdma_ah_get_port_num(ah_attr) <<
- HNS_ROCE_PORT_NUM_SHIFT));
+ ah->av.port = rdma_ah_get_port_num(ah_attr);
ah->av.gid_index = grh->sgid_index;
- ah->av.vlan = cpu_to_le16(vlan_tag);
+ ah->av.vlan = vlan_tag;
ah->av.vlan_en = vlan_en;
dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
ah->av.vlan);
@@ -79,8 +77,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
ah->av.stat_rate = IB_RATE_10_GBPS;
memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
- ah->av.sl_tclass_flowlabel = cpu_to_le32(rdma_ah_get_sl(ah_attr) <<
- HNS_ROCE_SL_SHIFT);
+ ah->av.sl = rdma_ah_get_sl(ah_attr);
return 0;
}
@@ -91,17 +88,11 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
memset(ah_attr, 0, sizeof(*ah_attr));
- rdma_ah_set_sl(ah_attr, (le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
- HNS_ROCE_SL_SHIFT));
- rdma_ah_set_port_num(ah_attr, (le32_to_cpu(ah->av.port_pd) >>
- HNS_ROCE_PORT_NUM_SHIFT));
+ rdma_ah_set_sl(ah_attr, ah->av.sl);
+ rdma_ah_set_port_num(ah_attr, ah->av.port);
rdma_ah_set_static_rate(ah_attr, ah->av.stat_rate);
- rdma_ah_set_grh(ah_attr, NULL,
- (le32_to_cpu(ah->av.sl_tclass_flowlabel) &
- HNS_ROCE_FLOW_LABEL_MASK), ah->av.gid_index,
- ah->av.hop_limit,
- (le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
- HNS_ROCE_TCLASS_SHIFT));
+ rdma_ah_set_grh(ah_attr, NULL, ah->av.flowlabel,
+ ah->av.gid_index, ah->av.hop_limit, ah->av.tclass);
rdma_ah_set_dgid_raw(ah_attr, ah->av.dgid);
return 0;
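
struct hns_roce_av used to pack sl/tclass/flowlabel into one __le32 and port/pd into another, so every consumer needed the shift-and-mask unpacking deleted here; the patch stores them as discrete host-endian members (see the hns_roce_device.h hunk below) and defers packing to where the hardware descriptor is built. What the old accessors amounted to, per the HNS_ROCE_*_SHIFT/MASK constants:

    /* Old layout: sl in bits 31:28, tclass in 27:20, flowlabel in 19:0. */
    static void av_unpack_old(const struct hns_roce_av *av,
                              u8 *sl, u8 *tclass, u32 *flowlabel)
    {
            u32 w = le32_to_cpu(av->sl_tclass_flowlabel);

            *sl = w >> HNS_ROCE_SL_SHIFT;
            *tclass = (w >> HNS_ROCE_TCLASS_SHIFT) & 0xff;
            *flowlabel = w & HNS_ROCE_FLOW_LABEL_MASK;
    }
    /* New layout: av->sl, av->tclass and av->flowlabel are read directly. */
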
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 0cd09bf4d7ea..ade26faade8d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -211,7 +211,6 @@ int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
mutex_init(&hr_dev->cmd.hcr_mutex);
sema_init(&hr_dev->cmd.poll_sem, 1);
hr_dev->cmd.use_events = 0;
- hr_dev->cmd.toggle = 1;
hr_dev->cmd.max_cmds = CMD_MAX_NUM;
hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
HNS_ROCE_MAILBOX_SIZE,
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 4e50c22a2da4..22541d19cd09 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -83,7 +83,6 @@ static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
struct hns_roce_mtt *hr_mtt,
- struct hns_roce_uar *hr_uar,
struct hns_roce_cq *hr_cq, int vector)
{
struct hns_roce_cmd_mailbox *mailbox;
@@ -154,7 +153,6 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
hr_cq->cons_index = 0;
hr_cq->arm_sn = 1;
- hr_cq->uar = hr_uar;
atomic_set(&hr_cq->refcount, 1);
init_completion(&hr_cq->free);
@@ -298,21 +296,127 @@ static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
&buf->hr_buf);
}
+static int create_user_cq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_cq_resp *resp,
+ int cq_entries)
+{
+ struct hns_roce_ib_create_cq ucmd;
+ struct device *dev = hr_dev->dev;
+ int ret;
+ struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct hns_roce_ucontext, ibucontext);
+
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+ dev_err(dev, "Failed to copy_from_udata.\n");
+ return -EFAULT;
+ }
+
+ /* Get user space address, write it into mtt table */
+ ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
+ &hr_cq->umem, ucmd.buf_addr,
+ cq_entries);
+ if (ret) {
+ dev_err(dev, "Failed to get_cq_umem.\n");
+ return ret;
+ }
+
+ if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ (udata->outlen >= sizeof(*resp))) {
+ ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
+ &hr_cq->db);
+ if (ret) {
+ dev_err(dev, "cq record doorbell map failed!\n");
+ goto err_mtt;
+ }
+ hr_cq->db_en = 1;
+ resp->cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
+ }
+
+ return 0;
+
+err_mtt:
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ ib_umem_release(hr_cq->umem);
+
+ return ret;
+}
+
+static int create_kernel_cq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq, int cq_entries)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_uar *uar;
+ int ret;
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
+ ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
+ if (ret)
+ return ret;
+
+ hr_cq->set_ci_db = hr_cq->db.db_record;
+ *hr_cq->set_ci_db = 0;
+ hr_cq->db_en = 1;
+ }
+
+ /* Init mtt table and write buff address to mtt table */
+ ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries);
+ if (ret) {
+ dev_err(dev, "Failed to alloc_cq_buf.\n");
+ goto err_db;
+ }
+
+ uar = &hr_dev->priv_uar;
+ hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
+ DB_REG_OFFSET * uar->index;
+
+ return 0;
+
+err_db:
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
+ hns_roce_free_db(hr_dev, &hr_cq->db);
+
+ return ret;
+}
+
+static void destroy_user_cq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_cq_resp *resp)
+{
+ struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct hns_roce_ucontext, ibucontext);
+
+ if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ (udata->outlen >= sizeof(*resp)))
+ hns_roce_db_unmap_user(context, &hr_cq->db);
+
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ ib_umem_release(hr_cq->umem);
+}
+
+static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq)
+{
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe);
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
+ hns_roce_free_db(hr_dev, &hr_cq->db);
+}
+
int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct device *dev = hr_dev->dev;
- struct hns_roce_ib_create_cq ucmd;
struct hns_roce_ib_create_cq_resp resp = {};
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
- struct hns_roce_uar *uar = NULL;
int vector = attr->comp_vector;
int cq_entries = attr->cqe;
int ret;
- struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
- udata, struct hns_roce_ucontext, ibucontext);
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
@@ -328,61 +432,21 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
spin_lock_init(&hr_cq->lock);
if (udata) {
- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
- dev_err(dev, "Failed to copy_from_udata.\n");
- ret = -EFAULT;
- goto err_cq;
- }
-
- /* Get user space address, write it into mtt table */
- ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
- &hr_cq->umem, ucmd.buf_addr,
- cq_entries);
+ ret = create_user_cq(hr_dev, hr_cq, udata, &resp, cq_entries);
if (ret) {
- dev_err(dev, "Failed to get_cq_umem.\n");
+ dev_err(dev, "Create cq failed in user mode!\n");
goto err_cq;
}
-
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
- (udata->outlen >= sizeof(resp))) {
- ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
- &hr_cq->db);
- if (ret) {
- dev_err(dev, "cq record doorbell map failed!\n");
- goto err_mtt;
- }
- hr_cq->db_en = 1;
- resp.cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
- }
-
- /* Get user space parameters */
- uar = &context->uar;
} else {
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
- ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
- if (ret)
- goto err_cq;
-
- hr_cq->set_ci_db = hr_cq->db.db_record;
- *hr_cq->set_ci_db = 0;
- hr_cq->db_en = 1;
- }
-
- /* Init mmt table and write buff address to mtt table */
- ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
- cq_entries);
+ ret = create_kernel_cq(hr_dev, hr_cq, cq_entries);
if (ret) {
- dev_err(dev, "Failed to alloc_cq_buf.\n");
- goto err_db;
+ dev_err(dev, "Create cq failed in kernel mode!\n");
+ goto err_cq;
}
-
- uar = &hr_dev->priv_uar;
- hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
- DB_REG_OFFSET * uar->index;
}
/* Allocate cq index, fill cq_context */
- ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar,
+ ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
hr_cq, vector);
if (ret) {
dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
@@ -416,20 +480,10 @@ err_cqc:
hns_roce_free_cq(hr_dev, hr_cq);
err_dbmap:
- if (udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
- (udata->outlen >= sizeof(resp)))
- hns_roce_db_unmap_user(context, &hr_cq->db);
-
-err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
- ib_umem_release(hr_cq->umem);
- if (!udata)
- hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
- hr_cq->ib_cq.cqe);
-
-err_db:
- if (!udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
- hns_roce_free_db(hr_dev, &hr_cq->db);
+ if (udata)
+ destroy_user_cq(hr_dev, hr_cq, udata, &resp);
+ else
+ destroy_kernel_cq(hr_dev, hr_cq);
err_cq:
return ret;
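
The refactor trades one long function with a label ladder for four symmetric helpers: create_user_cq()/create_kernel_cq() unwind their own partial state on failure, and destroy_user_cq()/destroy_kernel_cq() mirror them, leaving hns_roce_ib_create_cq() with a single unwind point. The shape of the pattern, as a sketch with hypothetical names:

    static int create_obj(struct drv_dev *d, struct drv_obj *o,
                          struct ib_udata *udata)
    {
            int ret;

            ret = udata ? create_user(d, o, udata) : create_kernel(d, o);
            if (ret)
                    return ret;     /* helper undid its own work */

            ret = alloc_hw_index(d, o);     /* shared step */
            if (ret)
                    goto err;
            return 0;

    err:
            if (udata)
                    destroy_user(d, o, udata);
            else
                    destroy_kernel(d, o);
            return ret;
    }
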
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index a548b28aab63..96d1302abde1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -84,7 +84,6 @@
#define HNS_ROCE_CEQ_ENTRY_SIZE 0x4
#define HNS_ROCE_AEQ_ENTRY_SIZE 0x10
-/* 4G/4K = 1M */
#define HNS_ROCE_SL_SHIFT 28
#define HNS_ROCE_TCLASS_SHIFT 20
#define HNS_ROCE_FLOW_LABEL_MASK 0xfffff
@@ -128,6 +127,11 @@
#define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
#define SRQ_DB_REG 0x230
+/* The chip calculates the consumer index according to
+ * twice the actual EQ depth
+ */
+#define EQ_DEPTH_COEFF 2
+
enum {
HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0,
HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1,
@@ -322,7 +326,7 @@ struct hns_roce_hem_table {
unsigned long num_hem;
/* HEM entry record obj total num */
unsigned long num_obj;
- /*Single obj size */
+ /* Single obj size */
unsigned long obj_size;
unsigned long table_chunk_size;
int lowmem;
@@ -343,7 +347,7 @@ struct hns_roce_mtt {
struct hns_roce_buf_region {
int offset; /* page offset */
- u32 count; /* page count*/
+ u32 count; /* page count */
int hopnum; /* addressing hop num */
};
@@ -384,25 +388,25 @@ struct hns_roce_mr {
u64 size; /* Address range of MR */
u32 key; /* Key of MR */
u32 pd; /* PD num of MR */
- u32 access;/* Access permission of MR */
+ u32 access; /* Access permission of MR */
u32 npages;
int enabled; /* MR's active status */
int type; /* MR's register type */
- u64 *pbl_buf;/* MR's PBL space */
+ u64 *pbl_buf; /* MR's PBL space */
dma_addr_t pbl_dma_addr; /* MR's PBL space PA */
- u32 pbl_size;/* PA number in the PBL */
- u64 pbl_ba;/* page table address */
- u32 l0_chunk_last_num;/* L0 last number */
- u32 l1_chunk_last_num;/* L1 last number */
- u64 **pbl_bt_l2;/* PBL BT L2 */
- u64 **pbl_bt_l1;/* PBL BT L1 */
- u64 *pbl_bt_l0;/* PBL BT L0 */
- dma_addr_t *pbl_l2_dma_addr;/* PBL BT L2 dma addr */
- dma_addr_t *pbl_l1_dma_addr;/* PBL BT L1 dma addr */
- dma_addr_t pbl_l0_dma_addr;/* PBL BT L0 dma addr */
- u32 pbl_ba_pg_sz;/* BT chunk page size */
- u32 pbl_buf_pg_sz;/* buf chunk page size */
- u32 pbl_hop_num;/* multi-hop number */
+ u32 pbl_size; /* PA number in the PBL */
+ u64 pbl_ba; /* page table address */
+ u32 l0_chunk_last_num; /* L0 last number */
+ u32 l1_chunk_last_num; /* L1 last number */
+ u64 **pbl_bt_l2; /* PBL BT L2 */
+ u64 **pbl_bt_l1; /* PBL BT L1 */
+ u64 *pbl_bt_l0; /* PBL BT L0 */
+ dma_addr_t *pbl_l2_dma_addr; /* PBL BT L2 dma addr */
+ dma_addr_t *pbl_l1_dma_addr; /* PBL BT L1 dma addr */
+ dma_addr_t pbl_l0_dma_addr; /* PBL BT L0 dma addr */
+ u32 pbl_ba_pg_sz; /* BT chunk page size */
+ u32 pbl_buf_pg_sz; /* buf chunk page size */
+ u32 pbl_hop_num; /* multi-hop number */
};
struct hns_roce_mr_table {
@@ -425,16 +429,16 @@ struct hns_roce_wq {
u32 max_post;
int max_gs;
int offset;
- int wqe_shift;/* WQE size */
+ int wqe_shift; /* WQE size */
u32 head;
u32 tail;
void __iomem *db_reg_l;
};
struct hns_roce_sge {
- int sge_cnt; /* SGE num */
+ int sge_cnt; /* SGE num */
int offset;
- int sge_shift;/* SGE size */
+ int sge_shift; /* SGE size */
};
struct hns_roce_buf_list {
@@ -569,14 +573,16 @@ struct hns_roce_raq_table {
};
struct hns_roce_av {
- __le32 port_pd;
+ u8 port;
u8 gid_index;
u8 stat_rate;
u8 hop_limit;
- __le32 sl_tclass_flowlabel;
+ u32 flowlabel;
+ u8 sl;
+ u8 tclass;
u8 dgid[HNS_ROCE_GID_SIZE];
u8 mac[ETH_ALEN];
- __le16 vlan;
+ u16 vlan;
bool vlan_en;
};
@@ -618,7 +624,6 @@ struct hns_roce_cmdq {
* close device, switch into poll mode(non event mode)
*/
u8 use_events;
- u8 toggle;
};
struct hns_roce_cmd_mailbox {
@@ -652,10 +657,8 @@ struct hns_roce_qp {
u8 rdb_en;
u8 sdb_en;
u32 doorbell_qpn;
- __le32 sq_signal_bits;
+ u32 sq_signal_bits;
u32 sq_next_wqe;
- int sq_max_wqes_per_wr;
- int sq_spare_wqes;
struct hns_roce_wq sq;
struct ib_umem *umem;
@@ -709,7 +712,7 @@ enum {
};
struct hns_roce_ceqe {
- u32 comp;
+ __le32 comp;
};
struct hns_roce_aeqe {
@@ -752,7 +755,7 @@ struct hns_roce_eq {
struct hns_roce_dev *hr_dev;
void __iomem *doorbell;
- int type_flag;/* Aeq:1 ceq:0 */
+ int type_flag; /* Aeq:1 ceq:0 */
int eqn;
u32 entries;
int log_entries;
@@ -798,22 +801,22 @@ struct hns_roce_caps {
int local_ca_ack_delay;
int num_uars;
u32 phy_num_uars;
- u32 max_sq_sg; /* 2 */
- u32 max_sq_inline; /* 32 */
- u32 max_rq_sg; /* 2 */
+ u32 max_sq_sg;
+ u32 max_sq_inline;
+ u32 max_rq_sg;
u32 max_extend_sg;
- int num_qps; /* 256k */
+ int num_qps;
int reserved_qps;
int num_qpc_timer;
int num_cqc_timer;
u32 max_srq_sg;
int num_srqs;
- u32 max_wqes; /* 16k */
+ u32 max_wqes;
u32 max_srqs;
u32 max_srq_wrs;
u32 max_srq_sges;
- u32 max_sq_desc_sz; /* 64 */
- u32 max_rq_desc_sz; /* 64 */
+ u32 max_sq_desc_sz;
+ u32 max_rq_desc_sz;
u32 max_srq_desc_sz;
int max_qp_init_rdma;
int max_qp_dest_rdma;
@@ -824,7 +827,7 @@ struct hns_roce_caps {
int reserved_cqs;
int reserved_srqs;
u32 max_srqwqes;
- int num_aeq_vectors; /* 1 */
+ int num_aeq_vectors;
int num_comp_vectors;
int num_other_vectors;
int num_mtpts;
@@ -905,7 +908,7 @@ struct hns_roce_caps {
u32 sl_num;
u32 tsq_buf_pg_sz;
u32 tpq_buf_pg_sz;
- u32 chunk_sz; /* chunk size in non multihop mode*/
+ u32 chunk_sz; /* chunk size in non multihop mode */
u64 flags;
};
@@ -991,16 +994,6 @@ struct hns_roce_hw {
const struct ib_device_ops *hns_roce_dev_srq_ops;
};
-enum hns_phy_state {
- HNS_ROCE_PHY_SLEEP = 1,
- HNS_ROCE_PHY_POLLING = 2,
- HNS_ROCE_PHY_DISABLED = 3,
- HNS_ROCE_PHY_TRAINING = 4,
- HNS_ROCE_PHY_LINKUP = 5,
- HNS_ROCE_PHY_LINKERR = 6,
- HNS_ROCE_PHY_TEST = 7
-};
-
struct hns_roce_dev {
struct ib_device ib_dev;
struct platform_device *pdev;
@@ -1045,8 +1038,8 @@ struct hns_roce_dev {
int loop_idc;
u32 sdb_offset;
u32 odb_offset;
- dma_addr_t tptr_dma_addr; /*only for hw v1*/
- u32 tptr_size; /*only for hw v1*/
+ dma_addr_t tptr_dma_addr; /* only for hw v1 */
+ u32 tptr_size; /* only for hw v1 */
const struct hns_roce_hw *hw;
void *priv;
struct workqueue_struct *irq_workq;
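
Several members in this header move between u32 and __le32. The __le32 annotation is the sparse-checked contract for device-endian data: doorbell words and CEQE contents written or read by the hardware are __le32 and must go through cpu_to_le32()/le32_to_cpu(), while values the CPU computes on stay plain u32. A minimal sketch of the reading side:

    struct drv_ceqe {
            __le32 comp;    /* written by the device, little-endian */
    };

    static u32 drv_ceqe_comp(const struct drv_ceqe *ceqe)
    {
            /* A direct read would be wrong on big-endian hosts and is
             * flagged by sparse once the field is marked __le32.
             */
            return le32_to_cpu(READ_ONCE(ceqe->comp));
    }
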
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index f4da5bd2884f..e82215774032 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -41,29 +41,57 @@
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
{
- if ((hr_dev->caps.qpc_hop_num && type == HEM_TYPE_QPC) ||
- (hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
- (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
- (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
- (hr_dev->caps.sccc_hop_num && type == HEM_TYPE_SCCC) ||
- (hr_dev->caps.qpc_timer_hop_num && type == HEM_TYPE_QPC_TIMER) ||
- (hr_dev->caps.cqc_timer_hop_num && type == HEM_TYPE_CQC_TIMER) ||
- (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
- (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) ||
- (hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) ||
- (hr_dev->caps.idx_hop_num && type == HEM_TYPE_IDX))
- return true;
-
- return false;
+ int hop_num = 0;
+
+ switch (type) {
+ case HEM_TYPE_QPC:
+ hop_num = hr_dev->caps.qpc_hop_num;
+ break;
+ case HEM_TYPE_MTPT:
+ hop_num = hr_dev->caps.mpt_hop_num;
+ break;
+ case HEM_TYPE_CQC:
+ hop_num = hr_dev->caps.cqc_hop_num;
+ break;
+ case HEM_TYPE_SRQC:
+ hop_num = hr_dev->caps.srqc_hop_num;
+ break;
+ case HEM_TYPE_SCCC:
+ hop_num = hr_dev->caps.sccc_hop_num;
+ break;
+ case HEM_TYPE_QPC_TIMER:
+ hop_num = hr_dev->caps.qpc_timer_hop_num;
+ break;
+ case HEM_TYPE_CQC_TIMER:
+ hop_num = hr_dev->caps.cqc_timer_hop_num;
+ break;
+ case HEM_TYPE_CQE:
+ hop_num = hr_dev->caps.cqe_hop_num;
+ break;
+ case HEM_TYPE_MTT:
+ hop_num = hr_dev->caps.mtt_hop_num;
+ break;
+ case HEM_TYPE_SRQWQE:
+ hop_num = hr_dev->caps.srqwqe_hop_num;
+ break;
+ case HEM_TYPE_IDX:
+ hop_num = hr_dev->caps.idx_hop_num;
+ break;
+ default:
+ return false;
+ }
+
+ return hop_num ? true : false;
}
static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx,
- u32 bt_chunk_num)
+ u32 bt_chunk_num, u64 hem_max_num)
{
- int i;
+ u64 check_max_num = start_idx + bt_chunk_num;
+ u64 i;
- for (i = 0; i < bt_chunk_num; i++)
- if (hem[start_idx + i])
+ for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++)
+ if (hem[i])
return false;
return true;
@@ -92,17 +120,13 @@ static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
return 0;
}
-int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table, unsigned long *obj,
- struct hns_roce_hem_mhop *mhop)
+static int get_hem_table_config(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_mhop *mhop,
+ u32 type)
{
struct device *dev = hr_dev->dev;
- u32 chunk_ba_num;
- u32 table_idx;
- u32 bt_num;
- u32 chunk_size;
- switch (table->type) {
+ switch (type) {
case HEM_TYPE_QPC:
mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
+ PAGE_SHIFT);
@@ -193,10 +217,26 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
break;
default:
dev_err(dev, "Table %d not support multi-hop addressing!\n",
- table->type);
+ type);
return -EINVAL;
}
+ return 0;
+}
+
+int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long *obj,
+ struct hns_roce_hem_mhop *mhop)
+{
+ struct device *dev = hr_dev->dev;
+ u32 chunk_ba_num;
+ u32 table_idx;
+ u32 bt_num;
+ u32 chunk_size;
+
+ if (get_hem_table_config(hr_dev, mhop, table->type))
+ return -EINVAL;
+
if (!obj)
return 0;
@@ -324,13 +364,13 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
{
spinlock_t *lock = &hr_dev->bt_cmd_lock;
struct device *dev = hr_dev->dev;
- unsigned long end = 0;
+ long end;
unsigned long flags;
struct hns_roce_hem_iter iter;
void __iomem *bt_cmd;
- u32 bt_cmd_h_val = 0;
- u32 bt_cmd_val[2];
- u32 bt_cmd_l = 0;
+ __le32 bt_cmd_val[2];
+ __le32 bt_cmd_h = 0;
+ __le32 bt_cmd_l = 0;
u64 bt_ba = 0;
int ret = 0;
@@ -340,30 +380,20 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
switch (table->type) {
case HEM_TYPE_QPC:
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
- break;
case HEM_TYPE_MTPT:
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
- HEM_TYPE_MTPT);
- break;
case HEM_TYPE_CQC:
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
- break;
case HEM_TYPE_SRQC:
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
- HEM_TYPE_SRQC);
+ roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
break;
default:
return ret;
}
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
+
+ roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
- roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
- roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
+ roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
+ roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
/* Currently iter only a chunk */
for (hns_roce_hem_first(table->hem[i], &iter);
@@ -375,7 +405,7 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
end = HW_SYNC_TIMEOUT_MSECS;
- while (end) {
+ while (end > 0) {
if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT))
break;
@@ -389,13 +419,13 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
return -EBUSY;
}
- bt_cmd_l = (u32)bt_ba;
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
+ bt_cmd_l = cpu_to_le32(bt_ba);
+ roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
bt_ba >> BT_BA_SHIFT);
bt_cmd_val[0] = bt_cmd_l;
- bt_cmd_val[1] = bt_cmd_h_val;
+ bt_cmd_val[1] = bt_cmd_h;
hns_roce_write64_k(bt_cmd_val,
hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
spin_unlock_irqrestore(lock, flags);
@@ -457,6 +487,12 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
return -EINVAL;
}
+ if (unlikely(hem_idx >= table->num_hem)) {
+ dev_err(dev, "Table %d exceed hem limt idx = %llu,max = %lu!\n",
+ table->type, hem_idx, table->num_hem);
+ return -EINVAL;
+ }
+
mutex_lock(&table->mutex);
if (table->hem[hem_idx]) {
@@ -693,7 +729,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
if (check_whether_bt_num_2(table->type, hop_num)) {
start_idx = mhop.l0_idx * chunk_ba_num;
if (hns_roce_check_hem_null(table->hem, start_idx,
- chunk_ba_num)) {
+ chunk_ba_num, table->num_hem)) {
if (table->type < HEM_TYPE_MTT &&
hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
dev_warn(dev, "Clear HEM base address failed.\n");
@@ -707,7 +743,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
mhop.l1_idx * chunk_ba_num;
if (hns_roce_check_hem_null(table->hem, start_idx,
- chunk_ba_num)) {
+ chunk_ba_num, table->num_hem)) {
if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
dev_warn(dev, "Clear HEM base address failed.\n");
@@ -791,7 +827,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
} else {
u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
- hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+ if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop))
+ goto out;
/* mtt mhop */
i = mhop.l0_idx;
j = mhop.l1_idx;
@@ -840,11 +877,13 @@ int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
{
struct hns_roce_hem_mhop mhop;
unsigned long inc = table->table_chunk_size / table->obj_size;
- unsigned long i;
+ unsigned long i = 0;
int ret;
if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
- hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+ ret = hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+ if (ret)
+ goto fail;
inc = mhop.bt_chunk_size / table->obj_size;
}
@@ -874,7 +913,8 @@ void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
unsigned long i;
if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
- hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+ if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
+ return;
inc = mhop.bt_chunk_size / table->obj_size;
}
@@ -887,7 +927,6 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
unsigned long obj_size, unsigned long nobj,
int use_lowmem)
{
- struct device *dev = hr_dev->dev;
unsigned long obj_per_chunk;
unsigned long num_hem;
@@ -900,99 +939,21 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
if (!table->hem)
return -ENOMEM;
} else {
+ struct hns_roce_hem_mhop mhop = {};
unsigned long buf_chunk_size;
unsigned long bt_chunk_size;
unsigned long bt_chunk_num;
unsigned long num_bt_l0 = 0;
u32 hop_num;
- switch (type) {
- case HEM_TYPE_QPC:
- buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
- + PAGE_SHIFT);
- bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
- + PAGE_SHIFT);
- num_bt_l0 = hr_dev->caps.qpc_bt_num;
- hop_num = hr_dev->caps.qpc_hop_num;
- break;
- case HEM_TYPE_MTPT:
- buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
- + PAGE_SHIFT);
- bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
- + PAGE_SHIFT);
- num_bt_l0 = hr_dev->caps.mpt_bt_num;
- hop_num = hr_dev->caps.mpt_hop_num;
- break;
- case HEM_TYPE_CQC:
- buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
- + PAGE_SHIFT);
- bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
- + PAGE_SHIFT);
- num_bt_l0 = hr_dev->caps.cqc_bt_num;
- hop_num = hr_dev->caps.cqc_hop_num;
- break;
- case HEM_TYPE_SCCC:
- buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
- + PAGE_SHIFT);
- bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
- + PAGE_SHIFT);
- num_bt_l0 = hr_dev->caps.sccc_bt_num;
- hop_num = hr_dev->caps.sccc_hop_num;
- break;
- case HEM_TYPE_QPC_TIMER:
- buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
- + PAGE_SHIFT);
- bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
- + PAGE_SHIFT);
- num_bt_l0 = hr_dev->caps.qpc_timer_bt_num;
- hop_num = hr_dev->caps.qpc_timer_hop_num;
- break;
- case HEM_TYPE_CQC_TIMER:
- buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
- + PAGE_SHIFT);
- bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
- + PAGE_SHIFT);
- num_bt_l0 = hr_dev->caps.cqc_timer_bt_num;
- hop_num = hr_dev->caps.cqc_timer_hop_num;
- break;
- case HEM_TYPE_SRQC:
- buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
- + PAGE_SHIFT);
- bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
- + PAGE_SHIFT);
- num_bt_l0 = hr_dev->caps.srqc_bt_num;
- hop_num = hr_dev->caps.srqc_hop_num;
- break;
- case HEM_TYPE_MTT:
- buf_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
- + PAGE_SHIFT);
- bt_chunk_size = buf_chunk_size;
- hop_num = hr_dev->caps.mtt_hop_num;
- break;
- case HEM_TYPE_CQE:
- buf_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
- + PAGE_SHIFT);
- bt_chunk_size = buf_chunk_size;
- hop_num = hr_dev->caps.cqe_hop_num;
- break;
- case HEM_TYPE_SRQWQE:
- buf_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
- + PAGE_SHIFT);
- bt_chunk_size = buf_chunk_size;
- hop_num = hr_dev->caps.srqwqe_hop_num;
- break;
- case HEM_TYPE_IDX:
- buf_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
- + PAGE_SHIFT);
- bt_chunk_size = buf_chunk_size;
- hop_num = hr_dev->caps.idx_hop_num;
- break;
- default:
- dev_err(dev,
- "Table %d not support to init hem table here!\n",
- type);
+ if (get_hem_table_config(hr_dev, &mhop, type))
return -EINVAL;
- }
+
+ buf_chunk_size = mhop.buf_chunk_size;
+ bt_chunk_size = mhop.bt_chunk_size;
+ num_bt_l0 = mhop.ba_l0_num;
+ hop_num = mhop.hop_num;
+
obj_per_chunk = buf_chunk_size / obj_size;
num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;
@@ -1075,7 +1036,8 @@ static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
int i;
u64 obj;
- hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+ if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
+ return;
buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
mhop.bt_chunk_size;
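
The timeout rework in this file (and in hns_roce_hw_v1.c below) fixes an unsigned-countdown bug: with unsigned long end, subtracting past zero wraps to a huge value, so a while (end) loop can spin almost forever; a signed long tested with end > 0 terminates as intended. The essence, assuming the usual poll-and-sleep loop with a hypothetical poll helper:

    static int wait_hw_sync(void)
    {
            long left = HW_SYNC_TIMEOUT_MSECS;      /* signed on purpose */

            while (left > 0) {
                    if (hw_is_ready())              /* hypothetical poll */
                            return 0;
                    msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
                    left -= HW_SYNC_SLEEP_TIME_INTERVAL;
            }
            return -EBUSY;
    }
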
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index f1ccb8f35fe5..86783276fb1f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -102,9 +102,9 @@ struct hns_roce_hem_mhop {
u32 buf_chunk_size;
u32 bt_chunk_size;
u32 ba_l0_num;
- u32 l0_idx;/* level 0 base address table index */
- u32 l1_idx;/* level 1 base address table index */
- u32 l2_idx;/* level 2 base address table index */
+ u32 l0_idx; /* level 0 base address table index */
+ u32 l1_idx; /* level 1 base address table index */
+ u32 l2_idx; /* level 2 base address table index */
};
void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index c07e387a07a3..4b5b9cfcd26f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -73,7 +73,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
int ps_opcode = 0, i = 0;
unsigned long flags = 0;
void *wqe = NULL;
- u32 doorbell[2];
+ __le32 doorbell[2];
int nreq = 0;
u32 ind = 0;
int ret = 0;
@@ -175,13 +175,11 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
roce_set_field(ud_sq_wqe->u32_36,
UD_SEND_WQE_U32_36_FLOW_LABEL_M,
UD_SEND_WQE_U32_36_FLOW_LABEL_S,
- ah->av.sl_tclass_flowlabel &
- HNS_ROCE_FLOW_LABEL_MASK);
+ ah->av.flowlabel);
roce_set_field(ud_sq_wqe->u32_36,
UD_SEND_WQE_U32_36_PRIORITY_M,
UD_SEND_WQE_U32_36_PRIORITY_S,
- le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
- HNS_ROCE_SL_SHIFT);
+ ah->av.sl);
roce_set_field(ud_sq_wqe->u32_36,
UD_SEND_WQE_U32_36_SGID_INDEX_M,
UD_SEND_WQE_U32_36_SGID_INDEX_S,
@@ -195,8 +193,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
roce_set_field(ud_sq_wqe->u32_40,
UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
- ah->av.sl_tclass_flowlabel >>
- HNS_ROCE_TCLASS_SHIFT);
+ ah->av.tclass);
memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);
@@ -335,10 +332,10 @@ out:
SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
- doorbell[0] = le32_to_cpu(sq_db.u32_4);
- doorbell[1] = le32_to_cpu(sq_db.u32_8);
+ doorbell[0] = sq_db.u32_4;
+ doorbell[1] = sq_db.u32_8;
- hns_roce_write64_k((__le32 *)doorbell, qp->sq.db_reg_l);
+ hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
qp->sq_next_wqe = ind;
}
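/*
 * A sketch (editor's illustration, not part of the patch): the doorbell
 * buffer is handed to the device verbatim, so it should be typed __le32
 * and hold little-endian values; the removed le32_to_cpu() calls only
 * appeared to work because they are no-ops on little-endian hosts.
 * User-space analogue using glibc's <endian.h>:
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cpu_val = 0x12345678u;
	uint32_t le_val = htole32(cpu_val);	/* cpu_to_le32() analogue */

	/* No-op on little-endian hosts, byte swap on big-endian ones. */
	printf("cpu %#x -> le %#x\n", (unsigned)cpu_val, (unsigned)le_val);
	return 0;
}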
@@ -363,7 +360,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_rq_db rq_db;
- uint32_t doorbell[2] = {0};
+ __le32 doorbell[2] = {0};
spin_lock_irqsave(&hr_qp->rq.lock, flags);
ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
@@ -437,11 +434,10 @@ out:
roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
1);
- doorbell[0] = le32_to_cpu(rq_db.u32_4);
- doorbell[1] = le32_to_cpu(rq_db.u32_8);
+ doorbell[0] = rq_db.u32_4;
+ doorbell[1] = rq_db.u32_8;
- hns_roce_write64_k((__le32 *)doorbell,
- hr_qp->rq.db_reg_l);
+ hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
}
}
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
@@ -715,7 +711,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
struct ib_cq *cq;
struct ib_pd *pd;
union ib_gid dgid;
- u64 subnet_prefix;
+ __be64 subnet_prefix;
int attr_mask = 0;
int ret;
int i, j;
@@ -971,7 +967,7 @@ static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
struct hns_roce_free_mr *free_mr;
struct hns_roce_v1_priv *priv;
struct completion comp;
- unsigned long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
+ long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
@@ -991,7 +987,7 @@ static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));
- while (end) {
+ while (end > 0) {
if (try_wait_for_completion(&comp))
return 0;
msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
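/*
 * A sketch (editor's illustration, not part of the patch): why the
 * countdown becomes a signed long compared with "> 0". When the poll
 * interval does not evenly divide the timeout, an unsigned counter wraps
 * past zero and the loop spins forever; a signed counter goes negative
 * and exits:
 */
#include <stdio.h>

int main(void)
{
	long end = 10;		/* hypothetical timeout, ms */
	const long step = 3;	/* hypothetical poll interval, ms */
	int polls = 0;

	while (end > 0) {	/* with unsigned end, 1 - 3 wraps to a huge
				 * value and "while (end)" never terminates */
		polls++;
		end -= step;
	}
	printf("gave up after %d polls\n", polls);	/* prints 4 */
	return 0;
}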
@@ -1109,7 +1105,7 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
struct hns_roce_free_mr *free_mr;
struct hns_roce_v1_priv *priv;
struct completion comp;
- unsigned long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
+ long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
unsigned long start = jiffies;
int npages;
int ret = 0;
@@ -1139,7 +1135,7 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
queue_work(free_mr->free_mr_wq, &(mr_work->work));
- while (end) {
+ while (end > 0) {
if (try_wait_for_completion(&comp))
goto free_mr;
msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
@@ -2165,7 +2161,7 @@ static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
{
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
u32 notification_flag;
- __le32 doorbell[2];
+ __le32 doorbell[2] = {};
notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
@@ -2430,7 +2426,8 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
{
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_v1_priv *priv;
- unsigned long end = 0, flags = 0;
+ unsigned long flags = 0;
+ long end = HW_SYNC_TIMEOUT_MSECS;
__le32 bt_cmd_val[2] = {0};
void __iomem *bt_cmd;
u64 bt_ba = 0;
@@ -2439,18 +2436,12 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
switch (table->type) {
case HEM_TYPE_QPC:
- roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
bt_ba = priv->bt_table.qpc_buf.map >> 12;
break;
case HEM_TYPE_MTPT:
- roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT);
bt_ba = priv->bt_table.mtpt_buf.map >> 12;
break;
case HEM_TYPE_CQC:
- roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
bt_ba = priv->bt_table.cqc_buf.map >> 12;
break;
case HEM_TYPE_SRQC:
@@ -2459,6 +2450,8 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
default:
return 0;
}
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
@@ -2468,7 +2461,6 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
- end = HW_SYNC_TIMEOUT_MSECS;
while (1) {
if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
if (!end) {
@@ -2484,7 +2476,7 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
end -= HW_SYNC_SLEEP_TIME_INTERVAL;
}
- bt_cmd_val[0] = (__le32)bt_ba;
+ bt_cmd_val[0] = cpu_to_le32(bt_ba);
roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
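/*
 * A sketch (editor's illustration, not part of the patch): each removed
 * case programmed the MDF field with the very value being switched on,
 * so a single roce_set_field() call keyed on table->type now follows the
 * switch. The shape of the refactor:
 */
#include <stdio.h>

int main(void)
{
	int type = 2;		/* stands in for table->type */
	unsigned long ba;

	switch (type) {		/* cases now only select per-type data */
	case 0: ba = 0x1000; break;
	case 1: ba = 0x2000; break;
	case 2: ba = 0x3000; break;
	default: return 0;
	}
	/* Common epilogue replaces three identical per-case field writes. */
	printf("mdf <- %d, ba %#lx\n", type, ba);
	return 0;
}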
@@ -2627,7 +2619,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
roce_set_bit(context->qp1c_bytes_16,
QP1C_BYTES_16_SIGNALING_TYPE_S,
- le32_to_cpu(hr_qp->sq_signal_bits));
+ hr_qp->sq_signal_bits);
roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
1);
roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
@@ -2933,7 +2925,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
1);
roce_set_bit(context->qpc_bytes_32,
QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
- le32_to_cpu(hr_qp->sq_signal_bits));
+ hr_qp->sq_signal_bits);
port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
hr_qp->port;
@@ -3578,7 +3570,7 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
- qp_attr->rnr_retry = (u8)context->rnr_retry;
+ qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry);
done:
qp_attr->cur_qp_state = qp_attr->qp_state;
@@ -4021,7 +4013,8 @@ static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
++eq->cons_index;
ceqes_found = 1;
- if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth - 1) {
+ if (eq->cons_index >
+ EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1) {
dev_warn(&eq->hr_dev->pdev->dev,
"cons_index overflow, set back to 0.\n");
eq->cons_index = 0;
@@ -4524,7 +4517,6 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
struct platform_device *pdev = NULL;
struct net_device *netdev = NULL;
struct device_node *net_node;
- struct resource *res;
int port_cnt = 0;
u8 phy_port;
int ret;
@@ -4563,8 +4555,7 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
}
/* get the mapped register base address */
- res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
- hr_dev->reg_base = devm_ioremap_resource(dev, res);
+ hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0);
if (IS_ERR(hr_dev->reg_base))
return PTR_ERR(hr_dev->reg_base);
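/*
 * A sketch (kernel-style, illustrative only, not part of the patch):
 * devm_platform_ioremap_resource() wraps the platform_get_resource() +
 * devm_ioremap_resource() pair, so a typical probe reduces to:
 */
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>

static int demo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* Looks up IORESOURCE_MEM index 0 and maps it; devm handles unmap. */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}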
@@ -4639,10 +4630,8 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
/* fetch the interrupt numbers */
for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
- if (hr_dev->irq[i] <= 0) {
- dev_err(dev, "platform get of irq[=%d] failed!\n", i);
+ if (hr_dev->irq[i] <= 0)
return -EINVAL;
- }
}
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index b76e3beeafb8..7a89d669f8bf 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -239,7 +239,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db sq_db;
struct ib_qp_attr attr;
- unsigned int sge_ind = 0;
+ unsigned int sge_ind;
unsigned int owner_bit;
unsigned long flags;
unsigned int ind;
@@ -397,18 +397,15 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
roce_set_field(ud_sq_wqe->byte_36,
V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
- ah->av.sl_tclass_flowlabel >>
- HNS_ROCE_TCLASS_SHIFT);
+ ah->av.tclass);
roce_set_field(ud_sq_wqe->byte_40,
V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S,
- ah->av.sl_tclass_flowlabel &
- HNS_ROCE_FLOW_LABEL_MASK);
+ ah->av.flowlabel);
roce_set_field(ud_sq_wqe->byte_40,
V2_UD_SEND_WQE_BYTE_40_SL_M,
V2_UD_SEND_WQE_BYTE_40_SL_S,
- le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
- HNS_ROCE_SL_SHIFT);
+ ah->av.sl);
roce_set_field(ud_sq_wqe->byte_40,
V2_UD_SEND_WQE_BYTE_40_PORTN_M,
V2_UD_SEND_WQE_BYTE_40_PORTN_S,
@@ -887,8 +884,7 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
upper_32_bits(dma));
roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
- (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
- HNS_ROCE_CMQ_ENABLE);
+ ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
} else {
@@ -896,8 +892,7 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
upper_32_bits(dma));
roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
- (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
- HNS_ROCE_CMQ_ENABLE);
+ ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
}
@@ -1044,7 +1039,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
* If the command is sync, wait for the firmware to write back,
* if multi descriptors to be sent, use the first one to check
*/
- if ((desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
+ if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
do {
if (hns_roce_cmq_csq_done(hr_dev))
break;
@@ -1061,7 +1056,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
desc_to_use = &csq->desc[ntc];
desc[handle] = *desc_to_use;
dev_dbg(hr_dev->dev, "Get cmq desc:\n");
- desc_ret = desc[handle].retval;
+ desc_ret = le16_to_cpu(desc[handle].retval);
if (desc_ret == CMD_EXEC_SUCCESS)
ret = 0;
else
@@ -1124,32 +1119,124 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
return ret;
resp = (struct hns_roce_query_version *)desc.data;
- hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
+ hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
hr_dev->vendor_id = hr_dev->pci_dev->vendor;
return 0;
}
+static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ unsigned long reset_cnt;
+ bool sw_resetting;
+ bool hw_resetting;
+
+ reset_cnt = ops->ae_dev_reset_cnt(handle);
+ hw_resetting = ops->get_hw_reset_stat(handle);
+ sw_resetting = ops->ae_dev_resetting(handle);
+
+ if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting)
+ return true;
+
+ return false;
+}
+
+static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
+ int flag)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ unsigned long instance_stage;
+ unsigned long reset_cnt;
+ unsigned long end;
+ bool sw_resetting;
+ bool hw_resetting;
+
+ instance_stage = handle->rinfo.instance_state;
+ reset_cnt = ops->ae_dev_reset_cnt(handle);
+ hw_resetting = ops->get_hw_reset_stat(handle);
+ sw_resetting = ops->ae_dev_resetting(handle);
+
+ if (reset_cnt != hr_dev->reset_cnt) {
+ hr_dev->dis_db = true;
+ hr_dev->is_reset = true;
+ dev_info(hr_dev->dev, "Func clear success after reset.\n");
+ } else if (hw_resetting) {
+ hr_dev->dis_db = true;
+
+ dev_warn(hr_dev->dev,
+ "Func clear is pending, device in resetting state.\n");
+ end = HNS_ROCE_V2_HW_RST_TIMEOUT;
+ while (end) {
+ if (!ops->get_hw_reset_stat(handle)) {
+ hr_dev->is_reset = true;
+ dev_info(hr_dev->dev,
+ "Func clear success after reset.\n");
+ return;
+ }
+ msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
+ end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
+ }
+
+ dev_warn(hr_dev->dev, "Func clear failed.\n");
+ } else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) {
+ hr_dev->dis_db = true;
+
+ dev_warn(hr_dev->dev,
+ "Func clear is pending, device in resetting state.\n");
+ end = HNS_ROCE_V2_HW_RST_TIMEOUT;
+ while (end) {
+ if (ops->ae_dev_reset_cnt(handle) !=
+ hr_dev->reset_cnt) {
+ hr_dev->is_reset = true;
+ dev_info(hr_dev->dev,
+ "Func clear success after sw reset\n");
+ return;
+ }
+ msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
+ end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
+ }
+
+ dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
+ } else {
+ if (retval && !flag)
+ dev_warn(hr_dev->dev,
+ "Func clear read failed, ret = %d.\n", retval);
+
+ dev_warn(hr_dev->dev, "Func clear failed.\n");
+ }
+}
static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
+ bool fclr_write_fail_flag = false;
struct hns_roce_func_clear *resp;
struct hns_roce_cmq_desc desc;
unsigned long end;
- int ret;
+ int ret = 0;
+
+ if (hns_roce_func_clr_chk_rst(hr_dev))
+ goto out;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
resp = (struct hns_roce_func_clear *)desc.data;
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
+ fclr_write_fail_flag = true;
dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
ret);
- return;
+ goto out;
}
msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
while (end) {
+ if (hns_roce_func_clr_chk_rst(hr_dev))
+ goto out;
msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;
@@ -1166,7 +1253,9 @@ static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
}
}
+out:
dev_err(hr_dev->dev, "Func clear fail.\n");
+ hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
}
static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
@@ -1298,7 +1387,7 @@ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
swt = (struct hns_roce_vf_switch *)desc.data;
hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
- swt->rocee_sel |= cpu_to_le16(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
+ swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
roce_set_field(swt->fun_id,
VF_SWITCH_DATA_FUN_ID_VF_ID_M,
VF_SWITCH_DATA_FUN_ID_VF_ID_S,
@@ -1310,7 +1399,7 @@ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
- roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 1);
+ roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);
return hns_roce_cmq_send(hr_dev, &desc, 1);
@@ -1724,9 +1813,10 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
if (i == 0) {
- req_a->base_addr_l = link_tbl->table.map & 0xffffffff;
- req_a->base_addr_h = (link_tbl->table.map >> 32) &
- 0xffffffff;
+ req_a->base_addr_l =
+ cpu_to_le32(link_tbl->table.map & 0xffffffff);
+ req_a->base_addr_h =
+ cpu_to_le32(link_tbl->table.map >> 32);
roce_set_field(req_a->depth_pgsz_init_en,
CFG_LLM_QUE_DEPTH_M,
CFG_LLM_QUE_DEPTH_S,
@@ -1735,13 +1825,15 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
CFG_LLM_QUE_PGSZ_M,
CFG_LLM_QUE_PGSZ_S,
link_tbl->pg_sz);
- req_a->head_ba_l = entry[0].blk_ba0;
- req_a->head_ba_h_nxtptr = entry[0].blk_ba1_nxt_ptr;
+ req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
+ req_a->head_ba_h_nxtptr =
+ cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
roce_set_field(req_a->head_ptr,
CFG_LLM_HEAD_PTR_M,
CFG_LLM_HEAD_PTR_S, 0);
} else {
- req_b->tail_ba_l = entry[page_num - 1].blk_ba0;
+ req_b->tail_ba_l =
+ cpu_to_le32(entry[page_num - 1].blk_ba0);
roce_set_field(req_b->tail_ba_h,
CFG_LLM_TAIL_BA_H_M,
CFG_LLM_TAIL_BA_H_S,
@@ -1817,17 +1909,13 @@ static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
link_tbl->pg_list[i].map = t;
- entry[i].blk_ba0 = (t >> 12) & 0xffffffff;
- roce_set_field(entry[i].blk_ba1_nxt_ptr,
- HNS_ROCE_LINK_TABLE_BA1_M,
- HNS_ROCE_LINK_TABLE_BA1_S,
- t >> 44);
+ entry[i].blk_ba0 = (u32)(t >> 12);
+ entry[i].blk_ba1_nxt_ptr = (u32)(t >> 44);
if (i < (pg_num - 1))
- roce_set_field(entry[i].blk_ba1_nxt_ptr,
- HNS_ROCE_LINK_TABLE_NXT_PTR_M,
- HNS_ROCE_LINK_TABLE_NXT_PTR_S,
- i + 1);
+ entry[i].blk_ba1_nxt_ptr |=
+ (i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S;
+
}
link_tbl->npages = pg_num;
link_tbl->pg_sz = buf_chk_sz;
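/*
 * A sketch (editor's illustration, not part of the patch): the shifts
 * above split a page-aligned 64-bit bus address across two link-table
 * words, blk_ba0 taking the bits from 12 up and blk_ba1 the bits from 44
 * up (field widths inferred from the shift counts, not stated here):
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t t = 0x123456789ULL << 12;	/* page-aligned DMA address */
	uint32_t ba0 = (uint32_t)(t >> 12);	/* low word of the frame number */
	uint32_t ba1 = (uint32_t)(t >> 44);	/* remaining high bits */

	printf("ba0=%#x ba1=%#x\n", (unsigned)ba0, (unsigned)ba1);
	return 0;
}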
@@ -1888,7 +1976,7 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
goto err_tpq_init_failed;
}
- /* Alloc memory for QPC Timer buffer space chunk*/
+ /* Alloc memory for QPC Timer buffer space chunk */
for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
qpc_count++) {
ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
@@ -1899,7 +1987,7 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
}
}
- /* Alloc memory for CQC Timer buffer space chunk*/
+ /* Alloc memory for CQC Timer buffer space chunk */
for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
cqc_count++) {
ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
@@ -1952,7 +2040,7 @@ static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
if (status)
return status;
- return cpu_to_le32(mb_st->mb_status_hw_run);
+ return le32_to_cpu(mb_st->mb_status_hw_run);
}
static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
@@ -1978,10 +2066,10 @@ static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
- mb->in_param_l = cpu_to_le64(in_param);
- mb->in_param_h = cpu_to_le64(in_param) >> 32;
- mb->out_param_l = cpu_to_le64(out_param);
- mb->out_param_h = cpu_to_le64(out_param) >> 32;
+ mb->in_param_l = cpu_to_le32(in_param);
+ mb->in_param_h = cpu_to_le32(in_param >> 32);
+ mb->out_param_l = cpu_to_le32(out_param);
+ mb->out_param_h = cpu_to_le32(out_param >> 32);
mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
mb->token_event_en = cpu_to_le32(event << 16 | token);
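/*
 * A sketch (editor's illustration, not part of the patch): splitting a
 * 64-bit parameter into two little-endian 32-bit mailbox words. Shift in
 * CPU byte order first, then convert each half; the old
 * cpu_to_le64(x) >> 32 converted first and so picked the wrong bytes on
 * big-endian hosts:
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t param = 0x1122334455667788ull;
	uint32_t lo = htole32((uint32_t)param);		/* 0x55667788, LE */
	uint32_t hi = htole32((uint32_t)(param >> 32));	/* 0x11223344, LE */

	printf("lo=%#x hi=%#x\n", (unsigned)lo, (unsigned)hi);
	return 0;
}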
@@ -2123,7 +2211,7 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
roce_set_field(smac_tb->vf_smac_h_rsv,
CFG_SMAC_TB_VF_SMAC_H_M,
CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
- smac_tb->vf_smac_l = reg_smac_l;
+ smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
@@ -2409,7 +2497,7 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
++prod_index) {
- if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
+ if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
break;
}
@@ -2478,29 +2566,26 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
V2_CQC_BYTE_4_CEQN_S, vector);
- cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn);
roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
- cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
- cq_context->cqe_cur_blk_addr =
- cpu_to_le32(cq_context->cqe_cur_blk_addr);
+ cq_context->cqe_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
roce_set_field(cq_context->byte_16_hop_addr,
V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
- cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT)));
+ mtts[0] >> (32 + PAGE_ADDR_SHIFT));
roce_set_field(cq_context->byte_16_hop_addr,
V2_CQC_BYTE_16_CQE_HOP_NUM_M,
V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
- cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
+ cq_context->cqe_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
- cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT)));
+ mtts[1] >> (32 + PAGE_ADDR_SHIFT));
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
@@ -2510,7 +2595,7 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
- cq_context->cqe_ba = (u32)(dma_handle >> 3);
+ cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);
roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
@@ -2523,7 +2608,7 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
((u32)hr_cq->db.dma) >> 1);
- cq_context->db_record_addr = hr_cq->db.dma >> 32;
+ cq_context->db_record_addr = cpu_to_le32(hr_cq->db.dma >> 32);
roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
V2_CQC_BYTE_56_CQ_MAX_CNT_M,
@@ -2541,7 +2626,7 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
u32 notification_flag;
- u32 doorbell[2];
+ __le32 doorbell[2];
doorbell[0] = 0;
doorbell[1] = 0;
@@ -2668,9 +2753,9 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
++wq->tail;
} else if ((*cur_qp)->ibqp.srq) {
srq = to_hr_srq((*cur_qp)->ibqp.srq);
- wqe_ctr = le16_to_cpu(roce_get_field(cqe->byte_4,
- V2_CQE_BYTE_4_WQE_INDX_M,
- V2_CQE_BYTE_4_WQE_INDX_S));
+ wqe_ctr = (u16)roce_get_field(cqe->byte_4,
+ V2_CQE_BYTE_4_WQE_INDX_M,
+ V2_CQE_BYTE_4_WQE_INDX_S);
wc->wr_id = srq->wrid[wqe_ctr];
hns_roce_free_srq_wqe(srq, wqe_ctr);
} else {
@@ -2862,15 +2947,16 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->smac[5] = roce_get_field(cqe->byte_28,
V2_CQE_BYTE_28_SMAC_5_M,
V2_CQE_BYTE_28_SMAC_5_S);
+ wc->wc_flags |= IB_WC_WITH_SMAC;
if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
V2_CQE_BYTE_28_VID_M,
V2_CQE_BYTE_28_VID_S);
+ wc->wc_flags |= IB_WC_WITH_VLAN;
} else {
wc->vlan_id = 0xffff;
}
- wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
wc->network_hdr_type = roce_get_field(cqe->byte_28,
V2_CQE_BYTE_28_PORT_TYPE_M,
V2_CQE_BYTE_28_PORT_TYPE_S);
@@ -2905,11 +2991,49 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
return npolled;
}
+static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
+ int step_idx)
+{
+ int op;
+
+ if (type == HEM_TYPE_SCCC && step_idx)
+ return -EINVAL;
+
+ switch (type) {
+ case HEM_TYPE_QPC:
+ op = HNS_ROCE_CMD_WRITE_QPC_BT0;
+ break;
+ case HEM_TYPE_MTPT:
+ op = HNS_ROCE_CMD_WRITE_MPT_BT0;
+ break;
+ case HEM_TYPE_CQC:
+ op = HNS_ROCE_CMD_WRITE_CQC_BT0;
+ break;
+ case HEM_TYPE_SRQC:
+ op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
+ break;
+ case HEM_TYPE_SCCC:
+ op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
+ break;
+ case HEM_TYPE_QPC_TIMER:
+ op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
+ break;
+ case HEM_TYPE_CQC_TIMER:
+ op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
+ break;
+ default:
+ dev_warn(hr_dev->dev,
+ "Table %d not to be written by mailbox!\n", type);
+ return -EINVAL;
+ }
+
+ return op + step_idx;
+}
+
static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj,
int step_idx)
{
- struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_hem_iter iter;
struct hns_roce_hem_mhop mhop;
@@ -2922,7 +3046,7 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
u64 bt_ba = 0;
u32 chunk_ba_num;
u32 hop_num;
- u16 op = 0xff;
+ int op;
if (!hns_roce_check_whether_mhop(hr_dev, table->type))
return 0;
@@ -2944,39 +3068,10 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
hem_idx = i;
}
- switch (table->type) {
- case HEM_TYPE_QPC:
- op = HNS_ROCE_CMD_WRITE_QPC_BT0;
- break;
- case HEM_TYPE_MTPT:
- op = HNS_ROCE_CMD_WRITE_MPT_BT0;
- break;
- case HEM_TYPE_CQC:
- op = HNS_ROCE_CMD_WRITE_CQC_BT0;
- break;
- case HEM_TYPE_SRQC:
- op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
- break;
- case HEM_TYPE_SCCC:
- op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
- break;
- case HEM_TYPE_QPC_TIMER:
- op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
- break;
- case HEM_TYPE_CQC_TIMER:
- op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
- break;
- default:
- dev_warn(dev, "Table %d not to be written by mailbox!\n",
- table->type);
- return 0;
- }
-
- if (table->type == HEM_TYPE_SCCC && step_idx)
+ op = get_op_for_set_hem(hr_dev, table->type, step_idx);
+ if (op == -EINVAL)
return 0;
- op += step_idx;
-
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
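/*
 * A sketch (editor's illustration, not part of the patch): the caller
 * above keeps the old behaviour by treating -EINVAL from
 * get_op_for_set_hem() as "this table type is not written through the
 * mailbox" and returning success. The base-opcode-plus-step convention
 * in miniature (the opcode value below is invented):
 */
#include <errno.h>
#include <stdio.h>

#define CMD_WRITE_QPC_BT0 0x10	/* hypothetical opcode base */

static int op_for(int type, int step_idx)
{
	if (type != 0)			/* only one type in this sketch */
		return -EINVAL;
	return CMD_WRITE_QPC_BT0 + step_idx;	/* BT0/BT1/BT2 share a base */
}

int main(void)
{
	int op = op_for(0, 2);

	if (op == -EINVAL)
		return 0;	/* not mailbox-managed: silently skip */
	printf("opcode %#x\n", op);	/* 0x12, i.e. ..._BT2 */
	return 0;
}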
@@ -3118,6 +3213,43 @@ static void set_access_flags(struct hns_roce_qp *hr_qp,
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
}
+static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sge.sge_cnt));
+ else
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S,
+ hr_qp->sq.max_gs >
+ HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
+ ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+
+ roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
+
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
+
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
+ (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+ hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT ||
+ hr_qp->ibqp.srq) ? 0 :
+ ilog2((unsigned int)hr_qp->rq.wqe_cnt));
+
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
+}
+
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
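/*
 * A sketch (editor's illustration, not part of the patch): the SQ/RQ
 * "shift" fields written by set_qpc_wqe_cnt() hold log2 of the queue
 * depth, which presumes the depths are powers of two. A user-space
 * ilog2() analogue:
 */
#include <assert.h>

static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)		/* index of the highest set bit */
		r++;
	return r;
}

int main(void)
{
	assert(ilog2_u32(256) == 8);	/* depth 256 -> shift field 8 */
	assert(ilog2_u32(1) == 0);
	return 0;
}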
@@ -3138,21 +3270,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, 0);
- if (ibqp->qp_type == IB_QPT_GSI)
- roce_set_field(context->byte_4_sqpn_tst,
- V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S,
- ilog2((unsigned int)hr_qp->sge.sge_cnt));
- else
- roce_set_field(context->byte_4_sqpn_tst,
- V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S,
- hr_qp->sq.max_gs > 2 ?
- ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
-
- roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
-
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
@@ -3168,19 +3285,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
V2_QPC_BYTE_20_RQWS_S, 0);
- roce_set_field(context->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
-
- roce_set_field(context->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
- (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
- hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 :
- ilog2((unsigned int)hr_qp->rq.wqe_cnt));
- roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
+ set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
/* No VLAN need to set 0xFFF */
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
@@ -3225,7 +3330,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_68_rq_db,
V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
- context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
+ context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32);
qpc_mask->rq_db_record_addr = 0;
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
@@ -3456,22 +3561,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, 0);
- if (ibqp->qp_type == IB_QPT_GSI)
- roce_set_field(context->byte_4_sqpn_tst,
- V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S,
- ilog2((unsigned int)hr_qp->sge.sge_cnt));
- else
- roce_set_field(context->byte_4_sqpn_tst,
- V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S,
- hr_qp->sq.max_gs >
- HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
- ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
-
- roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
-
if (attr_mask & IB_QP_ACCESS_FLAGS) {
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
!!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
@@ -3506,20 +3595,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
0);
}
- roce_set_field(context->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
-
- roce_set_field(context->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
- (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
- hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 :
- ilog2((unsigned int)hr_qp->rq.wqe_cnt));
- roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
-
roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
@@ -3638,7 +3713,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
}
dmac = (u8 *)attr->ah_attr.roce.dmac;
- context->wqe_sge_ba = (u32)(wqe_sge_ba >> 3);
+ context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
qpc_mask->wqe_sge_ba = 0;
/*
@@ -3694,7 +3769,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
- context->rq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
+ context->rq_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
qpc_mask->rq_cur_blk_addr = 0;
roce_set_field(context->byte_92_srq_info,
@@ -3705,7 +3780,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
- context->rq_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
+ context->rq_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
qpc_mask->rq_nxt_blk_addr = 0;
roce_set_field(context->byte_104_rq_sge,
@@ -3720,7 +3795,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
V2_QPC_BYTE_132_TRRL_BA_S, 0);
- context->trrl_ba = (u32)(dma_handle_3 >> (16 + 4));
+ context->trrl_ba = cpu_to_le32(dma_handle_3 >> (16 + 4));
qpc_mask->trrl_ba = 0;
roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
V2_QPC_BYTE_140_TRRL_BA_S,
@@ -3728,7 +3803,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
V2_QPC_BYTE_140_TRRL_BA_S, 0);
- context->irrl_ba = (u32)(dma_handle_2 >> 6);
+ context->irrl_ba = cpu_to_le32(dma_handle_2 >> 6);
qpc_mask->irrl_ba = 0;
roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
V2_QPC_BYTE_208_IRRL_BA_S,
@@ -3876,7 +3951,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
* we should set all bits of the relevant fields in context mask to
* 0 at the same time, else set them to 0x1.
*/
- context->sq_cur_blk_addr = (u32)(sq_cur_blk >> PAGE_ADDR_SHIFT);
+ context->sq_cur_blk_addr = cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
roce_set_field(context->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
@@ -3888,8 +3963,8 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) ||
hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
- ((u32)(sge_cur_blk >>
- PAGE_ADDR_SHIFT)) : 0;
+ cpu_to_le32(sge_cur_blk >>
+ PAGE_ADDR_SHIFT) : 0;
roce_set_field(context->byte_184_irrl_idx,
V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
@@ -3902,7 +3977,8 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
- context->rx_sq_cur_blk_addr = (u32)(sq_cur_blk >> PAGE_ADDR_SHIFT);
+ context->rx_sq_cur_blk_addr =
+ cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
roce_set_field(context->byte_232_irrl_sge,
V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
@@ -3974,30 +4050,119 @@ static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state,
}
-static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
- const struct ib_qp_attr *attr,
- int attr_mask, enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
+static int hns_roce_v2_set_path(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ int attr_mask,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
{
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct hns_roce_v2_qp_context *context;
- struct hns_roce_v2_qp_context *qpc_mask;
- struct device *dev = hr_dev->dev;
- int ret = -EINVAL;
+ const struct ib_gid_attr *gid_attr = NULL;
+ int is_roce_protocol;
+ bool is_udp = false;
+ u16 vlan = 0xffff;
+ u8 ib_port;
+ u8 hr_port;
+ int ret;
- context = kcalloc(2, sizeof(*context), GFP_ATOMIC);
- if (!context)
- return -ENOMEM;
+ ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
+ hr_port = ib_port - 1;
+ is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
+ rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
+
+ if (is_roce_protocol) {
+ gid_attr = attr->ah_attr.grh.sgid_attr;
+ ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL);
+ if (ret)
+ return ret;
+
+ if (gid_attr)
+ is_udp = (gid_attr->gid_type ==
+ IB_GID_TYPE_ROCE_UDP_ENCAP);
+ }
+
+ if (vlan < VLAN_CFI_MASK) {
+ roce_set_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
+ roce_set_bit(context->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
+ roce_set_bit(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
+ }
+
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
+ V2_QPC_BYTE_24_VLAN_ID_S, vlan);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
+ V2_QPC_BYTE_24_VLAN_ID_S, 0);
+
+ if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
+ dev_err(hr_dev->dev, "sgid_index(%u) too large. max is %d\n",
+ grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
+ return -EINVAL;
+ }
+
+ if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
+ dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
+ return -EINVAL;
+ }
+
+ roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
+ V2_QPC_BYTE_52_UDPSPN_S,
+ is_udp ? 0x12b7 : 0);
+
+ roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
+ V2_QPC_BYTE_52_UDPSPN_S, 0);
+
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
+ grh->sgid_index);
+
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
+
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
+ V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
+ V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
+
+ if (hr_dev->pci_dev->revision == 0x21 && is_udp)
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
+ V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2);
+ else
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
+ V2_QPC_BYTE_24_TC_S, grh->traffic_class);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
+ V2_QPC_BYTE_24_TC_S, 0);
+ roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
+ V2_QPC_BYTE_28_FL_S, grh->flow_label);
+ roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
+ V2_QPC_BYTE_28_FL_S, 0);
+ memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+ memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
+ roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+ V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
+ roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+ V2_QPC_BYTE_28_SL_S, 0);
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+
+ return 0;
+}
+
+static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ int attr_mask,
+ enum ib_qp_state cur_state,
+ enum ib_qp_state new_state,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ int ret = 0;
- qpc_mask = context + 1;
- /*
- * In v2 engine, software pass context and context mask to hardware
- * when modifying qp. If software need modify some fields in context,
- * we should set all bits of the relevant fields in context mask to
- * 0 at the same time, else set them to 0x1.
- */
- memset(qpc_mask, 0xff, sizeof(*qpc_mask));
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, sizeof(*qpc_mask));
modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
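/*
 * A sketch (editor's illustration, not part of the patch): in
 * hns_roce_v2_set_path() above, rdma_read_gid_l2_fields() leaves vlan at
 * the 0xffff initializer when the GID has no VLAN device, so
 * "vlan < VLAN_CFI_MASK" (0x1000) doubles as the VLAN-present test:
 */
#include <stdio.h>

#define VLAN_CFI_MASK 0x1000	/* as in <linux/if_vlan.h> */

int main(void)
{
	unsigned short vlan = 0xffff;	/* sentinel: no VLAN on this GID */

	printf("vlan tagging %s\n",
	       vlan < VLAN_CFI_MASK ? "enabled" : "left disabled");
	return 0;
}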
@@ -4019,134 +4184,30 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
/* Nothing */
;
} else {
- dev_err(dev, "Illegal state for QP!\n");
+ dev_err(hr_dev->dev, "Illegal state for QP!\n");
ret = -EINVAL;
goto out;
}
- /* When QP state is err, SQ and RQ WQE should be flushed */
- if (new_state == IB_QPS_ERR) {
- roce_set_field(context->byte_160_sq_ci_pi,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
- hr_qp->sq.head);
- roce_set_field(qpc_mask->byte_160_sq_ci_pi,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
+out:
+ return ret;
+}
- if (!ibqp->srq) {
- roce_set_field(context->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
- hr_qp->rq.head);
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
- }
- }
+static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ int attr_mask,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ int ret = 0;
if (attr_mask & IB_QP_AV) {
- const struct ib_global_route *grh =
- rdma_ah_read_grh(&attr->ah_attr);
- const struct ib_gid_attr *gid_attr = NULL;
- int is_roce_protocol;
- u16 vlan = 0xffff;
- u8 ib_port;
- u8 hr_port;
-
- ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num :
- hr_qp->port + 1;
- hr_port = ib_port - 1;
- is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
- rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
-
- if (is_roce_protocol) {
- gid_attr = attr->ah_attr.grh.sgid_attr;
- ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL);
- if (ret)
- goto out;
- }
-
- if (vlan < VLAN_CFI_MASK) {
- roce_set_bit(context->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
- roce_set_bit(qpc_mask->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
- roce_set_bit(context->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
- roce_set_bit(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
- }
-
- roce_set_field(context->byte_24_mtu_tc,
- V2_QPC_BYTE_24_VLAN_ID_M,
- V2_QPC_BYTE_24_VLAN_ID_S, vlan);
- roce_set_field(qpc_mask->byte_24_mtu_tc,
- V2_QPC_BYTE_24_VLAN_ID_M,
- V2_QPC_BYTE_24_VLAN_ID_S, 0);
-
- if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
- dev_err(hr_dev->dev,
- "sgid_index(%u) too large. max is %d\n",
- grh->sgid_index,
- hr_dev->caps.gid_table_len[hr_port]);
- ret = -EINVAL;
- goto out;
- }
-
- if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
- dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
- ret = -EINVAL;
- goto out;
- }
-
- roce_set_field(context->byte_52_udpspn_dmac,
- V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S,
- (gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ?
- 0 : 0x12b7);
-
- roce_set_field(qpc_mask->byte_52_udpspn_dmac,
- V2_QPC_BYTE_52_UDPSPN_M,
- V2_QPC_BYTE_52_UDPSPN_S, 0);
-
- roce_set_field(context->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SGID_IDX_M,
- V2_QPC_BYTE_20_SGID_IDX_S, grh->sgid_index);
-
- roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SGID_IDX_M,
- V2_QPC_BYTE_20_SGID_IDX_S, 0);
-
- roce_set_field(context->byte_24_mtu_tc,
- V2_QPC_BYTE_24_HOP_LIMIT_M,
- V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
- roce_set_field(qpc_mask->byte_24_mtu_tc,
- V2_QPC_BYTE_24_HOP_LIMIT_M,
- V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
-
- if (hr_dev->pci_dev->revision == 0x21 &&
- gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
- roce_set_field(context->byte_24_mtu_tc,
- V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
- grh->traffic_class >> 2);
- else
- roce_set_field(context->byte_24_mtu_tc,
- V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
- grh->traffic_class);
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
- V2_QPC_BYTE_24_TC_S, 0);
- roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
- V2_QPC_BYTE_28_FL_S, grh->flow_label);
- roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
- V2_QPC_BYTE_28_FL_S, 0);
- memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
- memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
- roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
- V2_QPC_BYTE_28_SL_S,
- rdma_ah_get_sl(&attr->ah_attr));
- roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
- V2_QPC_BYTE_28_SL_S, 0);
- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+ ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
+ qpc_mask);
+ if (ret)
+ return ret;
}
if (attr_mask & IB_QP_TIMEOUT) {
@@ -4158,7 +4219,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
0);
} else {
- dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
+ dev_warn(hr_dev->dev,
+ "Local ACK timeout shall be 0 to 30.\n");
}
}
@@ -4196,6 +4258,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
V2_QPC_BYTE_244_RNR_CNT_S, 0);
}
+ /* RC&UC&UD required attr */
if (attr_mask & IB_QP_SQ_PSN) {
roce_set_field(context->byte_172_sq_psn,
V2_QPC_BYTE_172_SQ_CUR_PSN_M,
@@ -4290,11 +4353,85 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_QKEY) {
- context->qkey_xrcd = attr->qkey;
+ context->qkey_xrcd = cpu_to_le32(attr->qkey);
qpc_mask->qkey_xrcd = 0;
hr_qp->qkey = attr->qkey;
}
+ return ret;
+}
+
+static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ hr_qp->atomic_rd_en = attr->qp_access_flags;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ hr_qp->resp_depth = attr->max_dest_rd_atomic;
+ if (attr_mask & IB_QP_PORT) {
+ hr_qp->port = attr->port_num - 1;
+ hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+ }
+}
+
+static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_v2_qp_context ctx[2];
+ struct hns_roce_v2_qp_context *context = ctx;
+ struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ /*
+ * In v2 engine, software pass context and context mask to hardware
+ * when modifying qp. If software need modify some fields in context,
+ * we should set all bits of the relevant fields in context mask to
+ * 0 at the same time, else set them to 0x1.
+ */
+ memset(context, 0, sizeof(*context));
+ memset(qpc_mask, 0xff, sizeof(*qpc_mask));
+ ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
+ new_state, context, qpc_mask);
+ if (ret)
+ goto out;
+
+ /* When QP state is err, SQ and RQ WQE should be flushed */
+ if (new_state == IB_QPS_ERR) {
+ roce_set_field(context->byte_160_sq_ci_pi,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
+ hr_qp->sq.head);
+ roce_set_field(qpc_mask->byte_160_sq_ci_pi,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
+
+ if (!ibqp->srq) {
+ roce_set_field(context->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
+ hr_qp->rq.head);
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
+ }
+ }
+
+ /* Configure the optional fields */
+ ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
+ qpc_mask);
+ if (ret)
+ goto out;
+
roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
ibqp->srq ? 1 : 0);
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
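/*
 * A sketch (editor's illustration, not part of the patch): ctx[2] puts
 * the context and its mask side by side on the stack, dropping the
 * GFP_ATOMIC allocation from the modify path. The mask convention
 * (0 bits = "hardware, apply this field") in miniature:
 */
#include <stdio.h>
#include <string.h>

struct qp_ctx { unsigned int byte_60_qpst_tempid; };

int main(void)
{
	struct qp_ctx ctx[2];
	struct qp_ctx *context = ctx, *qpc_mask = ctx + 1;

	memset(context, 0, sizeof(*context));
	memset(qpc_mask, 0xff, sizeof(*qpc_mask));	/* all-ones: untouched */

	context->byte_60_qpst_tempid = 3;	/* new QP state */
	qpc_mask->byte_60_qpst_tempid = 0;	/* clear mask bits to apply */
	printf("state %u\n", context->byte_60_qpst_tempid);
	return 0;
}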
@@ -4307,8 +4444,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
V2_QPC_BYTE_60_QP_ST_S, 0);
/* SW pass context to HW */
- ret = hns_roce_v2_qp_modify(hr_dev, cur_state, new_state,
- context, hr_qp);
+ ret = hns_roce_v2_qp_modify(hr_dev, cur_state, new_state, ctx, hr_qp);
if (ret) {
dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
goto out;
@@ -4316,15 +4452,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
hr_qp->state = new_state;
- if (attr_mask & IB_QP_ACCESS_FLAGS)
- hr_qp->atomic_rd_en = attr->qp_access_flags;
-
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- hr_qp->resp_depth = attr->max_dest_rd_atomic;
- if (attr_mask & IB_QP_PORT) {
- hr_qp->port = attr->port_num - 1;
- hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
- }
+ hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
if (new_state == IB_QPS_RESET && !ibqp->uobject) {
hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
@@ -4344,7 +4472,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
}
out:
- kfree(context);
return ret;
}
@@ -4395,16 +4522,12 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct hns_roce_v2_qp_context *context;
+ struct hns_roce_v2_qp_context context = {};
struct device *dev = hr_dev->dev;
int tmp_qp_state;
int state;
int ret;
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
-
memset(qp_attr, 0, sizeof(*qp_attr));
memset(qp_init_attr, 0, sizeof(*qp_init_attr));
@@ -4416,14 +4539,14 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
goto done;
}
- ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
+ ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
if (ret) {
dev_err(dev, "query qpc error\n");
ret = -EINVAL;
goto out;
}
- state = roce_get_field(context->byte_60_qpst_tempid,
+ state = roce_get_field(context.byte_60_qpst_tempid,
V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
if (tmp_qp_state == -1) {
@@ -4433,7 +4556,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
}
hr_qp->state = (u8)tmp_qp_state;
qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
- qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc,
+ qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context.byte_24_mtu_tc,
V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S);
qp_attr->path_mig_state = IB_MIG_ARMED;
@@ -4441,20 +4564,20 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
if (hr_qp->ibqp.qp_type == IB_QPT_UD)
qp_attr->qkey = V2_QKEY_VAL;
- qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn,
+ qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_EPSN_M,
V2_QPC_BYTE_108_RX_REQ_EPSN_S);
- qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn,
+ qp_attr->sq_psn = (u32)roce_get_field(context.byte_172_sq_psn,
V2_QPC_BYTE_172_SQ_CUR_PSN_M,
V2_QPC_BYTE_172_SQ_CUR_PSN_S);
- qp_attr->dest_qp_num = (u8)roce_get_field(context->byte_56_dqpn_err,
+ qp_attr->dest_qp_num = (u8)roce_get_field(context.byte_56_dqpn_err,
V2_QPC_BYTE_56_DQPN_M,
V2_QPC_BYTE_56_DQPN_S);
- qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_RRE_S)) << V2_QP_RWE_S) |
- ((roce_get_bit(context->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_RWE_S)) << V2_QP_RRE_S) |
- ((roce_get_bit(context->byte_76_srqn_op_en,
+ qp_attr->qp_access_flags = ((roce_get_bit(context.byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) |
+ ((roce_get_bit(context.byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) |
+ ((roce_get_bit(context.byte_76_srqn_op_en,
V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
@@ -4463,43 +4586,43 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
rdma_ah_retrieve_grh(&qp_attr->ah_attr);
rdma_ah_set_sl(&qp_attr->ah_attr,
- roce_get_field(context->byte_28_at_fl,
+ roce_get_field(context.byte_28_at_fl,
V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S));
- grh->flow_label = roce_get_field(context->byte_28_at_fl,
+ grh->flow_label = roce_get_field(context.byte_28_at_fl,
V2_QPC_BYTE_28_FL_M,
V2_QPC_BYTE_28_FL_S);
- grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx,
+ grh->sgid_index = roce_get_field(context.byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGID_IDX_M,
V2_QPC_BYTE_20_SGID_IDX_S);
- grh->hop_limit = roce_get_field(context->byte_24_mtu_tc,
+ grh->hop_limit = roce_get_field(context.byte_24_mtu_tc,
V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S);
- grh->traffic_class = roce_get_field(context->byte_24_mtu_tc,
+ grh->traffic_class = roce_get_field(context.byte_24_mtu_tc,
V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S);
- memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
+ memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
}
qp_attr->port_num = hr_qp->port + 1;
qp_attr->sq_draining = 0;
- qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
+ qp_attr->max_rd_atomic = 1 << roce_get_field(context.byte_208_irrl,
V2_QPC_BYTE_208_SR_MAX_M,
V2_QPC_BYTE_208_SR_MAX_S);
- qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq,
+ qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq,
V2_QPC_BYTE_140_RR_MAX_M,
V2_QPC_BYTE_140_RR_MAX_S);
- qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn,
+ qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn,
V2_QPC_BYTE_80_MIN_RNR_TIME_M,
V2_QPC_BYTE_80_MIN_RNR_TIME_S);
- qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl,
+ qp_attr->timeout = (u8)roce_get_field(context.byte_28_at_fl,
V2_QPC_BYTE_28_AT_M,
V2_QPC_BYTE_28_AT_S);
- qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn,
+ qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
V2_QPC_BYTE_212_RETRY_CNT_M,
V2_QPC_BYTE_212_RETRY_CNT_S);
- qp_attr->rnr_retry = context->rq_rnr_timer;
+ qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer);
done:
qp_attr->cur_qp_state = qp_attr->qp_state;
@@ -4518,7 +4641,6 @@ done:
out:
mutex_unlock(&hr_qp->mutex);
- kfree(context);
return ret;
}
@@ -4527,7 +4649,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
struct ib_udata *udata)
{
struct hns_roce_cq *send_cq, *recv_cq;
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
@@ -4535,8 +4657,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
hr_qp->state, IB_QPS_RESET);
if (ret) {
- dev_err(dev, "modify QP %06lx to ERR failed.\n",
- hr_qp->qpn);
+ ibdev_err(ibdev, "modify QP to Reset failed.\n");
return ret;
}
}
@@ -4605,7 +4726,8 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
if (ret) {
- dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
+ ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
+ hr_qp->qpn, ret);
return ret;
}
@@ -4829,7 +4951,7 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
{
struct hns_roce_dev *hr_dev = eq->hr_dev;
- u32 doorbell[2];
+ __le32 doorbell[2];
doorbell[0] = 0;
doorbell[1] = 0;
@@ -4904,7 +5026,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
struct hns_roce_eq *eq)
{
struct device *dev = hr_dev->dev;
- struct hns_roce_aeqe *aeqe;
+ struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
int aeqe_found = 0;
int event_type;
int sub_type;
@@ -4912,8 +5034,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
u32 qpn;
u32 cqn;
- while ((aeqe = next_aeqe_sw_v2(eq))) {
-
+ while (aeqe) {
/* Make sure we read AEQ entry after we have checked the
* ownership bit
*/
@@ -4977,11 +5098,12 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
++eq->cons_index;
aeqe_found = 1;
- if (eq->cons_index > (2 * eq->entries - 1)) {
- dev_warn(dev, "cons_index overflow, set back to 0.\n");
+ if (eq->cons_index > (2 * eq->entries - 1))
eq->cons_index = 0;
- }
+
hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
+
+ aeqe = next_aeqe_sw_v2(eq);
}
set_eq_cons_index_v2(eq);
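/*
 * A sketch (editor's illustration, not part of the patch): the consumer
 * index intentionally runs to twice the queue depth before wrapping; the
 * inference (from the coefficient of 2, not stated in the patch) is that
 * the extra range tracks the ownership-bit phase of each pass, so
 * wrapping is normal AEQ operation and no longer worth a warning:
 */
#include <stdio.h>

int main(void)
{
	const unsigned int entries = 8;	/* queue depth, power of two */
	unsigned int cons_index = 15;

	++cons_index;
	if (cons_index > 2 * entries - 1)
		cons_index = 0;		/* wrap after 2 * depth entries */

	printf("slot %u, phase %u\n",
	       cons_index & (entries - 1), (cons_index / entries) & 1);
	return 0;
}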
@@ -5034,12 +5156,11 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
struct hns_roce_eq *eq)
{
struct device *dev = hr_dev->dev;
- struct hns_roce_ceqe *ceqe;
+ struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
int ceqe_found = 0;
u32 cqn;
- while ((ceqe = next_ceqe_sw_v2(eq))) {
-
+ while (ceqe) {
/* Make sure we read CEQ entry after we have checked the
* ownership bit
*/
@@ -5054,10 +5175,12 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
++eq->cons_index;
ceqe_found = 1;
- if (eq->cons_index > (2 * eq->entries - 1)) {
+ if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1)) {
dev_warn(dev, "cons_index overflow, set back to 0.\n");
eq->cons_index = 0;
}
+
+ ceqe = next_ceqe_sw_v2(eq);
}
set_eq_cons_index_v2(eq);
@@ -5093,14 +5216,14 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
- if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
+ if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
struct pci_dev *pdev = hr_dev->pci_dev;
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
const struct hnae3_ae_ops *ops = ae_dev->ops;
dev_err(dev, "AEQ overflow!\n");
- roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
+ int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
/* Set reset level for reset_event() */
@@ -5110,27 +5233,27 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
if (ops->reset_event)
ops->reset_event(pdev, NULL);
- roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
int_work = 1;
- } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
+ } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
dev_err(dev, "BUS ERR!\n");
- roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1);
+ int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S;
roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
- roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
int_work = 1;
- } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
+ } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
dev_err(dev, "OTHER ERR!\n");
- roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1);
+ int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S;
roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
- roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
int_work = 1;
@@ -5202,14 +5325,12 @@ static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
- /* hop_num = 0 */
if (mhop_num == HNS_ROCE_HOP_NUM_0) {
dma_free_coherent(dev, (unsigned int)(eq->entries *
eq->eqe_size), eq->bt_l0, eq->l0_dma);
return;
}
- /* hop_num = 1 or hop = 2 */
dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
if (mhop_num == 1) {
for (i = 0; i < eq->l0_last_num; i++) {
@@ -5449,7 +5570,6 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
buf_chk_sz);
bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN);
- /* hop_num = 0 */
if (mhop_num == HNS_ROCE_HOP_NUM_0) {
if (eq->entries > buf_chk_sz / eq->eqe_size) {
dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
@@ -5515,7 +5635,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
break;
}
eq->cur_eqe_ba = eq->buf_dma[0];
- eq->nxt_eqe_ba = eq->buf_dma[1];
+ if (ba_num > 1)
+ eq->nxt_eqe_ba = eq->buf_dma[1];
} else if (mhop_num == 2) {
/* alloc L1 BT and buf */
@@ -5556,7 +5677,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
break;
}
eq->cur_eqe_ba = eq->buf_dma[0];
- eq->nxt_eqe_ba = eq->buf_dma[1];
+ if (ba_num > 1)
+ eq->nxt_eqe_ba = eq->buf_dma[1];
}
eq->l0_last_num = i + 1;
@@ -5699,6 +5821,95 @@ free_cmd_mbox:
return ret;
}
+static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
+ int comp_num, int aeq_num, int other_num)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ int i, j;
+ int ret;
+
+ for (i = 0; i < irq_num; i++) {
+ hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
+ GFP_KERNEL);
+ if (!hr_dev->irq_names[i]) {
+ ret = -ENOMEM;
+ goto err_kzalloc_failed;
+ }
+ }
+
+ /* irq contains: abnormal + AEQ + CEQ */
+ for (j = 0; j < other_num; j++)
+ snprintf((char *)hr_dev->irq_names[j],
+ HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", j);
+
+ for (j = other_num; j < (other_num + aeq_num); j++)
+ snprintf((char *)hr_dev->irq_names[j],
+ HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
+ j - other_num);
+
+ for (j = (other_num + aeq_num); j < irq_num; j++)
+ snprintf((char *)hr_dev->irq_names[j],
+ HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
+ j - other_num - aeq_num);
+
+ for (j = 0; j < irq_num; j++) {
+ if (j < other_num)
+ ret = request_irq(hr_dev->irq[j],
+ hns_roce_v2_msix_interrupt_abn,
+ 0, hr_dev->irq_names[j], hr_dev);
+
+ else if (j < (other_num + comp_num))
+ ret = request_irq(eq_table->eq[j - other_num].irq,
+ hns_roce_v2_msix_interrupt_eq,
+ 0, hr_dev->irq_names[j + aeq_num],
+ &eq_table->eq[j - other_num]);
+ else
+ ret = request_irq(eq_table->eq[j - other_num].irq,
+ hns_roce_v2_msix_interrupt_eq,
+ 0, hr_dev->irq_names[j - comp_num],
+ &eq_table->eq[j - other_num]);
+ if (ret) {
+ dev_err(hr_dev->dev, "Request irq error!\n");
+ goto err_request_failed;
+ }
+ }
+
+ return 0;
+
+err_request_failed:
+ for (j -= 1; j >= 0; j--)
+ if (j < other_num)
+ free_irq(hr_dev->irq[j], hr_dev);
+ else
+ free_irq(eq_table->eq[j - other_num].irq,
+ &eq_table->eq[j - other_num]);
+
+err_kzalloc_failed:
+ for (i -= 1; i >= 0; i--)
+ kfree(hr_dev->irq_names[i]);
+
+ return ret;
+}
+
+static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
+{
+ int irq_num;
+ int eq_num;
+ int i;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ irq_num = eq_num + hr_dev->caps.num_other_vectors;
+
+ for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
+ free_irq(hr_dev->irq[i], hr_dev);
+
+ for (i = 0; i < eq_num; i++)
+ free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);
+
+ for (i = 0; i < irq_num; i++)
+ kfree(hr_dev->irq_names[i]);
+}
+
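The split-out __hns_roce_request_irq() keeps the original vector layout: name slots [0, other_num) are "hns-abn-*", [other_num, other_num + aeq_num) are "hns-aeq-*", and the rest are "hns-ceq-*", while eq_table->eq[] holds the CEQs first. That mismatch is why CEQ requests use irq_names[j + aeq_num] and AEQ requests use irq_names[j - comp_num]. A runnable sketch of the same index arithmetic, with assumed example counts (one abnormal vector, one AEQ, four CEQs):

    #include <stdio.h>

    int main(void)
    {
        int other_num = 1, aeq_num = 1, comp_num = 4;
        int irq_num = other_num + aeq_num + comp_num;

        for (int j = 0; j < irq_num; j++) {
            if (j < other_num)
                printf("irq %d -> hns-abn-%d\n", j, j);
            else if (j < other_num + comp_num)      /* eq[0..comp_num) are CEQs */
                printf("irq %d -> name slot %d (hns-ceq-%d)\n",
                       j, j + aeq_num, j - other_num);
            else                                    /* trailing eq[] entries are AEQs */
                printf("irq %d -> name slot %d (hns-aeq-%d)\n",
                       j, j - comp_num, j - comp_num - other_num);
        }
        return 0;
    }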
static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
@@ -5710,7 +5921,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
int other_num;
int comp_num;
int aeq_num;
- int i, j, k;
+ int i;
int ret;
other_num = hr_dev->caps.num_other_vectors;
@@ -5724,27 +5935,18 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
if (!eq_table->eq)
return -ENOMEM;
- for (i = 0; i < irq_num; i++) {
- hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
- GFP_KERNEL);
- if (!hr_dev->irq_names[i]) {
- ret = -ENOMEM;
- goto err_failed_kzalloc;
- }
- }
-
/* create eq */
- for (j = 0; j < eq_num; j++) {
- eq = &eq_table->eq[j];
+ for (i = 0; i < eq_num; i++) {
+ eq = &eq_table->eq[i];
eq->hr_dev = hr_dev;
- eq->eqn = j;
- if (j < comp_num) {
+ eq->eqn = i;
+ if (i < comp_num) {
/* CEQ */
eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
eq->type_flag = HNS_ROCE_CEQ;
eq->entries = hr_dev->caps.ceqe_depth;
eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
- eq->irq = hr_dev->irq[j + other_num + aeq_num];
+ eq->irq = hr_dev->irq[i + other_num + aeq_num];
eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
} else {
@@ -5753,7 +5955,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
eq->type_flag = HNS_ROCE_AEQ;
eq->entries = hr_dev->caps.aeqe_depth;
eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
- eq->irq = hr_dev->irq[j - comp_num + other_num];
+ eq->irq = hr_dev->irq[i - comp_num + other_num];
eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
}
@@ -5768,40 +5970,11 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
/* enable irq */
hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
- /* irq contains: abnormal + AEQ + CEQ*/
- for (k = 0; k < irq_num; k++)
- if (k < other_num)
- snprintf((char *)hr_dev->irq_names[k],
- HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
- else if (k < (other_num + aeq_num))
- snprintf((char *)hr_dev->irq_names[k],
- HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
- k - other_num);
- else
- snprintf((char *)hr_dev->irq_names[k],
- HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
- k - other_num - aeq_num);
-
- for (k = 0; k < irq_num; k++) {
- if (k < other_num)
- ret = request_irq(hr_dev->irq[k],
- hns_roce_v2_msix_interrupt_abn,
- 0, hr_dev->irq_names[k], hr_dev);
-
- else if (k < (other_num + comp_num))
- ret = request_irq(eq_table->eq[k - other_num].irq,
- hns_roce_v2_msix_interrupt_eq,
- 0, hr_dev->irq_names[k + aeq_num],
- &eq_table->eq[k - other_num]);
- else
- ret = request_irq(eq_table->eq[k - other_num].irq,
- hns_roce_v2_msix_interrupt_eq,
- 0, hr_dev->irq_names[k - comp_num],
- &eq_table->eq[k - other_num]);
- if (ret) {
- dev_err(dev, "Request irq error!\n");
- goto err_request_irq_fail;
- }
+ ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num,
+ aeq_num, other_num);
+ if (ret) {
+ dev_err(dev, "Request irq failed.\n");
+ goto err_request_irq_fail;
}
hr_dev->irq_workq =
@@ -5809,26 +5982,20 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
if (!hr_dev->irq_workq) {
dev_err(dev, "Create irq workqueue failed!\n");
ret = -ENOMEM;
- goto err_request_irq_fail;
+ goto err_create_wq_fail;
}
return 0;
+err_create_wq_fail:
+ __hns_roce_free_irq(hr_dev);
+
err_request_irq_fail:
- for (k -= 1; k >= 0; k--)
- if (k < other_num)
- free_irq(hr_dev->irq[k], hr_dev);
- else
- free_irq(eq_table->eq[k - other_num].irq,
- &eq_table->eq[k - other_num]);
+ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
err_create_eq_fail:
- for (j -= 1; j >= 0; j--)
- hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);
-
-err_failed_kzalloc:
for (i -= 1; i >= 0; i--)
- kfree(hr_dev->irq_names[i]);
+ hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
kfree(eq_table->eq);
return ret;
@@ -5837,30 +6004,22 @@ err_failed_kzalloc:
static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
- int irq_num;
int eq_num;
int i;
eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
- irq_num = eq_num + hr_dev->caps.num_other_vectors;
/* Disable irq */
hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
- for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
- free_irq(hr_dev->irq[i], hr_dev);
+ __hns_roce_free_irq(hr_dev);
for (i = 0; i < eq_num; i++) {
hns_roce_v2_destroy_eqc(hr_dev, i);
- free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
-
hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
}
- for (i = 0; i < irq_num; i++)
- kfree(hr_dev->irq_names[i]);
-
kfree(eq_table->eq);
flush_workqueue(hr_dev->irq_workq);
@@ -5904,7 +6063,7 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
roce_set_field(srq_context->byte_24_wqe_bt_ba,
SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
- cpu_to_le32(dma_handle_wqe >> 35));
+ dma_handle_wqe >> 35);
roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
SRQC_BYTE_28_PD_S, pdn);
@@ -5912,20 +6071,18 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
fls(srq->max_gs - 1));
- srq_context->idx_bt_ba = (u32)(dma_handle_idx >> 3);
- srq_context->idx_bt_ba = cpu_to_le32(srq_context->idx_bt_ba);
+ srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
roce_set_field(srq_context->rsv_idx_bt_ba,
SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
- cpu_to_le32(dma_handle_idx >> 35));
+ dma_handle_idx >> 35);
- srq_context->idx_cur_blk_addr = (u32)(mtts_idx[0] >> PAGE_ADDR_SHIFT);
srq_context->idx_cur_blk_addr =
- cpu_to_le32(srq_context->idx_cur_blk_addr);
+ cpu_to_le32(mtts_idx[0] >> PAGE_ADDR_SHIFT);
roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
- cpu_to_le32((mtts_idx[0]) >> (32 + PAGE_ADDR_SHIFT)));
+ mtts_idx[0] >> (32 + PAGE_ADDR_SHIFT));
roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
@@ -5941,13 +6098,12 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
hr_dev->caps.idx_buf_pg_sz);
- srq_context->idx_nxt_blk_addr = (u32)(mtts_idx[1] >> PAGE_ADDR_SHIFT);
srq_context->idx_nxt_blk_addr =
- cpu_to_le32(srq_context->idx_nxt_blk_addr);
+ cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT);
roce_set_field(srq_context->rsv_idxnxtblkaddr,
SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
- cpu_to_le32((mtts_idx[1]) >> (32 + PAGE_ADDR_SHIFT)));
+ mtts_idx[1] >> (32 + PAGE_ADDR_SHIFT));
roce_set_field(srq_context->byte_56_xrc_cqn,
SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
cqn);
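The WQE/idx BT base addresses written above are 64-bit DMA addresses split across two context fields: the address is 8-byte aligned, so `>> 3` yields a 32-bit low word and `>> 35` the remaining high bits. A self-contained check that the split is lossless for an aligned address (the example value is arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t ba = 0x0000004321abcde8ULL;   /* 8-byte-aligned DMA address */
        uint32_t lo = (uint32_t)(ba >> 3);     /* bits 3..34 */
        uint32_t hi = (uint32_t)(ba >> 35);    /* bits 35..63 */
        uint64_t back = ((uint64_t)hi << 35) | ((uint64_t)lo << 3);

        printf("ba=%016llx reassembled=%016llx\n",
               (unsigned long long)ba, (unsigned long long)back);
        return 0;
    }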
@@ -6141,9 +6297,10 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
*/
wmb();
- srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
- (srq->srqn & V2_DB_BYTE_4_TAG_M);
- srq_db.parameter = srq->head;
+ srq_db.byte_4 =
+ cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
+ (srq->srqn & V2_DB_BYTE_4_TAG_M));
+ srq_db.parameter = cpu_to_le32(srq->head);
hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
@@ -6433,7 +6590,7 @@ static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
- msleep(100);
+ msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
__hns_roce_hw_v2_uninit_instance(handle, false);
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 478f5a5b7aa1..43219d2f7de0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -96,7 +96,10 @@
#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
#define HNS_ROCE_V2_RSV_QPS 8
-#define HNS_ROCE_V2_HW_RST_TIMEOUT 1000
+#define HNS_ROCE_V2_HW_RST_TIMEOUT 1000
+#define HNS_ROCE_V2_HW_RST_UNINT_DELAY 100
+
+#define HNS_ROCE_V2_HW_RST_COMPLETION_WAIT 20
#define HNS_ROCE_CONTEXT_HOP_NUM 1
#define HNS_ROCE_SCCC_HOP_NUM 1
@@ -126,8 +129,6 @@
#define HNS_ROCE_CMD_FLAG_ERR_INTR BIT(HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT)
#define HNS_ROCE_CMQ_DESC_NUM_S 3
-#define HNS_ROCE_CMQ_EN_B 16
-#define HNS_ROCE_CMQ_ENABLE BIT(HNS_ROCE_CMQ_EN_B)
#define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT 5
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 1e4ba48f5613..1b757cc924c3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -262,7 +262,8 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
IB_PORT_ACTIVE : IB_PORT_DOWN;
props->phys_state = (props->state == IB_PORT_ACTIVE) ?
- HNS_ROCE_PHY_LINKUP : HNS_ROCE_PHY_DISABLED;
+ IB_PORT_PHYS_STATE_LINK_UP :
+ IB_PORT_PHYS_STATE_DISABLED;
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 549e1a38dfe0..5f8416ba09a9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -347,155 +347,207 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
mr->pbl_bt_l0 = NULL;
mr->pbl_l0_dma_addr = 0;
}
+static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages,
+ struct hns_roce_mr *mr, u32 pbl_bt_sz)
+{
+ struct device *dev = hr_dev->dev;
-/* PBL multi hop addressing */
-static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
- struct hns_roce_mr *mr)
+ if (npages > pbl_bt_sz / 8) {
+		dev_err(dev, "npages %d is larger than buf_pg_sz!\n",
+ npages);
+ return -EINVAL;
+ }
+ mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+ &(mr->pbl_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_buf)
+ return -ENOMEM;
+
+ mr->pbl_size = npages;
+ mr->pbl_ba = mr->pbl_dma_addr;
+ mr->pbl_hop_num = 1;
+ mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
+ mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
+ return 0;
+}
+
+static int pbl_2hop_alloc(struct hns_roce_dev *hr_dev, int npages,
+ struct hns_roce_mr *mr, u32 pbl_bt_sz)
{
struct device *dev = hr_dev->dev;
- int mr_alloc_done = 0;
int npages_allocated;
- int i = 0, j = 0;
- u32 pbl_bt_sz;
- u32 mhop_num;
u64 pbl_last_bt_num;
u64 pbl_bt_cnt = 0;
- u64 bt_idx;
u64 size;
+ int i;
- mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
- pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
- if (mhop_num == HNS_ROCE_HOP_NUM_0)
- return 0;
-
- /* hop_num = 1 */
- if (mhop_num == 1) {
- if (npages > pbl_bt_sz / 8) {
- dev_err(dev, "npages %d is larger than buf_pg_sz!",
- npages);
- return -EINVAL;
+ /* alloc L1 BT */
+ for (i = 0; i < pbl_bt_sz / 8; i++) {
+ if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
+ size = pbl_bt_sz;
+ } else {
+ npages_allocated = i * (pbl_bt_sz / 8);
+ size = (npages - npages_allocated) * 8;
}
- mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
- &(mr->pbl_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_buf)
+ mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
+ &(mr->pbl_l1_dma_addr[i]),
+ GFP_KERNEL);
+ if (!mr->pbl_bt_l1[i]) {
+ hns_roce_loop_free(hr_dev, mr, 1, i, 0);
return -ENOMEM;
+ }
- mr->pbl_size = npages;
- mr->pbl_ba = mr->pbl_dma_addr;
- mr->pbl_hop_num = mhop_num;
- mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
- mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
- return 0;
+ *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
+
+ pbl_bt_cnt++;
+ if (pbl_bt_cnt >= pbl_last_bt_num)
+ break;
}
- mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
- sizeof(*mr->pbl_l1_dma_addr),
+ mr->l0_chunk_last_num = i + 1;
+
+ return 0;
+}
+
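pbl_last_bt_num above (and again in pbl_3hop_alloc below) is a ceiling division: a BT page of pbl_bt_sz bytes holds pbl_bt_sz / 8 eight-byte entries, and the open-coded `(n + d - 1) / d` is exactly the kernel's DIV_ROUND_UP(). A quick check, assuming a 4 KiB BT page:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long pbl_bt_sz = 4096, npages = 1000;
        unsigned long per_bt = pbl_bt_sz / 8;      /* 512 entries per BT page */

        printf("%lu == %lu\n",
               (npages + per_bt - 1) / per_bt,     /* open-coded: 2 */
               DIV_ROUND_UP(npages, per_bt));      /* kernel macro: 2 */
        return 0;
    }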
+static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages,
+ struct hns_roce_mr *mr, u32 pbl_bt_sz)
+{
+ struct device *dev = hr_dev->dev;
+ int mr_alloc_done = 0;
+ int npages_allocated;
+ u64 pbl_last_bt_num;
+ u64 pbl_bt_cnt = 0;
+ u64 bt_idx;
+ u64 size;
+ int i;
+ int j = 0;
+
+ pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
+
+ mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
+ sizeof(*mr->pbl_l2_dma_addr),
GFP_KERNEL);
- if (!mr->pbl_l1_dma_addr)
+ if (!mr->pbl_l2_dma_addr)
return -ENOMEM;
- mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
+ mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
+ sizeof(*mr->pbl_bt_l2),
GFP_KERNEL);
- if (!mr->pbl_bt_l1)
- goto err_kcalloc_bt_l1;
-
- if (mhop_num == 3) {
- mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
- sizeof(*mr->pbl_l2_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_l2_dma_addr)
- goto err_kcalloc_l2_dma;
+ if (!mr->pbl_bt_l2)
+ goto err_kcalloc_bt_l2;
+
+ /* alloc L1, L2 BT */
+ for (i = 0; i < pbl_bt_sz / 8; i++) {
+ mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
+ &(mr->pbl_l1_dma_addr[i]),
+ GFP_KERNEL);
+ if (!mr->pbl_bt_l1[i]) {
+ hns_roce_loop_free(hr_dev, mr, 1, i, 0);
+ goto err_dma_alloc_l0;
+ }
- mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
- sizeof(*mr->pbl_bt_l2),
- GFP_KERNEL);
- if (!mr->pbl_bt_l2)
- goto err_kcalloc_bt_l2;
- }
+ *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
- /* alloc L0 BT */
- mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
- &(mr->pbl_l0_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_bt_l0)
- goto err_dma_alloc_l0;
+ for (j = 0; j < pbl_bt_sz / 8; j++) {
+ bt_idx = i * pbl_bt_sz / 8 + j;
- if (mhop_num == 2) {
- /* alloc L1 BT */
- for (i = 0; i < pbl_bt_sz / 8; i++) {
if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
size = pbl_bt_sz;
} else {
- npages_allocated = i * (pbl_bt_sz / 8);
+ npages_allocated = bt_idx *
+ (pbl_bt_sz / 8);
size = (npages - npages_allocated) * 8;
}
- mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
- &(mr->pbl_l1_dma_addr[i]),
- GFP_KERNEL);
- if (!mr->pbl_bt_l1[i]) {
- hns_roce_loop_free(hr_dev, mr, 1, i, 0);
+ mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
+ dev, size,
+ &(mr->pbl_l2_dma_addr[bt_idx]),
+ GFP_KERNEL);
+ if (!mr->pbl_bt_l2[bt_idx]) {
+ hns_roce_loop_free(hr_dev, mr, 2, i, j);
goto err_dma_alloc_l0;
}
- *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
+ *(mr->pbl_bt_l1[i] + j) =
+ mr->pbl_l2_dma_addr[bt_idx];
pbl_bt_cnt++;
- if (pbl_bt_cnt >= pbl_last_bt_num)
+ if (pbl_bt_cnt >= pbl_last_bt_num) {
+ mr_alloc_done = 1;
break;
- }
- } else if (mhop_num == 3) {
- /* alloc L1, L2 BT */
- for (i = 0; i < pbl_bt_sz / 8; i++) {
- mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
- &(mr->pbl_l1_dma_addr[i]),
- GFP_KERNEL);
- if (!mr->pbl_bt_l1[i]) {
- hns_roce_loop_free(hr_dev, mr, 1, i, 0);
- goto err_dma_alloc_l0;
}
+ }
- *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
+ if (mr_alloc_done)
+ break;
+ }
- for (j = 0; j < pbl_bt_sz / 8; j++) {
- bt_idx = i * pbl_bt_sz / 8 + j;
+ mr->l0_chunk_last_num = i + 1;
+ mr->l1_chunk_last_num = j + 1;
- if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
- size = pbl_bt_sz;
- } else {
- npages_allocated = bt_idx *
- (pbl_bt_sz / 8);
- size = (npages - npages_allocated) * 8;
- }
- mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
- dev, size,
- &(mr->pbl_l2_dma_addr[bt_idx]),
- GFP_KERNEL);
- if (!mr->pbl_bt_l2[bt_idx]) {
- hns_roce_loop_free(hr_dev, mr, 2, i, j);
- goto err_dma_alloc_l0;
- }
- *(mr->pbl_bt_l1[i] + j) =
- mr->pbl_l2_dma_addr[bt_idx];
+ return 0;
- pbl_bt_cnt++;
- if (pbl_bt_cnt >= pbl_last_bt_num) {
- mr_alloc_done = 1;
- break;
- }
- }
+err_dma_alloc_l0:
+ kfree(mr->pbl_bt_l2);
+ mr->pbl_bt_l2 = NULL;
- if (mr_alloc_done)
- break;
- }
+err_kcalloc_bt_l2:
+ kfree(mr->pbl_l2_dma_addr);
+ mr->pbl_l2_dma_addr = NULL;
+
+ return -ENOMEM;
+}
+
+/* PBL multi hop addressing */
+static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
+ struct hns_roce_mr *mr)
+{
+ struct device *dev = hr_dev->dev;
+ u32 pbl_bt_sz;
+ u32 mhop_num;
+
+ mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
+ pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
+
+ if (mhop_num == HNS_ROCE_HOP_NUM_0)
+ return 0;
+
+ if (mhop_num == 1)
+ return pbl_1hop_alloc(hr_dev, npages, mr, pbl_bt_sz);
+
+ mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
+ sizeof(*mr->pbl_l1_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_l1_dma_addr)
+ return -ENOMEM;
+
+ mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
+ GFP_KERNEL);
+ if (!mr->pbl_bt_l1)
+ goto err_kcalloc_bt_l1;
+
+ /* alloc L0 BT */
+ mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
+ &(mr->pbl_l0_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_bt_l0)
+ goto err_kcalloc_l2_dma;
+
+ if (mhop_num == 2) {
+ if (pbl_2hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
+ goto err_kcalloc_l2_dma;
+ }
+
+ if (mhop_num == 3) {
+ if (pbl_3hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
+ goto err_kcalloc_l2_dma;
}
- mr->l0_chunk_last_num = i + 1;
- if (mhop_num == 3)
- mr->l1_chunk_last_num = j + 1;
mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_l0_dma_addr;
@@ -505,14 +557,6 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
return 0;
-err_dma_alloc_l0:
- kfree(mr->pbl_bt_l2);
- mr->pbl_bt_l2 = NULL;
-
-err_kcalloc_bt_l2:
- kfree(mr->pbl_l2_dma_addr);
- mr->pbl_l2_dma_addr = NULL;
-
err_kcalloc_l2_dma:
kfree(mr->pbl_bt_l1);
mr->pbl_bt_l1 = NULL;
@@ -1161,6 +1205,83 @@ err_free:
return ERR_PTR(ret);
}
+static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
+ u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct hns_roce_cmd_mailbox *mailbox,
+ u32 pdn, struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+ struct hns_roce_mr *mr = to_hr_mr(ibmr);
+ struct device *dev = hr_dev->dev;
+ int npages;
+ int ret;
+
+ if (mr->size != ~0ULL) {
+ npages = ib_umem_page_count(mr->umem);
+
+ if (hr_dev->caps.pbl_hop_num)
+ hns_roce_mhop_free(hr_dev, mr);
+ else
+ dma_free_coherent(dev, npages * 8,
+ mr->pbl_buf, mr->pbl_dma_addr);
+ }
+ ib_umem_release(mr->umem);
+
+ mr->umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+ if (IS_ERR(mr->umem)) {
+ ret = PTR_ERR(mr->umem);
+ mr->umem = NULL;
+		return ret;
+ }
+ npages = ib_umem_page_count(mr->umem);
+
+ if (hr_dev->caps.pbl_hop_num) {
+ ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
+ if (ret)
+ goto release_umem;
+ } else {
+ mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+ &(mr->pbl_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_buf) {
+ ret = -ENOMEM;
+ goto release_umem;
+ }
+ }
+
+ ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
+ mr_access_flags, virt_addr,
+ length, mailbox->buf);
+ if (ret)
+ goto release_umem;
+
+ ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
+ if (ret) {
+ if (mr->size != ~0ULL) {
+ npages = ib_umem_page_count(mr->umem);
+
+ if (hr_dev->caps.pbl_hop_num)
+ hns_roce_mhop_free(hr_dev, mr);
+ else
+ dma_free_coherent(dev, npages * 8,
+ mr->pbl_buf,
+ mr->pbl_dma_addr);
+ }
+
+ goto release_umem;
+ }
+
+ return 0;
+
+release_umem:
+ ib_umem_release(mr->umem);
+ return ret;
+}
+
int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
struct ib_udata *udata)
@@ -1171,7 +1292,6 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
struct device *dev = hr_dev->dev;
unsigned long mtpt_idx;
u32 pdn = 0;
- int npages;
int ret;
if (!mr->enabled)
@@ -1198,73 +1318,25 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
pdn = to_hr_pd(pd)->pdn;
if (flags & IB_MR_REREG_TRANS) {
- if (mr->size != ~0ULL) {
- npages = ib_umem_page_count(mr->umem);
-
- if (hr_dev->caps.pbl_hop_num)
- hns_roce_mhop_free(hr_dev, mr);
- else
- dma_free_coherent(dev, npages * 8, mr->pbl_buf,
- mr->pbl_dma_addr);
- }
- ib_umem_release(mr->umem);
-
- mr->umem =
- ib_umem_get(udata, start, length, mr_access_flags, 0);
- if (IS_ERR(mr->umem)) {
- ret = PTR_ERR(mr->umem);
- mr->umem = NULL;
+ ret = rereg_mr_trans(ibmr, flags,
+ start, length,
+ virt_addr, mr_access_flags,
+ mailbox, pdn, udata);
+ if (ret)
goto free_cmd_mbox;
- }
- npages = ib_umem_page_count(mr->umem);
-
- if (hr_dev->caps.pbl_hop_num) {
- ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
- if (ret)
- goto release_umem;
- } else {
- mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
- &(mr->pbl_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_buf) {
- ret = -ENOMEM;
- goto release_umem;
- }
- }
- }
-
- ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
- mr_access_flags, virt_addr,
- length, mailbox->buf);
- if (ret) {
- if (flags & IB_MR_REREG_TRANS)
- goto release_umem;
- else
+ } else {
+ ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
+ mr_access_flags, virt_addr,
+ length, mailbox->buf);
+ if (ret)
goto free_cmd_mbox;
}
- if (flags & IB_MR_REREG_TRANS) {
- ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
- if (ret) {
- if (mr->size != ~0ULL) {
- npages = ib_umem_page_count(mr->umem);
-
- if (hr_dev->caps.pbl_hop_num)
- hns_roce_mhop_free(hr_dev, mr);
- else
- dma_free_coherent(dev, npages * 8,
- mr->pbl_buf,
- mr->pbl_dma_addr);
- }
-
- goto release_umem;
- }
- }
-
ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
if (ret) {
dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
- goto release_umem;
+ ib_umem_release(mr->umem);
+ goto free_cmd_mbox;
}
mr->enabled = 1;
@@ -1275,9 +1347,6 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
return 0;
-release_umem:
- ib_umem_release(mr->umem);
-
free_cmd_mbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
@@ -1357,7 +1426,7 @@ static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
struct hns_roce_mr *mr = to_hr_mr(ibmr);
- mr->pbl_buf[mr->npages++] = cpu_to_le64(addr);
+ mr->pbl_buf[mr->npages++] = addr;
return 0;
}
@@ -1528,10 +1597,9 @@ static int hns_roce_write_mtr(struct hns_roce_dev *hr_dev,
/* Save page addr, low 12 bits : 0 */
for (i = 0; i < count; i++) {
if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- mtts[i] = cpu_to_le64(bufs[npage] >>
- PAGE_ADDR_SHIFT);
+ mtts[i] = bufs[npage] >> PAGE_ADDR_SHIFT;
else
- mtts[i] = cpu_to_le64(bufs[npage]);
+ mtts[i] = bufs[npage];
npage++;
}
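This hunk (like the sq_signal_bits and doorbell_qpn changes elsewhere in the series) drops cpu_to_le64()/cpu_to_le32() conversions, presumably because the destination fields are treated as CPU-endian here and converted once where they are handed to hardware; the diff alone does not show where. For reference, cpu_to_le64() is a no-op on little-endian hosts and a byte swap on big-endian ones, which user space can mimic with htole64():

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t addr = 0x0000123456789000ULL;

        /* identical output on x86-64; bytes reversed on a big-endian host */
        printf("cpu: %016llx  le: %016llx\n",
               (unsigned long long)addr,
               (unsigned long long)htole64(addr));
        return 0;
    }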
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index e0424029b058..8868172d99c8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -324,31 +324,46 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
return 0;
}
-static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
- struct ib_qp_cap *cap,
- struct hns_roce_qp *hr_qp,
- struct hns_roce_ib_create_qp *ucmd)
+static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
+ struct ib_qp_cap *cap,
+ struct hns_roce_ib_create_qp *ucmd)
{
u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
u8 max_sq_stride = ilog2(roundup_sq_stride);
- u32 ex_sge_num;
- u32 page_size;
- u32 max_cnt;
/* Sanity check SQ size before proceeding */
if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
ucmd->log_sq_stride > max_sq_stride ||
ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
- dev_err(hr_dev->dev, "check SQ size error!\n");
+		ibdev_err(&hr_dev->ib_dev, "SQ size check failed!\n");
return -EINVAL;
}
if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
- dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
- cap->max_send_sge);
+ ibdev_err(&hr_dev->ib_dev, "SQ sge error! max_send_sge=%d\n",
+ cap->max_send_sge);
return -EINVAL;
}
+ return 0;
+}
+
+static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
+ struct ib_qp_cap *cap,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_ib_create_qp *ucmd)
+{
+ u32 ex_sge_num;
+ u32 page_size;
+ u32 max_cnt;
+ int ret;
+
+ ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
+ if (ret) {
+		ibdev_err(&hr_dev->ib_dev, "SQ size sanity check failed\n");
+ return ret;
+ }
+
hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
@@ -501,6 +516,35 @@ static int calc_wqe_bt_page_shift(struct hns_roce_dev *hr_dev,
return bt_pg_shift - PAGE_SHIFT;
}
+static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+{
+ struct device *dev = hr_dev->dev;
+
+ if (hr_qp->sq.max_gs > 2) {
+ hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+ (hr_qp->sq.max_gs - 2));
+ hr_qp->sge.sge_shift = 4;
+ }
+
+	/* UD SQ WQEs carry all their SGEs in the extended SGE area */
+ if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
+ hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+ hr_qp->sq.max_gs);
+ hr_qp->sge.sge_shift = 4;
+ }
+
+ if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
+ if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
+			dev_err(dev, "invalid extended SGE count! sge_cnt=%d\n",
+ hr_qp->sge.sge_cnt);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
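set_extend_sge_param() preserves the old sizing rule: when a WQE carries more than two SGEs, the surplus (max_gs - 2 per WQE, or all max_gs for GSI/UD) lands in a separate extended-SGE area whose entry count is rounded up to a power of two, with sge_shift = 4 meaning 16-byte entries. A standalone sketch of the arithmetic, using a plain-C stand-in for the kernel's roundup_pow_of_two() and assumed example values:

    #include <stdio.h>

    static unsigned long roundup_pow_of_two(unsigned long n)
    {
        unsigned long r = 1;

        while (r < n)
            r <<= 1;
        return r;
    }

    int main(void)
    {
        unsigned int wqe_cnt = 128, max_gs = 5;    /* assumed example values */
        unsigned long sge_cnt = roundup_pow_of_two(wqe_cnt * (max_gs - 2));

        /* 128 WQEs * 3 surplus SGEs = 384, rounded up to 512 entries */
        printf("sge_cnt = %lu (entry size 1 << 4 = 16 bytes)\n", sge_cnt);
        return 0;
    }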
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap,
struct hns_roce_qp *hr_qp)
@@ -509,6 +553,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
u32 page_size;
u32 max_cnt;
int size;
+ int ret;
if (cap->max_send_wr > hr_dev->caps.max_wqes ||
cap->max_send_sge > hr_dev->caps.max_sq_sg ||
@@ -518,8 +563,6 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
}
hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
- hr_qp->sq_max_wqes_per_wr = 1;
- hr_qp->sq_spare_wqes = 0;
if (hr_dev->caps.min_wqes)
max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
@@ -539,25 +582,10 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
else
hr_qp->sq.max_gs = max_cnt;
- if (hr_qp->sq.max_gs > 2) {
- hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
- (hr_qp->sq.max_gs - 2));
- hr_qp->sge.sge_shift = 4;
- }
-
- /* ud sqwqe's sge use extend sge */
- if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
- hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
- hr_qp->sq.max_gs);
- hr_qp->sge.sge_shift = 4;
- }
-
- if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
- if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
- dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
- hr_qp->sge.sge_cnt);
- return -EINVAL;
- }
+ ret = set_extend_sge_param(hr_dev, hr_qp);
+ if (ret) {
+		dev_err(dev, "setting extended SGE parameters failed\n");
+ return ret;
}
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
@@ -613,7 +641,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
struct ib_udata *udata, unsigned long sqpn,
struct hns_roce_qp *hr_qp)
{
- dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { 0 };
+ dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL };
struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_qp ucmd;
struct hns_roce_ib_create_qp_resp resp = {};
@@ -635,9 +663,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp->ibqp.qp_type = init_attr->qp_type;
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
- hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR);
+ hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
else
- hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);
+ hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata,
hns_roce_qp_has_rq(init_attr), hr_qp);
@@ -826,11 +854,18 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64),
GFP_KERNEL);
- hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64),
- GFP_KERNEL);
- if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
+ if (ZERO_OR_NULL_PTR(hr_qp->sq.wrid)) {
ret = -ENOMEM;
- goto err_wrid;
+ goto err_get_bufs;
+ }
+
+ if (hr_qp->rq.wqe_cnt) {
+ hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64),
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(hr_qp->rq.wrid)) {
+ ret = -ENOMEM;
+ goto err_sq_wrid;
+ }
}
}
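The switch from a plain NULL check to ZERO_OR_NULL_PTR() matters because rq.wqe_cnt can legitimately be zero (e.g. when the QP uses an SRQ), and kcalloc(0, ...) returns not NULL but the kernel's ZERO_SIZE_PTR sentinel, which must never be dereferenced. Guarding the rq.wrid allocation behind `if (hr_qp->rq.wqe_cnt)` avoids the zero-size call entirely, and ZERO_OR_NULL_PTR() catches both failure shapes. The relevant slab.h definitions are roughly (quoted from memory, simplified; check include/linux/slab.h):

    /* a non-NULL but non-dereferenceable token for zero-size allocations */
    #define ZERO_SIZE_PTR        ((void *)16)
    #define ZERO_OR_NULL_PTR(x)  ((unsigned long)(x) <= (unsigned long)ZERO_SIZE_PTR)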
@@ -875,7 +910,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if (sqpn)
hr_qp->doorbell_qpn = 1;
else
- hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);
+ hr_qp->doorbell_qpn = (u32)hr_qp->qpn;
if (udata) {
ret = ib_copy_to_udata(udata, &resp,
@@ -916,8 +951,8 @@ err_wrid:
hns_roce_qp_has_rq(init_attr))
hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
} else {
- kfree(hr_qp->sq.wrid);
- kfree(hr_qp->rq.wrid);
+ if (hr_qp->rq.wqe_cnt)
+ kfree(hr_qp->rq.wrid);
}
err_sq_dbmap:
@@ -928,6 +963,10 @@ err_sq_dbmap:
hns_roce_qp_has_sq(init_attr))
hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
+err_sq_wrid:
+ if (!udata)
+ kfree(hr_qp->sq.wrid);
+
err_get_bufs:
hns_roce_free_buf_list(buf_list, hr_qp->region_cnt);
@@ -942,11 +981,13 @@ err_db:
hns_roce_free_db(hr_dev, &hr_qp->rdb);
err_rq_sge_list:
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+ if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
+ hns_roce_qp_has_rq(init_attr))
kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
err_wqe_list:
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+ if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
+ hns_roce_qp_has_rq(init_attr))
kfree(hr_qp->rq_inl_buf.wqe_list);
err_out:
@@ -958,7 +999,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_sqp *hr_sqp;
struct hns_roce_qp *hr_qp;
int ret;
@@ -972,7 +1013,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
hr_qp);
if (ret) {
- dev_err(dev, "Create RC QP failed\n");
+ ibdev_err(ibdev, "Create RC QP 0x%06lx failed(%d)\n",
+ hr_qp->qpn, ret);
kfree(hr_qp);
return ERR_PTR(ret);
}
@@ -984,7 +1026,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
case IB_QPT_GSI: {
/* Userspace is not allowed to create special QPs: */
if (udata) {
- dev_err(dev, "not support usr space GSI\n");
+			ibdev_err(ibdev, "userspace GSI QPs are not supported\n");
return ERR_PTR(-EINVAL);
}
@@ -1006,7 +1048,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
hr_qp->ibqp.qp_num, hr_qp);
if (ret) {
- dev_err(dev, "Create GSI QP failed!\n");
+ ibdev_err(ibdev, "Create GSI QP failed!\n");
kfree(hr_sqp);
return ERR_PTR(ret);
}
@@ -1014,7 +1056,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
break;
}
default:{
- dev_err(dev, "not support QP type %d\n", init_attr->qp_type);
+		ibdev_err(ibdev, "QP type %d is not supported\n",
+ init_attr->qp_type);
return ERR_PTR(-EINVAL);
}
}
@@ -1040,23 +1083,88 @@ int to_hr_qp_type(int qp_type)
return transport_type;
}
+static int check_mtu_validate(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct ib_qp_attr *attr, int attr_mask)
+{
+ enum ib_mtu active_mtu;
+ int p;
+
+ p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
+ active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
+
+ if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
+ attr->path_mtu > hr_dev->caps.max_mtu) ||
+ attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
+ ibdev_err(&hr_dev->ib_dev,
+			  "attr path_mtu(%d) invalid while modifying qp\n",
+ attr->path_mtu);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ int p;
+
+ if ((attr_mask & IB_QP_PORT) &&
+ (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
+ ibdev_err(&hr_dev->ib_dev,
+			  "attr port_num invalid, attr->port_num=%d\n",
+ attr->port_num);
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
+ if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
+ ibdev_err(&hr_dev->ib_dev,
+				  "attr pkey_index invalid, attr->pkey_index=%d\n",
+ attr->pkey_index);
+ return -EINVAL;
+ }
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+ attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
+ ibdev_err(&hr_dev->ib_dev,
+			  "attr max_rd_atomic invalid, attr->max_rd_atomic=%d\n",
+ attr->max_rd_atomic);
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+ attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
+ ibdev_err(&hr_dev->ib_dev,
+			  "attr max_dest_rd_atomic invalid, attr->max_dest_rd_atomic=%d\n",
+ attr->max_dest_rd_atomic);
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_QP_PATH_MTU)
+ return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);
+
+ return 0;
+}
+
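hns_roce_check_qp_attr() consolidates the validation formerly inlined in hns_roce_modify_qp() without changing behaviour: each check fires only when its IB_QP_* bit is set in attr_mask, and the port used by the pkey/MTU checks falls back to the QP's current port when IB_QP_PORT is absent. The mask-gating pattern in isolation, with invented flag names and limits:

    #include <stdio.h>

    #define ATTR_PORT  (1 << 0)    /* hypothetical stand-ins for IB_QP_* bits */
    #define ATTR_PKEY  (1 << 1)

    struct attr { int port_num; int pkey_index; };

    static int check(int mask, const struct attr *a, int cur_port)
    {
        /* fall back to the current port when the caller didn't set one */
        int p = (mask & ATTR_PORT) ? a->port_num - 1 : cur_port;

        if ((mask & ATTR_PKEY) && a->pkey_index >= 128) {
            printf("bad pkey_index %d on port %d\n", a->pkey_index, p);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct attr a = { .port_num = 1, .pkey_index = 200 };

        return check(ATTR_PKEY, &a, 0) ? 1 : 0;
    }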
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
enum ib_qp_state cur_state, new_state;
- struct device *dev = hr_dev->dev;
int ret = -EINVAL;
- int p;
- enum ib_mtu active_mtu;
mutex_lock(&hr_qp->mutex);
cur_state = attr_mask & IB_QP_CUR_STATE ?
attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
- new_state = attr_mask & IB_QP_STATE ?
- attr->qp_state : cur_state;
+ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
if (ibqp->uobject &&
(attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
@@ -1066,67 +1174,27 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (hr_qp->rdb_en == 1)
hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
} else {
- dev_warn(dev, "flush cqe is not supported in userspace!\n");
+ ibdev_warn(&hr_dev->ib_dev,
+ "flush cqe is not supported in userspace!\n");
goto out;
}
}
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
attr_mask)) {
- dev_err(dev, "ib_modify_qp_is_ok failed\n");
+ ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
goto out;
}
- if ((attr_mask & IB_QP_PORT) &&
- (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
- dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
- attr->port_num);
- goto out;
- }
-
- if (attr_mask & IB_QP_PKEY_INDEX) {
- p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
- if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
- dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
- attr->pkey_index);
- goto out;
- }
- }
-
- if (attr_mask & IB_QP_PATH_MTU) {
- p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
- active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
-
- if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
- attr->path_mtu > IB_MTU_4096) ||
- (hr_dev->caps.max_mtu == IB_MTU_2048 &&
- attr->path_mtu > IB_MTU_2048) ||
- attr->path_mtu < IB_MTU_256 ||
- attr->path_mtu > active_mtu) {
- dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
- attr->path_mtu);
- goto out;
- }
- }
-
- if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
- attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
- dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
- attr->max_rd_atomic);
- goto out;
- }
-
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
- attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
- dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
- attr->max_dest_rd_atomic);
+ ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
+ if (ret)
goto out;
- }
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
if (hr_dev->caps.min_wqes) {
ret = -EPERM;
- dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
+ ibdev_err(&hr_dev->ib_dev,
+ "cur_state=%d new_state=%d\n", cur_state,
new_state);
} else {
ret = 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 38bb548eaa6d..9591457eb768 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -175,6 +175,76 @@ static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}
+static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
+ int srq_buf_size)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
+ struct hns_roce_ib_create_srq ucmd;
+ u32 page_shift;
+ u32 npages;
+ int ret;
+
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+ return -EFAULT;
+
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
+ if (IS_ERR(srq->umem))
+ return PTR_ERR(srq->umem);
+
+ npages = (ib_umem_page_count(srq->umem) +
+ (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
+ (1 << hr_dev->caps.srqwqe_buf_pg_sz);
+ page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
+ ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt);
+ if (ret)
+ goto err_user_buf;
+
+ ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
+ if (ret)
+ goto err_user_srq_mtt;
+
+ /* config index queue BA */
+ srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
+ srq->idx_que.buf_size, 0, 0);
+ if (IS_ERR(srq->idx_que.umem)) {
+ dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
+ ret = PTR_ERR(srq->idx_que.umem);
+ goto err_user_srq_mtt;
+ }
+
+ ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->idx_que.umem),
+ PAGE_SHIFT, &srq->idx_que.mtt);
+
+ if (ret) {
+		dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx queue\n");
+ goto err_user_idx_mtt;
+ }
+
+ ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
+ srq->idx_que.umem);
+ if (ret) {
+ dev_err(hr_dev->dev,
+			"hns_roce_ib_umem_write_mtt error for idx queue\n");
+ goto err_user_idx_buf;
+ }
+
+ return 0;
+
+err_user_idx_buf:
+ hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
+
+err_user_idx_mtt:
+ ib_umem_release(srq->idx_que.umem);
+
+err_user_srq_mtt:
+ hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
+
+err_user_buf:
+ ib_umem_release(srq->umem);
+
+ return ret;
+}
+
static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
u32 page_shift)
{
@@ -196,6 +266,93 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
return 0;
}
+static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
+ u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
+ int ret;
+
+ if (hns_roce_buf_alloc(hr_dev, srq_buf_size, (1 << page_shift) * 2,
+ &srq->buf, page_shift))
+ return -ENOMEM;
+
+ srq->head = 0;
+ srq->tail = srq->max - 1;
+
+ ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
+ &srq->mtt);
+ if (ret)
+ goto err_kernel_buf;
+
+ ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
+ if (ret)
+ goto err_kernel_srq_mtt;
+
+ page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
+ ret = hns_roce_create_idx_que(srq->ibsrq.pd, srq, page_shift);
+ if (ret) {
+		dev_err(hr_dev->dev, "Create idx queue failed(%d)!\n", ret);
+ goto err_kernel_srq_mtt;
+ }
+
+ /* Init mtt table for idx_que */
+ ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
+ srq->idx_que.idx_buf.page_shift,
+ &srq->idx_que.mtt);
+ if (ret)
+ goto err_kernel_create_idx;
+
+ /* Write buffer address into the mtt table */
+ ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
+ &srq->idx_que.idx_buf);
+ if (ret)
+ goto err_kernel_idx_buf;
+
+ srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
+ if (!srq->wrid) {
+ ret = -ENOMEM;
+ goto err_kernel_idx_buf;
+ }
+
+ return 0;
+
+err_kernel_idx_buf:
+ hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
+
+err_kernel_create_idx:
+ hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
+ &srq->idx_que.idx_buf);
+ kfree(srq->idx_que.bitmap);
+
+err_kernel_srq_mtt:
+ hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
+
+err_kernel_buf:
+ hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
+
+ return ret;
+}
+
+static void destroy_user_srq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_srq *srq)
+{
+ hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
+ ib_umem_release(srq->idx_que.umem);
+ hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
+ ib_umem_release(srq->umem);
+}
+
+static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_srq *srq, int srq_buf_size)
+{
+ kvfree(srq->wrid);
+ hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
+ hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, &srq->idx_que.idx_buf);
+ kfree(srq->idx_que.bitmap);
+ hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
+ hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
+}
+
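create_kernel_srq()/destroy_kernel_srq() (and the user-space pair above) follow the usual kernel pairing: the error labels in the create path unwind, in reverse order, only the steps that already succeeded, while the destroy helper repeats the full teardown. A minimal sketch of that convention with hypothetical step_a()/step_b() helpers:

    #include <stdio.h>

    static int step_a(void) { puts("a: acquire"); return 0; }
    static int step_b(void) { puts("b: acquire"); return -1; /* simulate failure */ }
    static void undo_a(void) { puts("a: release"); }
    static void undo_b(void) { puts("b: release"); }

    static int create(void)
    {
        if (step_a())
            return -1;
        if (step_b())
            goto err_a;    /* unwind only what succeeded */
        return 0;

    err_a:
        undo_a();
        return -1;
    }

    static void destroy(void)    /* full teardown, reverse order */
    {
        undo_b();
        undo_a();
    }

    int main(void)
    {
        if (create())
            puts("create failed, partially unwound");
        else
            destroy();
        return 0;
    }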
int hns_roce_create_srq(struct ib_srq *ib_srq,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata)
@@ -205,9 +362,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
struct hns_roce_srq *srq = to_hr_srq(ib_srq);
int srq_desc_size;
int srq_buf_size;
- u32 page_shift;
int ret = 0;
- u32 npages;
u32 cqn;
/* Check the actual SRQ wqe and SRQ sge num */
@@ -233,115 +388,16 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
if (udata) {
- struct hns_roce_ib_create_srq ucmd;
-
- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
- return -EFAULT;
-
- srq->umem =
- ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
- if (IS_ERR(srq->umem))
- return PTR_ERR(srq->umem);
-
- if (hr_dev->caps.srqwqe_buf_pg_sz) {
- npages = (ib_umem_page_count(srq->umem) +
- (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
- (1 << hr_dev->caps.srqwqe_buf_pg_sz);
- page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
- ret = hns_roce_mtt_init(hr_dev, npages,
- page_shift,
- &srq->mtt);
- } else
- ret = hns_roce_mtt_init(hr_dev,
- ib_umem_page_count(srq->umem),
- PAGE_SHIFT, &srq->mtt);
- if (ret)
- goto err_buf;
-
- ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
- if (ret)
- goto err_srq_mtt;
-
- /* config index queue BA */
- srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
- srq->idx_que.buf_size, 0, 0);
- if (IS_ERR(srq->idx_que.umem)) {
- dev_err(hr_dev->dev,
- "ib_umem_get error for index queue\n");
- ret = PTR_ERR(srq->idx_que.umem);
- goto err_srq_mtt;
- }
-
- if (hr_dev->caps.idx_buf_pg_sz) {
- npages = (ib_umem_page_count(srq->idx_que.umem) +
- (1 << hr_dev->caps.idx_buf_pg_sz) - 1) /
- (1 << hr_dev->caps.idx_buf_pg_sz);
- page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
- ret = hns_roce_mtt_init(hr_dev, npages,
- page_shift, &srq->idx_que.mtt);
- } else {
- ret = hns_roce_mtt_init(
- hr_dev, ib_umem_page_count(srq->idx_que.umem),
- PAGE_SHIFT, &srq->idx_que.mtt);
- }
-
+ ret = create_user_srq(srq, udata, srq_buf_size);
if (ret) {
- dev_err(hr_dev->dev,
- "hns_roce_mtt_init error for idx que\n");
- goto err_idx_mtt;
- }
-
- ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
- srq->idx_que.umem);
- if (ret) {
- dev_err(hr_dev->dev,
- "hns_roce_ib_umem_write_mtt error for idx que\n");
- goto err_idx_buf;
+ dev_err(hr_dev->dev, "Create user srq failed\n");
+ goto err_srq;
}
} else {
- page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
- if (hns_roce_buf_alloc(hr_dev, srq_buf_size,
- (1 << page_shift) * 2, &srq->buf,
- page_shift))
- return -ENOMEM;
-
- srq->head = 0;
- srq->tail = srq->max - 1;
-
- ret = hns_roce_mtt_init(hr_dev, srq->buf.npages,
- srq->buf.page_shift, &srq->mtt);
- if (ret)
- goto err_buf;
-
- ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
- if (ret)
- goto err_srq_mtt;
-
- page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
- ret = hns_roce_create_idx_que(ib_srq->pd, srq, page_shift);
+ ret = create_kernel_srq(srq, srq_buf_size);
if (ret) {
- dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n",
- ret);
- goto err_srq_mtt;
- }
-
- /* Init mtt table for idx_que */
- ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
- srq->idx_que.idx_buf.page_shift,
- &srq->idx_que.mtt);
- if (ret)
- goto err_create_idx;
-
- /* Write buffer address into the mtt table */
- ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
- &srq->idx_que.idx_buf);
- if (ret)
- goto err_idx_buf;
-
- srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
- if (!srq->wrid) {
- ret = -ENOMEM;
- goto err_idx_buf;
+ dev_err(hr_dev->dev, "Create kernel srq failed\n");
+ goto err_srq;
}
}
@@ -356,7 +412,6 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
goto err_wrid;
srq->event = hns_roce_ib_srq_event;
- srq->ibsrq.ext.xrc.srq_num = srq->srqn;
resp.srqn = srq->srqn;
if (udata) {
@@ -373,27 +428,12 @@ err_srqc_alloc:
hns_roce_srq_free(hr_dev, srq);
err_wrid:
- kvfree(srq->wrid);
-
-err_idx_buf:
- hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
-
-err_idx_mtt:
- ib_umem_release(srq->idx_que.umem);
-
-err_create_idx:
- hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
- &srq->idx_que.idx_buf);
- bitmap_free(srq->idx_que.bitmap);
-
-err_srq_mtt:
- hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
-
-err_buf:
- ib_umem_release(srq->umem);
- if (!udata)
- hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
+ if (udata)
+ destroy_user_srq(hr_dev, srq);
+ else
+ destroy_kernel_srq(hr_dev, srq, srq_buf_size);
+err_srq:
return ret;
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index d169a8031375..8056930bbe2c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -97,18 +97,7 @@ static int i40iw_query_port(struct ib_device *ibdev,
u8 port,
struct ib_port_attr *props)
{
- struct i40iw_device *iwdev = to_iwdev(ibdev);
- struct net_device *netdev = iwdev->netdev;
-
- /* props being zeroed by the caller, avoid zeroing it here */
- props->max_mtu = IB_MTU_4096;
- props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
-
props->lid = 1;
- if (netif_carrier_ok(iwdev->netdev))
- props->state = IB_PORT_ACTIVE;
- else
- props->state = IB_PORT_DOWN;
props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
props->gid_tbl_len = 1;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8790101facb7..8d2f1e38b891 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -734,7 +734,8 @@ out:
static u8 state_to_phys_state(enum ib_port_state state)
{
- return state == IB_PORT_ACTIVE ? 5 : 3;
+ return state == IB_PORT_ACTIVE ?
+ IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
}
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 753479285ce9..6ae503cfc526 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -377,6 +377,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
* again
*/
if (!ib_access_writable(access_flags)) {
+ unsigned long untagged_start = untagged_addr(start);
struct vm_area_struct *vma;
down_read(&current->mm->mmap_sem);
@@ -385,9 +386,9 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
* cover the memory, but for now it requires a single vma to
* entirely cover the MR to support RO mappings.
*/
- vma = find_vma(current->mm, start);
- if (vma && vma->vm_end >= start + length &&
- vma->vm_start <= start) {
+ vma = find_vma(current->mm, untagged_start);
+ if (vma && vma->vm_end >= untagged_start + length &&
+ vma->vm_start <= untagged_start) {
if (vma->vm_flags & VM_WRITE)
access_flags |= IB_ACCESS_LOCAL_WRITE;
} else {
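find_vma() must be given the kernel's untagged view of the address: with ARM64 top-byte-ignore (and similar tagged-pointer schemes) a user pointer may carry a tag in its upper bits that is not part of the mapping's address, so comparing the tagged start against vma->vm_start/vm_end could spuriously fail even though the range is mapped. untagged_addr() strips that tag. A rough user-space illustration of the idea (the mask is illustrative, not the kernel's exact definition):

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative only: clear a top-byte tag, ARM64-TBI style */
    static uint64_t untag(uint64_t addr)
    {
        return addr & 0x00ffffffffffffffULL;
    }

    int main(void)
    {
        uint64_t tagged = 0x3a00007f12345000ULL;    /* tag 0x3a in the top byte */

        printf("tagged   %016llx\nuntagged %016llx\n",
               (unsigned long long)tagged,
               (unsigned long long)untag(tagged));
        return 0;
    }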
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 82aff2f2fdc2..bd4aa04416c6 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -325,7 +325,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
}
static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
- bool is_user, int has_rq, struct mlx4_ib_qp *qp,
+ bool is_user, bool has_rq, struct mlx4_ib_qp *qp,
u32 inl_recv_sz)
{
/* Sanity check RQ size before proceeding */
@@ -506,10 +506,10 @@ static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
kfree(qp->sqp_proxy_rcv);
}
-static int qp_has_rq(struct ib_qp_init_attr *attr)
+static bool qp_has_rq(struct ib_qp_init_attr *attr)
{
if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
- return 0;
+ return false;
return !attr->srq;
}
@@ -855,12 +855,143 @@ static void mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context,
mutex_unlock(&context->wqn_ranges_mutex);
}
-static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
- enum mlx4_ib_source_type src,
- struct ib_qp_init_attr *init_attr,
+static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata, struct mlx4_ib_qp *qp)
+{
+ struct mlx4_ib_dev *dev = to_mdev(pd->device);
+ int qpn;
+ int err;
+ struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx4_ib_ucontext, ibucontext);
+ struct mlx4_ib_cq *mcq;
+ unsigned long flags;
+ int range_size;
+ struct mlx4_ib_create_wq wq;
+ size_t copy_len;
+ int shift;
+ int n;
+
+ qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
+
+ mutex_init(&qp->mutex);
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
+ INIT_LIST_HEAD(&qp->gid_list);
+ INIT_LIST_HEAD(&qp->steering_rules);
+
+ qp->state = IB_QPS_RESET;
+
+ copy_len = min(sizeof(struct mlx4_ib_create_wq), udata->inlen);
+
+ if (ib_copy_from_udata(&wq, udata, copy_len)) {
+ err = -EFAULT;
+ goto err;
+ }
+
+ if (wq.comp_mask || wq.reserved[0] || wq.reserved[1] ||
+ wq.reserved[2]) {
+ pr_debug("user command isn't supported\n");
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ if (wq.log_range_size > ilog2(dev->dev->caps.max_rss_tbl_sz)) {
+		pr_debug("WQN range size must be equal to or smaller than %d\n",
+ dev->dev->caps.max_rss_tbl_sz);
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+ range_size = 1 << wq.log_range_size;
+
+ if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS)
+ qp->flags |= MLX4_IB_QP_SCATTER_FCS;
+
+ err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz);
+ if (err)
+ goto err;
+
+ qp->sq_no_prefetch = 1;
+ qp->sq.wqe_cnt = 1;
+ qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
+ qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
+ (qp->sq.wqe_cnt << qp->sq.wqe_shift);
+
+ qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0, 0);
+ if (IS_ERR(qp->umem)) {
+ err = PTR_ERR(qp->umem);
+ goto err;
+ }
+
+ n = ib_umem_page_count(qp->umem);
+ shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
+ err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+
+ if (err)
+ goto err_buf;
+
+ err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
+ if (err)
+ goto err_mtt;
+
+ err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db);
+ if (err)
+ goto err_mtt;
+ qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
+
+ err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn);
+ if (err)
+ goto err_wrid;
+
+ err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
+ if (err)
+ goto err_qpn;
+
+ /*
+ * Hardware wants QPN written in big-endian order (after
+ * shifting) for send doorbell. Precompute this value to save
+ * a little bit when posting sends.
+ */
+ qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
+
+ qp->mqp.event = mlx4_ib_wq_event;
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
+ to_mcq(init_attr->recv_cq));
+ /* Maintain device to QPs access, needed for further handling
+ * via reset flow
+ */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ /* Maintain CQ to QPs access, needed for further handling
+ * via reset flow
+ */
+ mcq = to_mcq(init_attr->send_cq);
+ list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
+ mcq = to_mcq(init_attr->recv_cq);
+ list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
+ mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
+ to_mcq(init_attr->recv_cq));
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+ return 0;
+
+err_qpn:
+ mlx4_ib_release_wqn(context, qp, 0);
+err_wrid:
+ mlx4_ib_db_unmap_user(context, &qp->db);
+
+err_mtt:
+ mlx4_mtt_cleanup(dev->dev, &qp->mtt);
+err_buf:
+ ib_umem_release(qp->umem);
+err:
+ return err;
+}
+
+static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, int sqpn,
struct mlx4_ib_qp **caller_qp)
{
+ struct mlx4_ib_dev *dev = to_mdev(pd->device);
int qpn;
int err;
struct mlx4_ib_sqp *sqp = NULL;
@@ -870,7 +1001,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
struct mlx4_ib_cq *mcq;
unsigned long flags;
- int range_size = 0;
/* When tunneling special qps, we use a plain UD qp */
if (sqpn) {
@@ -921,15 +1051,13 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (!sqp)
return -ENOMEM;
qp = &sqp->qp;
- qp->pri.vid = 0xFFFF;
- qp->alt.vid = 0xFFFF;
} else {
qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL);
if (!qp)
return -ENOMEM;
- qp->pri.vid = 0xFFFF;
- qp->alt.vid = 0xFFFF;
}
+ qp->pri.vid = 0xFFFF;
+ qp->alt.vid = 0xFFFF;
} else
qp = *caller_qp;
@@ -941,48 +1069,24 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
INIT_LIST_HEAD(&qp->gid_list);
INIT_LIST_HEAD(&qp->steering_rules);
- qp->state = IB_QPS_RESET;
+ qp->state = IB_QPS_RESET;
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
-
if (udata) {
- union {
- struct mlx4_ib_create_qp qp;
- struct mlx4_ib_create_wq wq;
- } ucmd;
+ struct mlx4_ib_create_qp ucmd;
size_t copy_len;
int shift;
int n;
- copy_len = (src == MLX4_IB_QP_SRC) ?
- sizeof(struct mlx4_ib_create_qp) :
- min(sizeof(struct mlx4_ib_create_wq), udata->inlen);
+ copy_len = sizeof(struct mlx4_ib_create_qp);
if (ib_copy_from_udata(&ucmd, udata, copy_len)) {
err = -EFAULT;
goto err;
}
- if (src == MLX4_IB_RWQ_SRC) {
- if (ucmd.wq.comp_mask || ucmd.wq.reserved[0] ||
- ucmd.wq.reserved[1] || ucmd.wq.reserved[2]) {
- pr_debug("user command isn't supported\n");
- err = -EOPNOTSUPP;
- goto err;
- }
-
- if (ucmd.wq.log_range_size >
- ilog2(dev->dev->caps.max_rss_tbl_sz)) {
- pr_debug("WQN range size must be equal or smaller than %d\n",
- dev->dev->caps.max_rss_tbl_sz);
- err = -EOPNOTSUPP;
- goto err;
- }
- range_size = 1 << ucmd.wq.log_range_size;
- } else {
- qp->inl_recv_sz = ucmd.qp.inl_recv_sz;
- }
+ qp->inl_recv_sz = ucmd.inl_recv_sz;
if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
if (!(dev->dev->caps.flags &
@@ -1000,30 +1104,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (err)
goto err;
- if (src == MLX4_IB_QP_SRC) {
- qp->sq_no_prefetch = ucmd.qp.sq_no_prefetch;
+ qp->sq_no_prefetch = ucmd.sq_no_prefetch;
- err = set_user_sq_size(dev, qp,
- (struct mlx4_ib_create_qp *)
- &ucmd);
- if (err)
- goto err;
- } else {
- qp->sq_no_prefetch = 1;
- qp->sq.wqe_cnt = 1;
- qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
- /* Allocated buffer expects to have at least that SQ
- * size.
- */
- qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
- (qp->sq.wqe_cnt << qp->sq.wqe_shift);
- }
+ err = set_user_sq_size(dev, qp, &ucmd);
+ if (err)
+ goto err;
qp->umem =
- ib_umem_get(udata,
- (src == MLX4_IB_QP_SRC) ? ucmd.qp.buf_addr :
- ucmd.wq.buf_addr,
- qp->buf_size, 0, 0);
+ ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
@@ -1041,11 +1129,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err_mtt;
if (qp_has_rq(init_attr)) {
- err = mlx4_ib_db_map_user(udata,
- (src == MLX4_IB_QP_SRC) ?
- ucmd.qp.db_addr :
- ucmd.wq.db_addr,
- &qp->db);
+ err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db);
if (err)
goto err_mtt;
}
@@ -1115,10 +1199,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err_wrid;
}
}
- } else if (src == MLX4_IB_RWQ_SRC) {
- err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn);
- if (err)
- goto err_wrid;
} else {
/* Raw packet QPNs may not have bits 6,7 set in their qp_num;
* otherwise, the WQE BlueFlame setup flow wrongly causes
@@ -1157,8 +1237,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
*/
qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
- qp->mqp.event = (src == MLX4_IB_QP_SRC) ? mlx4_ib_qp_event :
- mlx4_ib_wq_event;
+ qp->mqp.event = mlx4_ib_qp_event;
if (!*caller_qp)
*caller_qp = qp;
@@ -1186,8 +1265,6 @@ err_qpn:
if (!sqpn) {
if (qp->flags & MLX4_IB_QP_NETIF)
mlx4_ib_steer_qp_free(dev, qpn, 1);
- else if (src == MLX4_IB_RWQ_SRC)
- mlx4_ib_release_wqn(context, qp, 0);
else
mlx4_qp_release_range(dev->dev, qpn, 1);
}
@@ -1518,8 +1595,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
/* fall through */
case IB_QPT_UD:
{
- err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC,
- init_attr, udata, 0, &qp);
+ err = create_qp_common(pd, init_attr, udata, 0, &qp);
if (err) {
kfree(qp);
return ERR_PTR(err);
@@ -1549,8 +1625,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
sqpn = get_sqp_num(to_mdev(pd->device), init_attr);
}
- err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC,
- init_attr, udata, sqpn, &qp);
+ err = create_qp_common(pd, init_attr, udata, sqpn, &qp);
if (err)
return ERR_PTR(err);
@@ -4047,8 +4122,8 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata)
{
- struct mlx4_ib_dev *dev;
- struct ib_qp_init_attr ib_qp_init_attr;
+ struct mlx4_dev *dev = to_mdev(pd->device)->dev;
+ struct ib_qp_init_attr ib_qp_init_attr = {};
struct mlx4_ib_qp *qp;
struct mlx4_ib_create_wq ucmd;
int err, required_cmd_sz;
@@ -4073,14 +4148,13 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
if (udata->outlen)
return ERR_PTR(-EOPNOTSUPP);
- dev = to_mdev(pd->device);
-
if (init_attr->wq_type != IB_WQT_RQ) {
pr_debug("unsupported wq type %d\n", init_attr->wq_type);
return ERR_PTR(-EOPNOTSUPP);
}
- if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS) {
+ if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS ||
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
pr_debug("unsupported create_flags %u\n",
init_attr->create_flags);
return ERR_PTR(-EOPNOTSUPP);
@@ -4093,7 +4167,6 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
- memset(&ib_qp_init_attr, 0, sizeof(ib_qp_init_attr));
ib_qp_init_attr.qp_context = init_attr->wq_context;
ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET;
ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr;
@@ -4104,8 +4177,7 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS)
ib_qp_init_attr.create_flags |= IB_QP_CREATE_SCATTER_FCS;
- err = create_qp_common(dev, pd, MLX4_IB_RWQ_SRC, &ib_qp_init_attr,
- udata, 0, &qp);
+ err = create_rq(pd, &ib_qp_init_attr, udata, qp);
if (err) {
kfree(qp);
return ERR_PTR(err);
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index af5bbb35c058..59022b744144 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -233,6 +233,8 @@ static bool is_legacy_obj_event_num(u16 event_num)
case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
case MLX5_EVENT_TYPE_DCT_DRAINED:
case MLX5_EVENT_TYPE_COMP:
+ case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
+ case MLX5_EVENT_TYPE_XRQ_ERROR:
return true;
default:
return false;
@@ -315,8 +317,10 @@ static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
return eqe->data.qp_srq.type;
case MLX5_EVENT_TYPE_CQ_ERROR:
+ case MLX5_EVENT_TYPE_XRQ_ERROR:
return 0;
case MLX5_EVENT_TYPE_DCT_DRAINED:
+ case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
return MLX5_EVENT_QUEUE_TYPE_DCT;
default:
return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
@@ -542,6 +546,8 @@ static u64 devx_get_obj_id(const void *in)
break;
case MLX5_CMD_OP_ARM_XRQ:
case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
+ case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
+ case MLX5_CMD_OP_MODIFY_XRQ:
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
MLX5_GET(arm_xrq_in, in, xrqn));
break;
@@ -776,6 +782,14 @@ static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
return true;
return false;
}
+ case MLX5_CMD_OP_CREATE_PSV:
+ {
+ u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);
+
+ if (num_psv == 1)
+ return true;
+ return false;
+ }
default:
return false;
}
@@ -810,6 +824,8 @@ static bool devx_is_obj_modify_cmd(const void *in)
case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
case MLX5_CMD_OP_ARM_XRQ:
case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
+ case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
+ case MLX5_CMD_OP_MODIFY_XRQ:
return true;
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
{
@@ -922,6 +938,7 @@ static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
case MLX5_CMD_OP_QUERY_CONG_STATUS:
case MLX5_CMD_OP_QUERY_CONG_PARAMS:
case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+ case MLX5_CMD_OP_QUERY_LAG:
return true;
default:
return false;
@@ -1215,6 +1232,12 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
case MLX5_CMD_OP_ALLOC_XRCD:
MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
break;
+ case MLX5_CMD_OP_CREATE_PSV:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DESTROY_PSV);
+ MLX5_SET(destroy_psv_in, din, psvn,
+ MLX5_GET(create_psv_out, out, psv0_index));
+ break;
default:
/* The entry must match to one of the devx_is_obj_create_cmd */
WARN_ON(true);
@@ -2285,7 +2308,11 @@ static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
break;
+ case MLX5_EVENT_TYPE_XRQ_ERROR:
+ obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
+ break;
case MLX5_EVENT_TYPE_DCT_DRAINED:
+ case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
break;
case MLX5_EVENT_TYPE_CQ_ERROR:
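The CREATE_PSV case added above follows the usual devx convention: the matching destroy command is synthesized from the create command's output. A self-contained sketch of that pairing, with simplified structs standing in for the packed firmware layouts that MLX5_SET()/MLX5_GET() actually operate on (struct fields and the opcode value here are illustrative, not the real wire format):

#include <stdint.h>

struct create_psv_out { uint32_t psv0_index; };
struct destroy_psv_in { uint16_t opcode; uint32_t psvn; };

#define CMD_OP_DESTROY_PSV 0x1 /* illustrative, not the real opcode */

/* Build the destroy input from the index the create command returned,
 * mirroring what devx_obj_build_destroy_cmd() does above.
 */
static void build_destroy_psv(const struct create_psv_out *out,
                              struct destroy_psv_in *din)
{
        din->opcode = CMD_OP_DESTROY_PSV;
        din->psvn = out->psv0_index;
}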
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index b8841355fcd5..571bb8539cdb 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -32,6 +32,9 @@ mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB:
*namespace = MLX5_FLOW_NAMESPACE_FDB;
break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX:
+ *namespace = MLX5_FLOW_NAMESPACE_RDMA_RX;
+ break;
default:
return -EINVAL;
}
@@ -101,6 +104,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !dest_devx)
return -EINVAL;
+ /* Allow only DEVX object or QP as dest when inserting to RDMA_RX */
+ if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
+ ((!dest_devx && !dest_qp) || (dest_devx && dest_qp)))
+ return -EINVAL;
+
if (dest_devx) {
devx_obj = uverbs_attr_get_obj(
attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
@@ -112,8 +120,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
*/
if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type))
return -EINVAL;
- /* Allow only flow table as dest when inserting to FDB */
- if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
+ /* Allow only flow table as dest when inserting to FDB or RDMA_RX */
+ if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB ||
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
return -EINVAL;
} else if (dest_qp) {
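The new RDMA_RX destination check rejects both "no destination" and "two destinations"; for two booleans, ((!a && !b) || (a && b)) is exactly a == b, so the valid case is the inequality. A reduced sketch (hypothetical helper name):

#include <stdbool.h>

/* Exactly one of the two destinations must be supplied when
 * inserting into RDMA_RX.
 */
static bool rdma_rx_dest_is_valid(bool dest_devx, bool dest_qp)
{
        return dest_devx != dest_qp;
}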
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 0569bcab02d4..c6bc4aee7638 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -535,7 +535,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
props->pkey_tbl_len = 1;
props->state = IB_PORT_DOWN;
- props->phys_state = 3;
+ props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
props->qkey_viol_cntr = qkey_viol_cntr;
@@ -561,7 +561,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
if (netif_running(ndev) && netif_carrier_ok(ndev)) {
props->state = IB_PORT_ACTIVE;
- props->phys_state = 5;
+ props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
}
ndev_ib_mtu = iboe_get_mtu(ndev->mtu);
@@ -1867,10 +1867,6 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
if (err)
goto out_sys_pages;
- if (ibdev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING)
- context->ibucontext.invalidate_range =
- &mlx5_ib_invalidate_range;
-
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
err = mlx5_ib_devx_create(dev, true);
if (err < 0)
@@ -3967,6 +3963,11 @@ _get_flow_table(struct mlx5_ib_dev *dev,
esw_encap)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
priority = FDB_BYPASS_PATH;
+ } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
+ max_table_size =
+ BIT(MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
+ log_max_ft_size));
+ priority = fs_matcher->priority;
}
max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
@@ -3981,6 +3982,8 @@ _get_flow_table(struct mlx5_ib_dev *dev,
prio = &dev->flow_db->egress_prios[priority];
else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB)
prio = &dev->flow_db->fdb;
+ else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX)
+ prio = &dev->flow_db->rdma_rx[priority];
if (!prio)
return ERR_PTR(-EINVAL);
@@ -5322,11 +5325,21 @@ static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
};
+static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev)
+{
+ return MLX5_ESWITCH_MANAGER(mdev) &&
+ mlx5_ib_eswitch_mode(mdev->priv.eswitch) ==
+ MLX5_ESWITCH_OFFLOADS;
+}
+
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
+ int num_cnt_ports;
int i;
- for (i = 0; i < dev->num_ports; i++) {
+ num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
+
+ for (i = 0; i < num_cnt_ports; i++) {
if (dev->port[i].cnts.set_id_valid)
mlx5_core_dealloc_q_counter(dev->mdev,
dev->port[i].cnts.set_id);
@@ -5428,13 +5441,15 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
{
+ int num_cnt_ports;
int err = 0;
int i;
bool is_shared;
is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
+ num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
- for (i = 0; i < dev->num_ports; i++) {
+ for (i = 0; i < num_cnt_ports; i++) {
err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
if (err)
goto err_alloc;
@@ -5454,7 +5469,6 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
}
dev->port[i].cnts.set_id_valid = true;
}
-
return 0;
err_alloc:
@@ -5462,25 +5476,50 @@ err_alloc:
return err;
}
+static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
+ u8 port_num)
+{
+ return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts :
+ &dev->port[port_num].cnts;
+}
+
+/**
+ * mlx5_ib_get_counters_id - Return the counter set id to use for a device+port
+ * @dev: Pointer to mlx5 IB device
+ * @port_num: Zero-based port number
+ *
+ * mlx5_ib_get_counters_id() returns the counter set id to use for the
+ * given device and port combination, in both switchdev and non-switchdev
+ * modes of the parent device.
+ */
+u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num)
+{
+ const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
+
+ return cnts->set_id;
+}
+
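Callers now go through mlx5_ib_get_counters_id() with a zero-based port instead of dereferencing dev->port[] directly. A hedged usage sketch (hypothetical wrapper mirroring the mlx5_ib_modify_dct() hunk later in this patch):

/* Program a DCT context with the right counter set id; port_num
 * here is the one-based IB port, hence the - 1.
 */
static void set_dct_counter(struct mlx5_ib_dev *dev, void *dctc, u8 port_num)
{
        u16 set_id = mlx5_ib_get_counters_id(dev, port_num - 1);

        MLX5_SET(dctc, dctc, counter_set_id, set_id);
}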
static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
u8 port_num)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_ib_port *port = &dev->port[port_num - 1];
+ const struct mlx5_ib_counters *cnts;
+ bool is_switchdev = is_mdev_switchdev_mode(dev->mdev);
- /* We support only per port stats */
- if (port_num == 0)
+ if ((is_switchdev && port_num) || (!is_switchdev && !port_num))
return NULL;
- return rdma_alloc_hw_stats_struct(port->cnts.names,
- port->cnts.num_q_counters +
- port->cnts.num_cong_counters +
- port->cnts.num_ext_ppcnt_counters,
+ cnts = get_counters(dev, port_num - 1);
+
+ return rdma_alloc_hw_stats_struct(cnts->names,
+ cnts->num_q_counters +
+ cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
- struct mlx5_ib_port *port,
+ const struct mlx5_ib_counters *cnts,
struct rdma_hw_stats *stats,
u16 set_id)
{
@@ -5497,8 +5536,8 @@ static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
if (ret)
goto free;
- for (i = 0; i < port->cnts.num_q_counters; i++) {
- val = *(__be32 *)(out + port->cnts.offsets[i]);
+ for (i = 0; i < cnts->num_q_counters; i++) {
+ val = *(__be32 *)(out + cnts->offsets[i]);
stats->value[i] = (u64)be32_to_cpu(val);
}
@@ -5508,10 +5547,10 @@ free:
}
static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
- struct mlx5_ib_port *port,
- struct rdma_hw_stats *stats)
+ const struct mlx5_ib_counters *cnts,
+ struct rdma_hw_stats *stats)
{
- int offset = port->cnts.num_q_counters + port->cnts.num_cong_counters;
+ int offset = cnts->num_q_counters + cnts->num_cong_counters;
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
int ret, i;
void *out;
@@ -5524,12 +5563,10 @@ static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
if (ret)
goto free;
- for (i = 0; i < port->cnts.num_ext_ppcnt_counters; i++) {
+ for (i = 0; i < cnts->num_ext_ppcnt_counters; i++)
stats->value[i + offset] =
be64_to_cpup((__be64 *)(out +
- port->cnts.offsets[i + offset]));
- }
-
+ cnts->offsets[i + offset]));
free:
kvfree(out);
return ret;
@@ -5540,7 +5577,7 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
u8 port_num, int index)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_ib_port *port = &dev->port[port_num - 1];
+ const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
struct mlx5_core_dev *mdev;
int ret, num_counters;
u8 mdev_port_num;
@@ -5548,18 +5585,17 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
if (!stats)
return -EINVAL;
- num_counters = port->cnts.num_q_counters +
- port->cnts.num_cong_counters +
- port->cnts.num_ext_ppcnt_counters;
+ num_counters = cnts->num_q_counters +
+ cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
/* q_counters are per IB device, query the master mdev */
- ret = mlx5_ib_query_q_counters(dev->mdev, port, stats,
- port->cnts.set_id);
+ ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, cnts->set_id);
if (ret)
return ret;
if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
- ret = mlx5_ib_query_ext_ppcnt_counters(dev, port, stats);
+ ret = mlx5_ib_query_ext_ppcnt_counters(dev, cnts, stats);
if (ret)
return ret;
}
@@ -5576,10 +5612,10 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
}
ret = mlx5_lag_query_cong_counters(dev->mdev,
stats->value +
- port->cnts.num_q_counters,
- port->cnts.num_cong_counters,
- port->cnts.offsets +
- port->cnts.num_q_counters);
+ cnts->num_q_counters,
+ cnts->num_cong_counters,
+ cnts->offsets +
+ cnts->num_q_counters);
mlx5_ib_put_native_port_mdev(dev, port_num);
if (ret)
@@ -5594,20 +5630,22 @@ static struct rdma_hw_stats *
mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
{
struct mlx5_ib_dev *dev = to_mdev(counter->device);
- struct mlx5_ib_port *port = &dev->port[counter->port - 1];
+ const struct mlx5_ib_counters *cnts =
+ get_counters(dev, counter->port - 1);
/* Q counters are at the beginning of all counters */
- return rdma_alloc_hw_stats_struct(port->cnts.names,
- port->cnts.num_q_counters,
+ return rdma_alloc_hw_stats_struct(cnts->names,
+ cnts->num_q_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
{
struct mlx5_ib_dev *dev = to_mdev(counter->device);
- struct mlx5_ib_port *port = &dev->port[counter->port - 1];
+ const struct mlx5_ib_counters *cnts =
+ get_counters(dev, counter->port - 1);
- return mlx5_ib_query_q_counters(dev->mdev, port,
+ return mlx5_ib_query_q_counters(dev->mdev, cnts,
counter->stats, counter->id);
}
@@ -5784,7 +5822,6 @@ static void init_delay_drop(struct mlx5_ib_dev *dev)
mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
}
-/* The mlx5_ib_multiport_mutex should be held when calling this function */
static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
struct mlx5_ib_multiport_info *mpi)
{
@@ -5794,6 +5831,8 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
int err;
int i;
+ lockdep_assert_held(&mlx5_ib_multiport_mutex);
+
mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
spin_lock(&port->mp.mpi_lock);
@@ -5843,13 +5882,14 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
}
-/* The mlx5_ib_multiport_mutex should be held when calling this function */
static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
struct mlx5_ib_multiport_info *mpi)
{
u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
int err;
+ lockdep_assert_held(&mlx5_ib_multiport_mutex);
+
spin_lock(&ibdev->port[port_num].mp.mpi_lock);
if (ibdev->port[port_num].mp.mpi) {
mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
@@ -6932,7 +6972,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->port = kcalloc(num_ports, sizeof(*dev->port),
GFP_KERNEL);
if (!dev->port) {
- ib_dealloc_device((struct ib_device *)dev);
+ ib_dealloc_device(&dev->ib_dev);
return NULL;
}
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index a40e0abf2338..b5aece786b36 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -56,19 +56,6 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
struct scatterlist *sg;
int entry;
- if (umem->is_odp) {
- struct ib_umem_odp *odp = to_ib_umem_odp(umem);
- unsigned int page_shift = odp->page_shift;
-
- *ncont = ib_umem_odp_num_pages(odp);
- *count = *ncont << (page_shift - PAGE_SHIFT);
- *shift = page_shift;
- if (order)
- *order = ilog2(roundup_pow_of_two(*ncont));
-
- return;
- }
-
addr = addr >> PAGE_SHIFT;
tmp = (unsigned long)addr;
m = find_first_bit(&tmp, BITS_PER_LONG);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 9ae587b74b12..c3ea299fe6c9 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -200,6 +200,7 @@ struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
struct mlx5_ib_flow_prio fdb;
+ struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT];
struct mlx5_flow_table *lag_demux_ft;
/* Protect flow steering bypass flow tables
* when add/del flow rules.
@@ -1475,6 +1476,7 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
bool dyn_bfreg);
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
+u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num);
static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
bool do_modify_atomic)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 3401f5f6792e..1eff031ef048 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -784,19 +784,37 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
int *ncont, int *order)
{
struct ib_umem *u;
- int err;
*umem = NULL;
- u = ib_umem_get(udata, start, length, access_flags, 0);
- err = PTR_ERR_OR_ZERO(u);
- if (err) {
- mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
- return err;
+ if (access_flags & IB_ACCESS_ON_DEMAND) {
+ struct ib_umem_odp *odp;
+
+ odp = ib_umem_odp_get(udata, start, length, access_flags);
+ if (IS_ERR(odp)) {
+ mlx5_ib_dbg(dev, "umem get failed (%ld)\n",
+ PTR_ERR(odp));
+ return PTR_ERR(odp);
+ }
+
+ u = &odp->umem;
+
+ *page_shift = odp->page_shift;
+ *ncont = ib_umem_odp_num_pages(odp);
+ *npages = *ncont << (*page_shift - PAGE_SHIFT);
+ if (order)
+ *order = ilog2(roundup_pow_of_two(*ncont));
+ } else {
+ u = ib_umem_get(udata, start, length, access_flags, 0);
+ if (IS_ERR(u)) {
+ mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
+ return PTR_ERR(u);
+ }
+
+ mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
+ page_shift, ncont, order);
}
- mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
- page_shift, ncont, order);
if (!*npages) {
mlx5_ib_warn(dev, "avoid zero region\n");
ib_umem_release(u);
@@ -1599,7 +1617,7 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
/* Wait for all running page-fault handlers to finish. */
synchronize_srcu(&dev->mr_srcu);
/* Destroy all page mappings */
- if (umem_odp->page_list)
+ if (!umem_odp->is_implicit_odp)
mlx5_ib_invalidate_range(umem_odp,
ib_umem_start(umem_odp),
ib_umem_end(umem_odp));
@@ -1610,7 +1628,7 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
* so that there will not be any invalidations in
* flight, looking at the *mr struct.
*/
- ib_umem_release(umem);
+ ib_umem_odp_release(umem_odp);
atomic_sub(npages, &dev->mdev->priv.reg_pages);
/* Avoid double-freeing the umem. */
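In the ODP branch of mr_umem_get() above, the counts are derived from the ODP page shift rather than scanned from a scatterlist: npages = ncont << (page_shift - PAGE_SHIFT). A worked sketch of that arithmetic, with hypothetical sizes:

#include <stdio.h>

#define PAGE_SHIFT 12 /* 4 KiB system pages, as on x86-64 */

int main(void)
{
        unsigned int page_shift = 21;   /* hypothetical 2 MiB ODP pages */
        unsigned long ncont = 8;        /* pages at the ODP granularity */
        unsigned long npages = ncont << (page_shift - PAGE_SHIFT);

        /* 8 << (21 - 12) = 8 * 512 = 4096 system pages */
        printf("npages = %lu\n", npages);
        return 0;
}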
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 0a59912a4cef..14fe94bcc788 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -184,7 +184,7 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
for (i = 0; i < nentries; i++, pklm++) {
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
va = (offset + i) * MLX5_IMR_MTT_SIZE;
- if (odp && odp->umem.address == va) {
+ if (odp && ib_umem_start(odp) == va) {
struct mlx5_ib_mr *mtt = odp->private;
pklm->key = cpu_to_be32(mtt->ibmr.lkey);
@@ -206,7 +206,7 @@ static void mr_leaf_free_action(struct work_struct *work)
mr->parent = NULL;
synchronize_srcu(&mr->dev->mr_srcu);
- ib_umem_release(&odp->umem);
+ ib_umem_odp_release(odp);
if (imr->live)
mlx5_ib_update_xlt(imr, idx, 1, 0,
MLX5_IB_UPD_XLT_INDIRECT |
@@ -386,7 +386,7 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
}
static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
- struct ib_umem *umem,
+ struct ib_umem_odp *umem_odp,
bool ksm, int access_flags)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -404,7 +404,7 @@ static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
mr->dev = dev;
mr->access_flags = access_flags;
mr->mmkey.iova = 0;
- mr->umem = umem;
+ mr->umem = &umem_odp->umem;
if (ksm) {
err = mlx5_ib_update_xlt(mr, 0,
@@ -464,18 +464,17 @@ next_mr:
if (nentries)
nentries++;
} else {
- odp = ib_alloc_odp_umem(odp_mr, addr,
- MLX5_IMR_MTT_SIZE);
+ odp = ib_umem_odp_alloc_child(odp_mr, addr, MLX5_IMR_MTT_SIZE);
if (IS_ERR(odp)) {
mutex_unlock(&odp_mr->umem_mutex);
return ERR_CAST(odp);
}
- mtt = implicit_mr_alloc(mr->ibmr.pd, &odp->umem, 0,
+ mtt = implicit_mr_alloc(mr->ibmr.pd, odp, 0,
mr->access_flags);
if (IS_ERR(mtt)) {
mutex_unlock(&odp_mr->umem_mutex);
- ib_umem_release(&odp->umem);
+ ib_umem_odp_release(odp);
return ERR_CAST(mtt);
}
@@ -497,7 +496,7 @@ next_mr:
addr += MLX5_IMR_MTT_SIZE;
if (unlikely(addr < io_virt + bcnt)) {
odp = odp_next(odp);
- if (odp && odp->umem.address != addr)
+ if (odp && ib_umem_start(odp) != addr)
odp = NULL;
goto next_mr;
}
@@ -521,19 +520,19 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
int access_flags)
{
struct mlx5_ib_mr *imr;
- struct ib_umem *umem;
+ struct ib_umem_odp *umem_odp;
- umem = ib_umem_get(udata, 0, 0, access_flags, 0);
- if (IS_ERR(umem))
- return ERR_CAST(umem);
+ umem_odp = ib_umem_odp_alloc_implicit(udata, access_flags);
+ if (IS_ERR(umem_odp))
+ return ERR_CAST(umem_odp);
- imr = implicit_mr_alloc(&pd->ibpd, umem, 1, access_flags);
+ imr = implicit_mr_alloc(&pd->ibpd, umem_odp, 1, access_flags);
if (IS_ERR(imr)) {
- ib_umem_release(umem);
+ ib_umem_odp_release(umem_odp);
return ERR_CAST(imr);
}
- imr->umem = umem;
+ imr->umem = &umem_odp->umem;
init_waitqueue_head(&imr->q_leaf_free);
atomic_set(&imr->num_leaf_free, 0);
atomic_set(&imr->num_pending_prefetch, 0);
@@ -541,34 +540,31 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
return imr;
}
-static int mr_leaf_free(struct ib_umem_odp *umem_odp, u64 start, u64 end,
- void *cookie)
+void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
{
- struct mlx5_ib_mr *mr = umem_odp->private, *imr = cookie;
-
- if (mr->parent != imr)
- return 0;
-
- ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
- ib_umem_end(umem_odp));
+ struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
+ struct rb_node *node;
- if (umem_odp->dying)
- return 0;
+ down_read(&per_mm->umem_rwsem);
+ for (node = rb_first_cached(&per_mm->umem_tree); node;
+ node = rb_next(node)) {
+ struct ib_umem_odp *umem_odp =
+ rb_entry(node, struct ib_umem_odp, interval_tree.rb);
+ struct mlx5_ib_mr *mr = umem_odp->private;
- WRITE_ONCE(umem_odp->dying, 1);
- atomic_inc(&imr->num_leaf_free);
- schedule_work(&umem_odp->work);
+ if (mr->parent != imr)
+ continue;
- return 0;
-}
+ ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
+ ib_umem_end(umem_odp));
-void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
-{
- struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
+ if (umem_odp->dying)
+ continue;
- down_read(&per_mm->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, 0, ULLONG_MAX,
- mr_leaf_free, true, imr);
+ WRITE_ONCE(umem_odp->dying, 1);
+ atomic_inc(&imr->num_leaf_free);
+ schedule_work(&umem_odp->work);
+ }
up_read(&per_mm->umem_rwsem);
wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
@@ -589,7 +585,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
struct ib_umem_odp *odp;
size_t size;
- if (!odp_mr->page_list) {
+ if (odp_mr->is_implicit_odp) {
odp = implicit_mr_get_data(mr, io_virt, bcnt);
if (IS_ERR(odp))
@@ -607,7 +603,7 @@ next_mr:
start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
access_mask = ODP_READ_ALLOWED_BIT;
- if (prefetch && !downgrade && !mr->umem->writable) {
+ if (prefetch && !downgrade && !odp->umem.writable) {
/* prefetch with write-access must
* be supported by the MR
*/
@@ -615,7 +611,7 @@ next_mr:
goto out;
}
- if (mr->umem->writable && !downgrade)
+ if (odp->umem.writable && !downgrade)
access_mask |= ODP_WRITE_ALLOWED_BIT;
current_seq = READ_ONCE(odp->notifiers_seq);
@@ -625,8 +621,8 @@ next_mr:
*/
smp_rmb();
- ret = ib_umem_odp_map_dma_pages(to_ib_umem_odp(mr->umem), io_virt, size,
- access_mask, current_seq);
+ ret = ib_umem_odp_map_dma_pages(odp, io_virt, size, access_mask,
+ current_seq);
if (ret < 0)
goto out;
@@ -634,8 +630,7 @@ next_mr:
np = ret;
mutex_lock(&odp->umem_mutex);
- if (!ib_umem_mmu_notifier_retry(to_ib_umem_odp(mr->umem),
- current_seq)) {
+ if (!ib_umem_mmu_notifier_retry(odp, current_seq)) {
/*
* No need to check whether the MTTs really belong to
* this MR, since ib_umem_odp_map_dma_pages already
@@ -668,7 +663,7 @@ next_mr:
io_virt += size;
next = odp_next(odp);
- if (unlikely(!next || next->umem.address != io_virt)) {
+ if (unlikely(!next || ib_umem_start(next) != io_virt)) {
mlx5_ib_dbg(dev, "next implicit leaf removed at 0x%llx. got %p\n",
io_virt, next);
return -EAGAIN;
@@ -987,17 +982,6 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
return ret < 0 ? ret : npages;
}
-static const u32 mlx5_ib_odp_opcode_cap[] = {
- [MLX5_OPCODE_SEND] = IB_ODP_SUPPORT_SEND,
- [MLX5_OPCODE_SEND_IMM] = IB_ODP_SUPPORT_SEND,
- [MLX5_OPCODE_SEND_INVAL] = IB_ODP_SUPPORT_SEND,
- [MLX5_OPCODE_RDMA_WRITE] = IB_ODP_SUPPORT_WRITE,
- [MLX5_OPCODE_RDMA_WRITE_IMM] = IB_ODP_SUPPORT_WRITE,
- [MLX5_OPCODE_RDMA_READ] = IB_ODP_SUPPORT_READ,
- [MLX5_OPCODE_ATOMIC_CS] = IB_ODP_SUPPORT_ATOMIC,
- [MLX5_OPCODE_ATOMIC_FA] = IB_ODP_SUPPORT_ATOMIC,
-};
-
/*
* Parse initiator WQE. Advances the wqe pointer to point at the
* scatter-gather list, and sets wqe_end to the end of the WQE.
@@ -1008,12 +992,8 @@ static int mlx5_ib_mr_initiator_pfault_handler(
{
struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
u16 wqe_index = pfault->wqe.wqe_index;
- u32 transport_caps;
struct mlx5_base_av *av;
unsigned ds, opcode;
-#if defined(DEBUG)
- u32 ctrl_wqe_index, ctrl_qpn;
-#endif
u32 qpn = qp->trans_qp.base.mqp.qpn;
ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
@@ -1029,58 +1009,17 @@ static int mlx5_ib_mr_initiator_pfault_handler(
return -EFAULT;
}
-#if defined(DEBUG)
- ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
- MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
- MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
- if (wqe_index != ctrl_wqe_index) {
- mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
- wqe_index, qpn,
- ctrl_wqe_index);
- return -EFAULT;
- }
-
- ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
- MLX5_WQE_CTRL_QPN_SHIFT;
- if (qpn != ctrl_qpn) {
- mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
- wqe_index, qpn,
- ctrl_qpn);
- return -EFAULT;
- }
-#endif /* DEBUG */
-
*wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
*wqe += sizeof(*ctrl);
opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
MLX5_WQE_CTRL_OPCODE_MASK;
- switch (qp->ibqp.qp_type) {
- case IB_QPT_XRC_INI:
+ if (qp->ibqp.qp_type == IB_QPT_XRC_INI)
*wqe += sizeof(struct mlx5_wqe_xrc_seg);
- transport_caps = dev->odp_caps.per_transport_caps.xrc_odp_caps;
- break;
- case IB_QPT_RC:
- transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
- break;
- case IB_QPT_UD:
- transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps;
- break;
- default:
- mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n",
- qp->ibqp.qp_type);
- return -EFAULT;
- }
- if (unlikely(opcode >= ARRAY_SIZE(mlx5_ib_odp_opcode_cap) ||
- !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
- mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
- opcode);
- return -EFAULT;
- }
-
- if (qp->ibqp.qp_type == IB_QPT_UD) {
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->qp_sub_type == MLX5_IB_QPT_DCI) {
av = *wqe;
if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
*wqe += sizeof(struct mlx5_av);
@@ -1143,19 +1082,6 @@ static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
return -EFAULT;
}
- switch (qp->ibqp.qp_type) {
- case IB_QPT_RC:
- if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
- IB_ODP_SUPPORT_RECV))
- goto invalid_transport_or_opcode;
- break;
- default:
-invalid_transport_or_opcode:
- mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
- qp->ibqp.qp_type);
- return -EFAULT;
- }
-
*wqe_end = wqe + wqe_size;
return 0;
@@ -1618,6 +1544,7 @@ void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
.advise_mr = mlx5_ib_advise_mr,
+ .invalidate_range = mlx5_ib_invalidate_range,
};
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
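Moving invalidate_range out of the ucontext and into mlx5_ib_dev_odp_ops means the callback is wired up once per device. A minimal sketch of that registration pattern, assuming the kernel's const-ops convention (my_* names are hypothetical):

#include <rdma/ib_verbs.h>

/* A driver declares a const ops table and merges it into the
 * ib_device at init time, instead of patching each ucontext.
 */
static const struct ib_device_ops my_odp_ops = {
        .invalidate_range = mlx5_ib_invalidate_range,
};

static void my_odp_init(struct ib_device *ibdev)
{
        ib_set_device_ops(ibdev, &my_odp_ops);
}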
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 72869ff4a334..8937d72ddcf6 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3386,19 +3386,16 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_ib_qp *mqp = to_mqp(qp);
struct mlx5_qp_context context = {};
- struct mlx5_ib_port *mibport = NULL;
struct mlx5_ib_qp_base *base;
u32 set_id;
if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id))
return 0;
- if (counter) {
+ if (counter)
set_id = counter->id;
- } else {
- mibport = &dev->port[mqp->port - 1];
- set_id = mibport->cnts.set_id;
- }
+ else
+ set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
base = &mqp->trans_qp.base;
context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff);
@@ -3459,7 +3456,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_qp_context *context;
struct mlx5_ib_pd *pd;
- struct mlx5_ib_port *mibport = NULL;
enum mlx5_qp_state mlx5_cur, mlx5_new;
enum mlx5_qp_optpar optpar;
u32 set_id = 0;
@@ -3624,11 +3620,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (qp->flags & MLX5_IB_QP_UNDERLAY)
port_num = 0;
- mibport = &dev->port[port_num];
if (ibqp->counter)
set_id = ibqp->counter->id;
else
- set_id = mibport->cnts.set_id;
+ set_id = mlx5_ib_get_counters_id(dev, port_num);
context->qp_counter_set_usr_page |=
cpu_to_be32(set_id << 24);
}
@@ -3817,6 +3812,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ u16 set_id;
+
required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
if (!is_valid_mask(attr_mask, required, 0))
return -EINVAL;
@@ -3843,7 +3840,9 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
MLX5_SET(dctc, dctc, port, attr->port_num);
- MLX5_SET(dctc, dctc, counter_set_id, dev->port[attr->port_num - 1].cnts.set_id);
+
+ set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
+ MLX5_SET(dctc, dctc, counter_set_id, set_id);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
struct mlx5_ib_modify_qp_resp resp = {};
@@ -6345,11 +6344,13 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
}
if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
+ u16 set_id;
+
+ set_id = mlx5_ib_get_counters_id(dev, 0);
if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
MLX5_SET64(modify_rq_in, in, modify_bitmask,
MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
- MLX5_SET(rqc, rqc, counter_set_id,
- dev->port->cnts.set_id);
+ MLX5_SET(rqc, rqc, counter_set_id, set_id);
} else
dev_info_once(
&dev->ib_dev.dev,
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index b0d0687c7a68..8fc3630a9d4c 100644
--- a/drivers/infiniband/hw/mlx5/srq_cmd.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -86,7 +86,7 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
xa_lock(&table->array);
srq = xa_load(&table->array, srqn);
if (srq)
- atomic_inc(&srq->common.refcount);
+ refcount_inc(&srq->common.refcount);
xa_unlock(&table->array);
return srq;
@@ -592,7 +592,7 @@ int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
if (err)
return err;
- atomic_set(&srq->common.refcount, 1);
+ refcount_set(&srq->common.refcount, 1);
init_completion(&srq->common.free);
err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
@@ -675,7 +675,7 @@ static int srq_event_notifier(struct notifier_block *nb,
xa_lock(&table->array);
srq = xa_load(&table->array, srqn);
if (srq)
- atomic_inc(&srq->common.refcount);
+ refcount_inc(&srq->common.refcount);
xa_unlock(&table->array);
if (!srq)
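The atomic_t to refcount_t conversion above buys saturation plus overflow/underflow warnings for free. A minimal sketch of the lookup-with-reference pattern the SRQ table uses, with hypothetical names:

#include <linux/refcount.h>

struct my_obj {
        refcount_t refcount;
};

/* Take a reference while the lookup lock is held; refcount_inc()
 * WARNs and saturates on overflow or inc-from-zero, which
 * atomic_inc() silently permits.
 */
static void my_obj_get(struct my_obj *obj)
{
        refcount_inc(&obj->refcount);
}

/* Returns true when the last reference is dropped. */
static bool my_obj_put(struct my_obj *obj)
{
        return refcount_dec_and_test(&obj->refcount);
}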
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index bccc11378109..e8267e590772 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -163,10 +163,10 @@ int ocrdma_query_port(struct ib_device *ibdev,
netdev = dev->nic_info.netdev;
if (netif_running(netdev) && netif_oper_up(netdev)) {
port_state = IB_PORT_ACTIVE;
- props->phys_state = 5;
+ props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
} else {
port_state = IB_PORT_DOWN;
- props->phys_state = 3;
+ props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
}
props->max_mtu = IB_MTU_4096;
props->active_mtu = iboe_get_mtu(netdev->mtu);
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index f97b3d65b30c..5136b835e1ba 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -826,7 +826,7 @@ static int qedr_init_hw(struct qedr_dev *dev)
if (rc)
goto out;
- dev->db_addr = (void __iomem *)(uintptr_t)out_params.dpi_addr;
+ dev->db_addr = out_params.dpi_addr;
dev->db_phys_addr = out_params.dpi_phys_addr;
dev->db_size = out_params.dpi_size;
dev->dpi = out_params.dpi;
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index a92ca22e5de1..0cfd849b13d6 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -229,7 +229,7 @@ struct qedr_ucontext {
struct ib_ucontext ibucontext;
struct qedr_dev *dev;
struct qedr_pd *pd;
- u64 dpi_addr;
+ void __iomem *dpi_addr;
u64 dpi_phys_addr;
u32 dpi_size;
u16 dpi;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 27d90a84ea01..6f3ce86019b7 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -221,10 +221,10 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
/* *attr being zeroed by the caller, avoid zeroing it here */
if (rdma_port->port_state == QED_RDMA_PORT_UP) {
attr->state = IB_PORT_ACTIVE;
- attr->phys_state = 5;
+ attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
} else {
attr->state = IB_PORT_DOWN;
- attr->phys_state = 3;
+ attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
}
attr->max_mtu = IB_MTU_4096;
attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
@@ -2451,7 +2451,6 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
struct qedr_dev *dev = qp->dev;
struct ib_qp_attr attr;
int attr_mask = 0;
- int rc = 0;
DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
qp, qp->qp_type);
@@ -2496,7 +2495,7 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
xa_erase_irq(&dev->qps, qp->qp_id);
kfree(qp);
}
- return rc;
+ return 0;
}
int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 27b6e664e59d..b0144229cf3b 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1789,7 +1789,6 @@ static void unlock_expected_tids(struct qib_ctxtdata *rcd)
static int qib_close(struct inode *in, struct file *fp)
{
- int ret = 0;
struct qib_filedata *fd;
struct qib_ctxtdata *rcd;
struct qib_devdata *dd;
@@ -1873,7 +1872,7 @@ static int qib_close(struct inode *in, struct file *fp)
bail:
kfree(fd);
- return ret;
+ return 0;
}
static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index 03f54eb9404b..c9abe1c01e4e 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -89,9 +89,15 @@ static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)
void usnic_ib_log_vf(struct usnic_ib_vf *vf)
{
- char buf[1000];
- usnic_ib_dump_vf(vf, buf, sizeof(buf));
+ char *buf = kzalloc(1000, GFP_KERNEL);
+
+ if (!buf)
+ return;
+
+ usnic_ib_dump_vf(vf, buf, 1000);
usnic_dbg("%s\n", buf);
+
+ kfree(buf);
}
/* Start of netdev section */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index eeb07b245ef9..556b8e44a51c 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -194,7 +194,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
return ERR_CAST(dev_list);
for (i = 0; dev_list[i]; i++) {
dev = dev_list[i];
- vf = pci_get_drvdata(to_pci_dev(dev));
+ vf = dev_get_drvdata(dev);
spin_lock(&vf->lock);
vnic = vf->vnic;
if (!usnic_vnic_check_room(vnic, res_spec)) {
@@ -356,13 +356,14 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
if (!us_ibdev->ufdev->link_up) {
props->state = IB_PORT_DOWN;
- props->phys_state = 3;
+ props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
} else if (!us_ibdev->ufdev->inaddr) {
props->state = IB_PORT_INIT;
- props->phys_state = 4;
+ props->phys_state =
+ IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
} else {
props->state = IB_PORT_ACTIVE;
- props->phys_state = 5;
+ props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
}
props->port_cap_flags = 0;
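The magic numbers being replaced across these drivers are the IBTA PortPhysicalState encodings; judging by the substitutions in this series, the core enum presumably looks like the sketch below (the four values shown are exactly the literals replaced here; the full kernel enum would also cover the remaining encodings such as sleep, link-error recovery and PHY test):

enum ib_port_phys_state {
        IB_PORT_PHYS_STATE_POLLING = 2,
        IB_PORT_PHYS_STATE_DISABLED = 3,
        IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
        IB_PORT_PHYS_STATE_LINK_UP = 5,
};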
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index ecf6e659c0da..fb07eed9e402 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -65,10 +65,6 @@
*/
#define RXE_UVERBS_ABI_VERSION 2
-#define RDMA_LINK_PHYS_STATE_LINK_UP (5)
-#define RDMA_LINK_PHYS_STATE_DISABLED (3)
-#define RDMA_LINK_PHYS_STATE_POLLING (2)
-
#define RXE_ROCE_V2_SPORT (0xc000)
static inline u32 rxe_crc32(struct rxe_dev *rxe,
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 1abed47ca221..fe5207386700 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -154,7 +154,7 @@ enum rxe_port_param {
RXE_PORT_ACTIVE_WIDTH = IB_WIDTH_1X,
RXE_PORT_ACTIVE_SPEED = 1,
RXE_PORT_PKEY_TBL_LEN = 64,
- RXE_PORT_PHYS_STATE = 2,
+ RXE_PORT_PHYS_STATE = IB_PORT_PHYS_STATE_POLLING,
RXE_PORT_SUBNET_PREFIX = 0xfe80000000000000ULL,
};
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 4ebdfcf4d33e..623129f27f5a 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -69,11 +69,11 @@ static int rxe_query_port(struct ib_device *dev,
&attr->active_width);
if (attr->state == IB_PORT_ACTIVE)
- attr->phys_state = RDMA_LINK_PHYS_STATE_LINK_UP;
+ attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
else if (dev_get_flags(rxe->ndev) & IFF_UP)
- attr->phys_state = RDMA_LINK_PHYS_STATE_POLLING;
+ attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
else
- attr->phys_state = RDMA_LINK_PHYS_STATE_DISABLED;
+ attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
mutex_unlock(&rxe->usdev_lock);
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 438a2917a47c..8e72f955921d 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -518,11 +518,12 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
c_tx->mpa_crc_hd,
iov[seg].iov_base,
plen);
- } else if (do_crc)
- crypto_shash_update(
- c_tx->mpa_crc_hd,
- page_address(p) + fp_off,
- plen);
+ } else if (do_crc) {
+ crypto_shash_update(c_tx->mpa_crc_hd,
+ kmap(p) + fp_off,
+ plen);
+ kunmap(p);
+ }
} else {
u64 va = sge->laddr + sge_off;
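The siw change matters on 32-bit highmem configurations, where page_address() can return NULL for pages without a permanent kernel mapping; kmap() always yields a usable address and must be paired with kunmap(). A reduced sketch of the pattern (hypothetical helper):

#include <crypto/hash.h>
#include <linux/highmem.h>

/* Feed one page fragment into a digest; kmap() works even for
 * highmem pages.
 */
static int digest_page(struct shash_desc *desc, struct page *p,
                       unsigned int off, unsigned int len)
{
        void *addr = kmap(p); /* never NULL, unlike page_address() */
        int rv = crypto_shash_update(desc, addr + off, len);

        kunmap(p);
        return rv;
}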
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index da52c90e06d4..869e02b69a01 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -206,7 +206,8 @@ int siw_query_port(struct ib_device *base_dev, u8 port,
attr->gid_tbl_len = 1;
attr->max_msg_sz = -1;
attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
- attr->phys_state = sdev->state == IB_PORT_ACTIVE ? 5 : 3;
+ attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
+ IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
attr->pkey_tbl_len = 1;
attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
attr->state = sdev->state;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 39bf213444cb..52ce63592dcf 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -102,9 +102,10 @@
/* Default support is 512KB I/O size */
#define ISER_DEF_MAX_SECTORS 1024
-#define ISCSI_ISER_DEF_SG_TABLESIZE ((ISER_DEF_MAX_SECTORS * 512) >> SHIFT_4K)
-/* Maximum support is 8MB I/O size */
-#define ISCSI_ISER_MAX_SG_TABLESIZE ((16384 * 512) >> SHIFT_4K)
+#define ISCSI_ISER_DEF_SG_TABLESIZE \
+ ((ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> SHIFT_4K)
+/* Maximum support is 16MB I/O size */
+#define ISCSI_ISER_MAX_SG_TABLESIZE ((32768 * SECTOR_SIZE) >> SHIFT_4K)
#define ISER_DEF_XMIT_CMDS_DEFAULT 512
#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
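The comment fix is backed by the arithmetic: with SECTOR_SIZE = 512 and SHIFT_4K = 12, 32768 sectors are 16 MiB, i.e. 4096 4K-page entries (the old 16384-sector limit was 8 MiB, matching the old comment). As a quick check:

#include <stdio.h>

#define SECTOR_SIZE 512
#define SHIFT_4K 12

int main(void)
{
        /* 32768 * 512 = 16777216 bytes; >> 12 counts 4K pages */
        printf("max sg tablesize = %u\n",
               (32768u * SECTOR_SIZE) >> SHIFT_4K);
        /* prints 4096, i.e. a 16 MiB maximum I/O size */
        return 0;
}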
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 1a039f16d315..e25c70a56be6 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1767,8 +1767,8 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
goto out;
retry:
- ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size,
- 0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
+ ch->cq = ib_alloc_cq_any(sdev->device, ch, ch->rq_size + sq_size,
+ IB_POLL_WORKQUEUE);
if (IS_ERR(ch->cq)) {
ret = PTR_ERR(ch->cq);
pr_err("failed to create CQ cqe= %d ret= %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 8cdd7e66f8df..53d09620e215 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -446,6 +446,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_UMEM:
case MLX5_CMD_OP_DESTROY_UMEM:
case MLX5_CMD_OP_ALLOC_MEMIC:
+ case MLX5_CMD_OP_MODIFY_XRQ:
+ case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -637,6 +639,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
MLX5_COMMAND_STR_CASE(CREATE_UMEM);
MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
+ MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
+ MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
default: return "unknown command opcode";
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 8a4930c8bf62..2011eaf15cc5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -546,7 +546,7 @@ static void mlx5_fw_tracer_save_trace(struct mlx5_fw_tracer *tracer,
trace_data->timestamp = timestamp;
trace_data->lost = lost;
trace_data->event_id = event_id;
- strncpy(trace_data->msg, msg, TRACE_STR_MSG);
+ strscpy_pad(trace_data->msg, msg, TRACE_STR_MSG);
tracer->st_arr.saved_traces_index =
(tracer->st_arr.saved_traces_index + 1) & (SAVED_TRACES_NUM - 1);
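strncpy() neither guarantees NUL-termination when the source fills the buffer nor reports truncation; strscpy_pad() always terminates, zero-fills the remainder (so no stale bytes leak from the fixed slot), and returns -E2BIG on truncation. A small sketch (hypothetical helper around the driver's TRACE_STR_MSG slot):

#include <linux/printk.h>
#include <linux/string.h>

static void save_msg(char dst[TRACE_STR_MSG], const char *msg)
{
        if (strscpy_pad(dst, msg, TRACE_STR_MSG) < 0)
                pr_debug("trace message truncated\n");
}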
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 41f25ea2e8d9..a0e78ab64618 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -215,11 +215,7 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
*/
dma_rmb();
- if (likely(eqe->type < MLX5_EVENT_TYPE_MAX))
- atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
- else
- mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type);
-
+ atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);
++eq->cons_index;
@@ -328,10 +324,13 @@ err_buf:
/**
* mlx5_eq_enable - Enable EQ for receiving EQEs
- * @dev - Device which owns the eq
- * @eq - EQ to enable
- * @nb - notifier call block
- * mlx5_eq_enable - must be called after EQ is created in device.
+ * @dev: Device which owns the eq
+ * @eq: EQ to enable
+ * @nb: Notifier call block
+ *
+ * Must be called after the EQ is created in the device.
+ *
+ * Return: 0 on success
*/
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct notifier_block *nb)
@@ -348,11 +347,12 @@ int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
EXPORT_SYMBOL(mlx5_eq_enable);
/**
- * mlx5_eq_disable - Enable EQ for receiving EQEs
- * @dev - Device which owns the eq
- * @eq - EQ to disable
- * @nb - notifier call block
- * mlx5_eq_disable - must be called before EQ is destroyed.
+ * mlx5_eq_disable - Disable EQ for receiving EQEs
+ * @dev: Device which owns the eq
+ * @eq: EQ to disable
+ * @nb: Notifier call block
+ *
+ * Must be called before the EQ is destroyed.
*/
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct notifier_block *nb)
@@ -945,9 +945,6 @@ int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
struct mlx5_eq_table *eqt = dev->priv.eq_table;
- if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
- return -EINVAL;
-
return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);
@@ -956,9 +953,6 @@ int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
struct mlx5_eq_table *eqt = dev->priv.eq_table;
- if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
- return -EINVAL;
-
return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
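Dropping the MLX5_EVENT_TYPE_MAX checks presumably relies on eqt->nh[] spanning every possible value of the 8-bit event type, so any type indexes a valid, possibly empty, chain. The table-of-chains pattern, reduced to a self-contained sketch (ev_* names are hypothetical):

#include <linux/notifier.h>
#include <linux/types.h>

/* One chain per possible 8-bit event type; each head must be set up
 * with ATOMIC_INIT_NOTIFIER_HEAD() before use. Because the table
 * covers all 256 values, dispatch needs no range check.
 */
static struct atomic_notifier_head ev_nh[256];

static int ev_register(u8 type, struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&ev_nh[type], nb);
}

static void ev_dispatch(u8 type, void *data)
{
        /* Calls every notifier registered for this type, if any. */
        atomic_notifier_call_chain(&ev_nh[type], type, data);
}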
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 1f3891fde2eb..81e03e493a01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -58,20 +58,9 @@ struct vport_addr {
bool mc_promisc;
};
-enum {
- UC_ADDR_CHANGE = BIT(0),
- MC_ADDR_CHANGE = BIT(1),
- PROMISC_CHANGE = BIT(3),
-};
-
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
-/* Vport context events */
-#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
- MC_ADDR_CHANGE | \
- PROMISC_CHANGE)
-
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
@@ -108,13 +97,13 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
- if (events_mask & UC_ADDR_CHANGE)
+ if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_uc_address_change, 1);
- if (events_mask & MC_ADDR_CHANGE)
+ if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_mc_address_change, 1);
- if (events_mask & PROMISC_CHANGE)
+ if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_promisc_change, 1);
@@ -463,6 +452,22 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
return err;
}
+#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
+ MLX5_VPORT_MC_ADDR_CHANGE | \
+ MLX5_VPORT_PROMISC_CHANGE)
+
+static int esw_legacy_enable(struct mlx5_eswitch *esw)
+{
+ int ret;
+
+ ret = esw_create_legacy_table(esw);
+ if (ret)
+ return ret;
+
+ mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+ return 0;
+}
+
static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
esw_cleanup_vepa_rules(esw);
@@ -470,6 +475,19 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
esw_destroy_legacy_vepa_table(esw);
}
+static void esw_legacy_disable(struct mlx5_eswitch *esw)
+{
+ struct esw_mc_addr *mc_promisc;
+
+ mlx5_eswitch_disable_pf_vf_vports(esw);
+
+ mc_promisc = &esw->mc_promisc;
+ if (mc_promisc->uplink_rule)
+ mlx5_del_flow_rules(mc_promisc->uplink_rule);
+
+ esw_destroy_legacy_table(esw);
+}
+
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
struct vport_addr *vaddr);
@@ -901,21 +919,21 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
vport->vport, mac);
- if (vport->enabled_events & UC_ADDR_CHANGE) {
+ if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
}
- if (vport->enabled_events & MC_ADDR_CHANGE)
+ if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
- if (vport->enabled_events & PROMISC_CHANGE) {
+ if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
esw_update_vport_rx_mode(esw, vport);
if (!IS_ERR_OR_NULL(vport->allmulti_rule))
esw_update_vport_mc_promisc(esw, vport);
}
- if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE))
+ if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
@@ -1393,18 +1411,49 @@ out:
return err;
}
+static bool element_type_supported(struct mlx5_eswitch *esw, int type)
+{
+ const struct mlx5_core_dev *dev = esw->dev;
+
+ switch (type) {
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_TASR;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_VPORT;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+ }
+ return false;
+}
+
/* Vport QoS management */
-static int esw_create_tsar(struct mlx5_eswitch *esw)
+static void esw_create_tsar(struct mlx5_eswitch *esw)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_core_dev *dev = esw->dev;
+ __be32 *attr;
int err;
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
- return 0;
+ return;
+
+ if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
+ return;
if (esw->qos.enabled)
- return -EEXIST;
+ return;
+
+ MLX5_SET(scheduling_context, tsar_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+
+ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+ *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
err = mlx5_create_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
@@ -1412,11 +1461,10 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
&esw->qos.root_tsar_id);
if (err) {
esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
- return err;
+ return;
}
esw->qos.enabled = true;
- return 0;
}
static void esw_destroy_tsar(struct mlx5_eswitch *esw)
@@ -1619,7 +1667,7 @@ static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
}
static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- int enable_events)
+ enum mlx5_eswitch_vport_event enabled_events)
{
u16 vport_num = vport->vport;
@@ -1641,7 +1689,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
/* Sync with current vport context */
- vport->enabled_events = enable_events;
+ vport->enabled_events = enabled_events;
vport->enabled = true;
/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
@@ -1770,11 +1818,46 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
+/* mlx5_eswitch_enable_pf_vf_vports() enables the PF, ECPF and VF vports,
+ * whichever of them are present on the eswitch.
+ */
+void
+mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+ enum mlx5_eswitch_vport_event enabled_events)
{
struct mlx5_vport *vport;
+ int i;
+
+ /* Enable PF vport */
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ esw_enable_vport(esw, vport, enabled_events);
+
+ /* Enable ECPF vports */
+ if (mlx5_ecpf_vport_exists(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+ esw_enable_vport(esw, vport, enabled_events);
+ }
+
+ /* Enable VF vports */
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ esw_enable_vport(esw, vport, enabled_events);
+}
+
+/* mlx5_eswitch_disable_pf_vf_vports() disables the PF, ECPF and VF vports,
+ * whichever of them were previously enabled on the eswitch.
+ */
+void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int i;
+
+ mlx5_esw_for_all_vports_reverse(esw, i, vport)
+ esw_disable_vport(esw, vport);
+}
+
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
+{
int err;
- int i, enabled_events;
if (!ESW_ALLOWED(esw) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
@@ -1788,44 +1871,23 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
esw_warn(esw->dev, "egress ACL is not supported by FW\n");
+ esw_create_tsar(esw);
+
esw->mode = mode;
mlx5_lag_update(esw->dev);
if (mode == MLX5_ESWITCH_LEGACY) {
- err = esw_create_legacy_table(esw);
- if (err)
- goto abort;
+ err = esw_legacy_enable(esw);
} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- err = esw_offloads_init(esw);
+ err = esw_offloads_enable(esw);
}
if (err)
goto abort;
- err = esw_create_tsar(esw);
- if (err)
- esw_warn(esw->dev, "Failed to create eswitch TSAR");
-
- enabled_events = (mode == MLX5_ESWITCH_LEGACY) ? SRIOV_VPORT_EVENTS :
- UC_ADDR_CHANGE;
-
- /* Enable PF vport */
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- esw_enable_vport(esw, vport, enabled_events);
-
- /* Enable ECPF vports */
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- esw_enable_vport(esw, vport, enabled_events);
- }
-
- /* Enable VF vports */
- mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
- esw_enable_vport(esw, vport, enabled_events);
-
mlx5_eswitch_event_handlers_register(esw);
esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
@@ -1847,10 +1909,7 @@ abort:
void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
- struct esw_mc_addr *mc_promisc;
- struct mlx5_vport *vport;
int old_mode;
- int i;
if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
return;
@@ -1859,21 +1918,14 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
- mc_promisc = &esw->mc_promisc;
mlx5_eswitch_event_handlers_unregister(esw);
- mlx5_esw_for_all_vports(esw, i, vport)
- esw_disable_vport(esw, vport);
-
- if (mc_promisc && mc_promisc->uplink_rule)
- mlx5_del_flow_rules(mc_promisc->uplink_rule);
-
- esw_destroy_tsar(esw);
-
if (esw->mode == MLX5_ESWITCH_LEGACY)
- esw_destroy_legacy_table(esw);
+ esw_legacy_disable(esw);
else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
- esw_offloads_cleanup(esw);
+ esw_offloads_disable(esw);
+
+ esw_destroy_tsar(esw);
old_mode = esw->mode;
esw->mode = MLX5_ESWITCH_NONE;
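Taken together, the eswitch.c changes route both modes through one pair of vport enable/disable helpers, parameterized by the event mask each mode wants to listen to. A minimal user-space C sketch of how such a mask gates the context-change handler; the enum values mirror the new mlx5_eswitch_vport_event, while the handler body and the assumption that the legacy mask bundles all three events (as the old SRIOV_VPORT_EVENTS did) are for illustration only:

#include <stdio.h>

/* Mirrors the mlx5_eswitch_vport_event enum added to eswitch.h */
enum vport_event {
	VPORT_UC_ADDR_CHANGE = 1 << 0,
	VPORT_MC_ADDR_CHANGE = 1 << 1,
	VPORT_PROMISC_CHANGE = 1 << 3,
};

/* Assumed masks: legacy subscribes to everything, offloads to UC only */
#define LEGACY_EVENTS \
	(VPORT_UC_ADDR_CHANGE | VPORT_MC_ADDR_CHANGE | VPORT_PROMISC_CHANGE)
#define OFFLOADS_EVENTS VPORT_UC_ADDR_CHANGE

static void handle_vport_context_change(unsigned int enabled_events,
					unsigned int event)
{
	if (event & enabled_events & VPORT_UC_ADDR_CHANGE)
		puts("sync UC address list");
	if (event & enabled_events & VPORT_MC_ADDR_CHANGE)
		puts("sync MC address list");
	if (event & enabled_events & VPORT_PROMISC_CHANGE)
		puts("sync promiscuous rx mode");
}

int main(void)
{
	/* Offloads mode: the mask filters the promisc event out */
	handle_vport_context_change(OFFLOADS_EVENTS, VPORT_PROMISC_CHANGE);
	/* Legacy mode: the same event triggers an rx-mode sync */
	handle_vport_context_change(LEGACY_EVENTS, VPORT_PROMISC_CHANGE);
	return 0;
}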
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 04685dbb280c..5ec7c6dfb88f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -101,6 +101,13 @@ struct mlx5_vport_info {
bool trusted;
};
+/* Vport context events */
+enum mlx5_eswitch_vport_event {
+ MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
+ MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
+ MLX5_VPORT_PROMISC_CHANGE = BIT(3),
+};
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
int vport;
@@ -122,7 +129,7 @@ struct mlx5_vport {
} qos;
bool enabled;
- u16 enabled_events;
+ enum mlx5_eswitch_vport_event enabled_events;
};
enum offloads_fdb_flags {
@@ -207,8 +214,11 @@ enum {
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
+ /* legacy data structures */
struct mlx5_eswitch_fdb fdb_table;
struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
+ struct esw_mc_addr mc_promisc;
+ /* end of legacy */
struct workqueue_struct *work_queue;
struct mlx5_vport *vports;
u32 flags;
@@ -218,7 +228,6 @@ struct mlx5_eswitch {
* and async SRIOV admin state changes
*/
struct mutex state_lock;
- struct esw_mc_addr mc_promisc;
struct {
bool enabled;
@@ -233,8 +242,8 @@ struct mlx5_eswitch {
struct mlx5_esw_functions esw_funcs;
};
-void esw_offloads_cleanup(struct mlx5_eswitch *esw);
-int esw_offloads_init(struct mlx5_eswitch *esw);
+void esw_offloads_disable(struct mlx5_eswitch *esw);
+int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -513,6 +522,11 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
(vport) = &(esw)->vports[i], \
(i) < (esw)->total_vports; (i)++)
+#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \
+ for ((i) = (esw)->total_vports - 1; \
+ (vport) = &(esw)->vports[i], \
+ (i) >= MLX5_VPORT_PF; (i)--)
+
#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
for ((i) = MLX5_VPORT_FIRST_VF; \
(vport) = &(esw)->vports[(i)], \
@@ -574,6 +588,11 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
+void
+mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+ enum mlx5_eswitch_vport_event enabled_events);
+void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 0323fd078271..2adee1b867f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -583,38 +583,15 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
-static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw)
+static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
u8 fdb_to_vport_reg_c_id;
int err;
- err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
- out, sizeof(out));
- if (err)
- return err;
-
- fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.fdb_to_vport_reg_c_id);
-
- fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
- MLX5_SET(modify_esw_vport_context_in, in,
- esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
-
- MLX5_SET(modify_esw_vport_context_in, in,
- field_select.fdb_to_vport_reg_c_id, 1);
-
- return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
- in, sizeof(in));
-}
-
-static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
-{
- u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
- u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
- u8 fdb_to_vport_reg_c_id;
- int err;
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+ return 0;
err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
out, sizeof(out));
@@ -624,7 +601,10 @@ static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
esw_vport_context.fdb_to_vport_reg_c_id);
- fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
+ if (enable)
+ fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
+ else
+ fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
@@ -2120,7 +2100,7 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
return NOTIFY_OK;
}
-int esw_offloads_init(struct mlx5_eswitch *esw)
+int esw_offloads_enable(struct mlx5_eswitch *esw)
{
int err;
@@ -2134,11 +2114,11 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
if (err)
return err;
- if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
- err = mlx5_eswitch_enable_passing_vport_metadata(esw);
- if (err)
- goto err_vport_metadata;
- }
+ err = esw_set_passing_vport_metadata(esw, true);
+ if (err)
+ goto err_vport_metadata;
+
+ mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
err = esw_offloads_load_all_reps(esw);
if (err)
@@ -2152,8 +2132,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
return 0;
err_reps:
- if (mlx5_eswitch_vport_match_metadata_enabled(esw))
- mlx5_eswitch_disable_passing_vport_metadata(esw);
+ mlx5_eswitch_disable_pf_vf_vports(esw);
+ esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
esw_offloads_steering_cleanup(esw);
return err;
@@ -2178,13 +2158,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
return err;
}
-void esw_offloads_cleanup(struct mlx5_eswitch *esw)
+void esw_offloads_disable(struct mlx5_eswitch *esw)
{
mlx5_rdma_disable_roce(esw->dev);
esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_all_reps(esw);
- if (mlx5_eswitch_vport_match_metadata_enabled(esw))
- mlx5_eswitch_disable_passing_vport_metadata(esw);
+ mlx5_eswitch_disable_pf_vf_vports(esw);
+ esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
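esw_set_passing_vport_metadata() folds the old enable/disable pair into one query-modify-write helper keyed on a bool, with the capability check hoisted to the top. A standalone sketch of that merge pattern; the register variable and bit position here are stand-ins, not the real esw_vport_context layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FDB_TO_VPORT_REG_C_0 (1u << 0)	/* illustrative bit position */

static uint8_t vport_context_reg;	/* stands in for the queried field */

/* One helper replaces two near-identical enable/disable functions */
static void set_passing_metadata(bool enable)
{
	uint8_t val = vport_context_reg;	/* "query" */

	if (enable)
		val |= FDB_TO_VPORT_REG_C_0;
	else
		val &= ~FDB_TO_VPORT_REG_C_0;

	vport_context_reg = val;		/* "modify" */
	printf("fdb_to_vport_reg_c_id = 0x%02x\n", vport_context_reg);
}

int main(void)
{
	set_passing_metadata(true);	/* prints 0x01 */
	set_passing_metadata(false);	/* prints 0x00 */
	return 0;
}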
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 7ac1249eadc3..1e3381604b3d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -182,7 +182,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
} else {
MLX5_SET(create_flow_table_in, in,
flow_table_context.table_miss_action,
- ns->def_miss_action);
+ ft->def_miss_action);
}
break;
@@ -262,7 +262,7 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
} else {
MLX5_SET(modify_flow_table_in, in,
flow_table_context.table_miss_action,
- ns->def_miss_action);
+ ft->def_miss_action);
}
}
@@ -566,7 +566,9 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+ enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+ u32 *id)
{
u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
@@ -574,6 +576,7 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
MLX5_SET(alloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
+ MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
@@ -581,6 +584,11 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
return err;
}
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
+{
+ return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
+}
+
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
@@ -615,67 +623,24 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
return 0;
}
-struct mlx5_cmd_fc_bulk {
- u32 id;
- int num;
- int outlen;
- u32 out[0];
-};
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
-{
- struct mlx5_cmd_fc_bulk *b;
- int outlen =
- MLX5_ST_SZ_BYTES(query_flow_counter_out) +
- MLX5_ST_SZ_BYTES(traffic_counter) * num;
-
- b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
- if (!b)
- return NULL;
-
- b->id = id;
- b->num = num;
- b->outlen = outlen;
-
- return b;
-}
-
-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
- kfree(b);
+ return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+ MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}
-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+ u32 *out)
{
+ int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
MLX5_SET(query_flow_counter_in, in, op_mod, 0);
- MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
- MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
- return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
-}
-
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
- struct mlx5_cmd_fc_bulk *b, u32 id,
- u64 *packets, u64 *bytes)
-{
- int index = id - b->id;
- void *stats;
-
- if (index < 0 || index >= b->num) {
- mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
- id, b->id, b->id + b->num - 1);
- return;
- }
-
- stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
- flow_statistics[index]);
- *packets = MLX5_GET64(traffic_counter, stats, packets);
- *bytes = MLX5_GET64(traffic_counter, stats, octets);
+ MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
+ MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
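With the bulk-query rework, callers size one output buffer up front from the bulk length instead of allocating a mlx5_cmd_fc_bulk wrapper per query. A small sketch of the length computation; the two byte sizes are placeholders, where the driver takes both from MLX5_ST_SZ_BYTES() on the real mlx5_ifc layouts:

#include <stdio.h>

/* Placeholder sizes; the driver derives both via MLX5_ST_SZ_BYTES() */
#define QUERY_FLOW_COUNTER_OUT_HDR_BYTES 16
#define TRAFFIC_COUNTER_BYTES 16	/* packets + octets, two u64 fields */

static int bulk_query_out_len(int bulk_len)
{
	return QUERY_FLOW_COUNTER_OUT_HDR_BYTES +
	       TRAFFIC_COUNTER_BYTES * bulk_len;
}

int main(void)
{
	/* One buffer, sized once for the device maximum, serves every query */
	printf("bulk_len 4   -> %d bytes\n", bulk_query_out_len(4));
	printf("bulk_len 128 -> %d bytes\n", bulk_query_out_len(128));
	return 0;
}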
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index e340f9af2f5a..bc4606306009 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -78,20 +78,16 @@ struct mlx5_flow_cmds {
};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+ enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+ u32 *id);
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
u64 *packets, u64 *bytes);
-struct mlx5_cmd_fc_bulk;
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num);
-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b);
-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b);
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
- struct mlx5_cmd_fc_bulk *b, u32 id,
- u64 *packets, u64 *bytes);
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len);
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+ u32 *out);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 3e99799bdb40..7bdec442f0ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -60,7 +60,8 @@
ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
__VA_ARGS__)\
-#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
+#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE, \
+ .def_miss_action = def_miss_act,\
.children = (struct init_tree_node[]) {__VA_ARGS__},\
.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}
@@ -131,33 +132,41 @@ static struct init_tree_node {
int num_leaf_prios;
int prio;
int num_levels;
+ enum mlx5_flow_table_miss_action def_miss_action;
} root_fs = {
.type = FS_TYPE_NAMESPACE,
.ar_size = 7,
- .children = (struct init_tree_node[]) {
- ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
- FS_CHAINING_CAPS,
- ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
- BY_PASS_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, LAG_MIN_LEVEL, 0,
- FS_CHAINING_CAPS,
- ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
- LAG_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
- ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
- FS_CHAINING_CAPS,
- ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
- ETHTOOL_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
- ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
- KERNEL_NIC_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
- FS_CHAINING_CAPS,
- ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
- ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
+ .children = (struct init_tree_node[]){
+ ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
+ LAG_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
+ OFFLOADS_MAX_FT))),
+ ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
+ ETHTOOL_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
+ KERNEL_NIC_TC_NUM_LEVELS),
+ ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
+ KERNEL_NIC_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
+ LEFTOVERS_NUM_LEVELS))),
+ ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
+ ANCHOR_NUM_LEVELS))),
}
};
@@ -167,8 +176,29 @@ static struct init_tree_node egress_root_fs = {
.children = (struct init_tree_node[]) {
ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
FS_CHAINING_CAPS_EGRESS,
- ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ }
+};
+
+#define RDMA_RX_BYPASS_PRIO 0
+#define RDMA_RX_KERNEL_PRIO 1
+static struct init_tree_node rdma_rx_root_fs = {
+ .type = FS_TYPE_NAMESPACE,
+ .ar_size = 2,
+ .children = (struct init_tree_node[]) {
+ [RDMA_RX_BYPASS_PRIO] =
+ ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
+ [RDMA_RX_KERNEL_PRIO] =
+ ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
+ ADD_MULTIPLE_PRIO(1, 1))),
}
};
@@ -1014,6 +1044,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio);
+ ft->def_miss_action = ns->def_miss_action;
err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
if (err)
goto free_ft;
@@ -2056,16 +2087,18 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
if (steering->sniffer_tx_root_ns)
return &steering->sniffer_tx_root_ns->ns;
return NULL;
- case MLX5_FLOW_NAMESPACE_RDMA_RX:
- if (steering->rdma_rx_root_ns)
- return &steering->rdma_rx_root_ns->ns;
- return NULL;
default:
break;
}
if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
root_ns = steering->egress_root_ns;
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_BYPASS_PRIO;
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_KERNEL_PRIO;
} else { /* Must be NIC RX */
root_ns = steering->root_ns;
prio = type;
@@ -2155,7 +2188,8 @@ static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
return ns;
}
-static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
+static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
+ int def_miss_act)
{
struct mlx5_flow_namespace *ns;
@@ -2164,6 +2198,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
return ERR_PTR(-ENOMEM);
fs_init_namespace(ns);
+ ns->def_miss_action = def_miss_act;
tree_init_node(&ns->node, NULL, del_sw_ns);
tree_add_node(&ns->node, &prio->node);
list_add_tail(&ns->node.list, &prio->node.children);
@@ -2230,7 +2265,7 @@ static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
base = &fs_prio->node;
} else if (init_node->type == FS_TYPE_NAMESPACE) {
fs_get_obj(fs_prio, fs_parent_node);
- fs_ns = fs_create_namespace(fs_prio);
+ fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
if (IS_ERR(fs_ns))
return PTR_ERR(fs_ns);
base = &fs_ns->node;
@@ -2494,18 +2529,25 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
- struct fs_prio *prio;
+ int err;
steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
if (!steering->rdma_rx_root_ns)
return -ENOMEM;
- steering->rdma_rx_root_ns->def_miss_action =
- MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN;
+ err = init_root_tree(steering, &rdma_rx_root_fs,
+ &steering->rdma_rx_root_ns->ns.node);
+ if (err)
+ goto out_err;
- /* Create single prio */
- prio = fs_create_prio(&steering->rdma_rx_root_ns->ns, 0, 1);
- return PTR_ERR_OR_ZERO(prio);
+ set_prio_attrs(steering->rdma_rx_root_ns);
+
+ return 0;
+
+out_err:
+ cleanup_root_ns(steering->rdma_rx_root_ns);
+ steering->rdma_rx_root_ns = NULL;
+ return err;
}
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
@@ -2543,7 +2585,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
}
for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) {
- ns = fs_create_namespace(maj_prio);
+ ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
if (IS_ERR(ns)) {
err = PTR_ERR(ns);
goto out_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index c1252d6be0ef..0d16b4b5ab83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -145,6 +145,7 @@ struct mlx5_flow_table {
struct list_head fwd_rules;
u32 flags;
struct rhltable fgs_hash;
+ enum mlx5_flow_table_miss_action def_miss_action;
};
struct mlx5_ft_underlay_qp {
@@ -191,6 +192,7 @@ struct fs_prio {
struct mlx5_flow_namespace {
/* parent == NULL => root ns */
struct fs_node node;
+ enum mlx5_flow_table_miss_action def_miss_action;
};
struct mlx5_flow_group_mask {
@@ -219,7 +221,6 @@ struct mlx5_flow_root_namespace {
struct mutex chain_lock;
struct list_head underlay_qpns;
const struct mlx5_flow_cmds *cmds;
- enum mlx5_flow_table_miss_action def_miss_action;
};
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 1834d9f3aa1c..0c2a704dca42 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -75,7 +75,7 @@ struct mlx5_fc {
* access to counter list:
* - create (user context)
* - mlx5_fc_create() only adds to an addlist to be used by
- * mlx5_fc_stats_query_work(). addlist is a lockless single linked list
+ * mlx5_fc_stats_work(). addlist is a lockless singly linked list
* that doesn't require any additional synchronization when adding single
* node.
* - spawn thread to do the actual destroy
@@ -136,72 +136,69 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
spin_unlock(&fc_stats->counters_idr_lock);
}
-/* The function returns the last counter that was queried so the caller
- * function can continue calling it till all counters are queried.
- */
-static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
- struct mlx5_fc *first,
- u32 last_id)
+static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- struct mlx5_fc *counter = NULL;
- struct mlx5_cmd_fc_bulk *b;
- bool more = false;
- u32 afirst_id;
- int num;
- int err;
+ return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
+ (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+}
- int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
- (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+static void update_counter_cache(int index, u32 *bulk_raw_data,
+ struct mlx5_fc_cache *cache)
+{
+ void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data,
+ flow_statistics[index]);
+ u64 packets = MLX5_GET64(traffic_counter, stats, packets);
+ u64 bytes = MLX5_GET64(traffic_counter, stats, octets);
- /* first id must be aligned to 4 when using bulk query */
- afirst_id = first->id & ~0x3;
+ if (cache->packets == packets)
+ return;
- /* number of counters to query inc. the last counter */
- num = ALIGN(last_id - afirst_id + 1, 4);
- if (num > max_bulk) {
- num = max_bulk;
- last_id = afirst_id + num - 1;
- }
+ cache->packets = packets;
+ cache->bytes = bytes;
+ cache->lastuse = jiffies;
+}
- b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
- if (!b) {
- mlx5_core_err(dev, "Error allocating resources for bulk query\n");
- return NULL;
- }
+static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
+ struct mlx5_fc *first,
+ u32 last_id)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ bool query_more_counters = (first->id <= last_id);
+ int max_bulk_len = get_max_bulk_query_len(dev);
+ u32 *data = fc_stats->bulk_query_out;
+ struct mlx5_fc *counter = first;
+ u32 bulk_base_id;
+ int bulk_len;
+ int err;
- err = mlx5_cmd_fc_bulk_query(dev, b);
- if (err) {
- mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
- goto out;
- }
+ while (query_more_counters) {
+ /* first id must be aligned to 4 when using bulk query */
+ bulk_base_id = counter->id & ~0x3;
- counter = first;
- list_for_each_entry_from(counter, &fc_stats->counters, list) {
- struct mlx5_fc_cache *c = &counter->cache;
- u64 packets;
- u64 bytes;
+ /* number of counters to query inc. the last counter */
+ bulk_len = min_t(int, max_bulk_len,
+ ALIGN(last_id - bulk_base_id + 1, 4));
- if (counter->id > last_id) {
- more = true;
- break;
+ err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
+ data);
+ if (err) {
+ mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
+ return;
}
+ query_more_counters = false;
- mlx5_cmd_fc_bulk_get(dev, b,
- counter->id, &packets, &bytes);
+ list_for_each_entry_from(counter, &fc_stats->counters, list) {
+ int counter_index = counter->id - bulk_base_id;
+ struct mlx5_fc_cache *cache = &counter->cache;
- if (c->packets == packets)
- continue;
+ if (counter->id >= bulk_base_id + bulk_len) {
+ query_more_counters = true;
+ break;
+ }
- c->packets = packets;
- c->bytes = bytes;
- c->lastuse = jiffies;
+ update_counter_cache(counter_index, data, cache);
+ }
}
-
-out:
- mlx5_cmd_fc_bulk_free(b);
-
- return more ? counter : NULL;
}
static void mlx5_free_fc(struct mlx5_core_dev *dev,
@@ -244,8 +241,8 @@ static void mlx5_fc_stats_work(struct work_struct *work)
counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
list);
- while (counter)
- counter = mlx5_fc_stats_query(dev, counter, last->id);
+ if (counter)
+ mlx5_fc_stats_query_counter_range(dev, counter, last->id);
fc_stats->next_query = now + fc_stats->sampling_interval;
}
@@ -324,6 +321,8 @@ EXPORT_SYMBOL(mlx5_fc_destroy);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ int max_bulk_len;
+ int max_out_len;
spin_lock_init(&fc_stats->counters_idr_lock);
idr_init(&fc_stats->counters_idr);
@@ -331,14 +330,24 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
init_llist_head(&fc_stats->addlist);
init_llist_head(&fc_stats->dellist);
+ max_bulk_len = get_max_bulk_query_len(dev);
+ max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
+ fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL);
+ if (!fc_stats->bulk_query_out)
+ return -ENOMEM;
+
fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
if (!fc_stats->wq)
- return -ENOMEM;
+ goto err_wq_create;
fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
return 0;
+
+err_wq_create:
+ kfree(fc_stats->bulk_query_out);
+ return -ENOMEM;
}
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
@@ -352,6 +361,8 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
destroy_workqueue(dev->priv.fc_stats.wq);
dev->priv.fc_stats.wq = NULL;
+ kfree(fc_stats->bulk_query_out);
+
idr_destroy(&fc_stats->counters_idr);
tmplist = llist_del_all(&fc_stats->addlist);
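The new mlx5_fc_stats_query_counter_range() replaces the one-bulk-per-call scheme with windowed queries: each window is anchored at a 4-aligned base id, capped by the device bulk limit, and the list walk decides whether another window is needed. A runnable sketch of just the window arithmetic, with invented ids and bulk limit:

#include <stdio.h>

#define ALIGN4(x) (((x) + 3) & ~3u)

int main(void)
{
	unsigned int first_id = 6, last_id = 21;	/* invented ids */
	unsigned int max_bulk = 8;	/* invented device bulk limit */
	unsigned int id = first_id;

	while (id <= last_id) {
		/* first id must be aligned to 4 when using bulk query */
		unsigned int base = id & ~3u;
		/* number of counters to query, inc. the last counter */
		unsigned int len = ALIGN4(last_id - base + 1);

		if (len > max_bulk)
			len = max_bulk;
		printf("bulk query: base %u, len %u\n", base, len);
		id = base + len;	/* next window, if ids remain */
	}
	return 0;
}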
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
index ea1d4d26ece0..3fc575d1c3ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -2,6 +2,7 @@
// Copyright (c) 2019 Mellanox Technologies.
#include "mlx5_core.h"
+#include "lib/mlx5.h"
int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
void *key, u32 sz_bytes,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index b15b27a497fc..7f70ecb1db6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -495,6 +495,12 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);
if (do_set)
err = set_caps(dev, set_ctx, set_sz,
@@ -1217,8 +1223,10 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
int err = 0;
- if (cleanup)
+ if (cleanup) {
+ mlx5_unregister_device(dev);
mlx5_drain_health_wq(dev);
+ }
mutex_lock(&dev->intf_state_mutex);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@@ -1369,7 +1377,6 @@ static void remove_one(struct pci_dev *pdev)
mlx5_crdump_disable(dev);
mlx5_devlink_unregister(devlink);
- mlx5_unregister_device(dev);
if (mlx5_unload_one(dev, true)) {
mlx5_core_err(dev, "mlx5_unload_one failed\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index b8ba74de9555..c3aea4cc2fff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -53,7 +53,7 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
common = radix_tree_lookup(&table->tree, rsn);
if (common)
- atomic_inc(&common->refcount);
+ refcount_inc(&common->refcount);
spin_unlock_irqrestore(&table->lock, flags);
@@ -62,7 +62,7 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
- if (atomic_dec_and_test(&common->refcount))
+ if (refcount_dec_and_test(&common->refcount))
complete(&common->free);
}
@@ -162,7 +162,7 @@ static int rsc_event_notifier(struct notifier_block *nb,
common = mlx5_get_rsc(table, rsn);
if (!common) {
- mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", rsn);
+ mlx5_core_dbg(dev, "Async event for unknown resource 0x%x\n", rsn);
return NOTIFY_OK;
}
@@ -209,7 +209,7 @@ static int create_resource_common(struct mlx5_core_dev *dev,
if (err)
return err;
- atomic_set(&qp->common.refcount, 1);
+ refcount_set(&qp->common.refcount, 1);
init_completion(&qp->common.free);
qp->pid = current->pid;
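The qp.c conversion from atomic_t to refcount_t keeps the same get/put structure while gaining overflow saturation and misuse warnings from the kernel type. A user-space analog of the lookup/put pattern using C11 atomics, which, unlike refcount_t, would silently wrap:

#include <stdatomic.h>
#include <stdio.h>

struct rsc_common {
	atomic_uint refcount;	/* refcount_t in the kernel version */
};

static void get_rsc(struct rsc_common *c)
{
	atomic_fetch_add(&c->refcount, 1);	/* refcount_inc() */
}

static void put_rsc(struct rsc_common *c)
{
	/* refcount_dec_and_test(): the last ref signals the waiter */
	if (atomic_fetch_sub(&c->refcount, 1) == 1)
		puts("last ref dropped -> complete(&common->free)");
}

int main(void)
{
	struct rsc_common c;

	atomic_init(&c.refcount, 1);	/* create_resource_common() */
	get_rsc(&c);			/* mlx5_get_rsc() in the handler */
	put_rsc(&c);			/* handler done */
	put_rsc(&c);			/* destroy path */
	return 0;
}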
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 17ce9dd56b13..18af6981e0be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -51,7 +51,7 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
return -ENOMEM;
}
- ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX);
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL);
if (!ns) {
mlx5_core_err(dev, "Failed to get RDMA RX namespace");
err = -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index bc86dffdc43c..01c380425f9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -188,8 +188,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
/* new rate limit */
err = mlx5_set_pp_rate_limit_cmd(dev, entry->index, rl);
if (err) {
- mlx5_core_err(dev, "Failed configuring rate limit(err %d): \
- rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
+ mlx5_core_err(dev, "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
err, rl->rate, rl->max_burst_sz,
rl->typical_pkt_sz);
goto out;
@@ -218,8 +217,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
mutex_lock(&table->rl_lock);
entry = find_rl_entry(table, rl);
if (!entry || !entry->refcount) {
- mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u \
- are not configured\n",
+ mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u are not configured\n",
rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
goto out;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 158ac0738911..38b1f402f7ed 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -798,9 +798,8 @@ static int qed_rdma_add_user(void *rdma_cxt,
/* Calculate the corresponding DPI address */
dpi_start_offset = p_hwfn->dpi_start_offset;
- out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
- dpi_start_offset +
- ((out_params->dpi) * p_hwfn->dpi_size));
+ out_params->dpi_addr = p_hwfn->doorbells + dpi_start_offset +
+ out_params->dpi * p_hwfn->dpi_size;
out_params->dpi_phys_addr = p_hwfn->db_phys_addr +
dpi_start_offset +
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index cd07e5301d42..3c91fa97c9a8 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -1654,15 +1654,17 @@ static struct smbd_connection *_smbd_get_connection(
info->send_cq = NULL;
info->recv_cq = NULL;
- info->send_cq = ib_alloc_cq(info->id->device, info,
- info->send_credit_target, 0, IB_POLL_SOFTIRQ);
+ info->send_cq =
+ ib_alloc_cq_any(info->id->device, info,
+ info->send_credit_target, IB_POLL_SOFTIRQ);
if (IS_ERR(info->send_cq)) {
info->send_cq = NULL;
goto alloc_cq_failed;
}
- info->recv_cq = ib_alloc_cq(info->id->device, info,
- info->receive_credit_max, 0, IB_POLL_SOFTIRQ);
+ info->recv_cq =
+ ib_alloc_cq_any(info->id->device, info,
+ info->receive_credit_max, IB_POLL_SOFTIRQ);
if (IS_ERR(info->recv_cq)) {
info->recv_cq = NULL;
goto alloc_cq_failed;
diff --git a/include/Kbuild b/include/Kbuild
index c38f0d46b267..fc2aa4e20658 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -945,12 +945,6 @@ header-test- += net/xdp.h
header-test- += net/xdp_priv.h
header-test- += pcmcia/cistpl.h
header-test- += pcmcia/ds.h
-header-test- += rdma/ib.h
-header-test- += rdma/iw_portmap.h
-header-test- += rdma/opa_port_info.h
-header-test- += rdma/rdmavt_cq.h
-header-test- += rdma/restrack.h
-header-test- += rdma/signature.h
header-test- += rdma/tid_rdma_defs.h
header-test- += scsi/fc/fc_encaps.h
header-test- += scsi/fc/fc_fc2.h
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index c2f056b5766d..54c3ff8d316b 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -328,6 +328,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17,
+ MLX5_EVENT_TYPE_XRQ_ERROR = 0x18,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22,
MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24,
@@ -345,6 +346,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
+ MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d,
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
@@ -584,6 +586,12 @@ struct mlx5_eqe_cq_err {
u8 syndrome;
};
+struct mlx5_eqe_xrq_err {
+ __be32 reserved1[5];
+ __be32 type_xrqn;
+ __be32 reserved2;
+};
+
struct mlx5_eqe_port_state {
u8 reserved0[8];
u8 port;
@@ -698,6 +706,7 @@ union ev_data {
struct mlx5_eqe_pps pps;
struct mlx5_eqe_dct dct;
struct mlx5_eqe_temp_warning temp_warning;
+ struct mlx5_eqe_xrq_err xrq_err;
} __packed;
struct mlx5_eqe {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 0e6da1840c7d..0acd28f2e62c 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -47,6 +47,7 @@
#include <linux/interrupt.h>
#include <linux/idr.h>
#include <linux/notifier.h>
+#include <linux/refcount.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
@@ -398,7 +399,7 @@ enum mlx5_res_type {
struct mlx5_core_rsc_common {
enum mlx5_res_type res;
- atomic_t refcount;
+ refcount_t refcount;
struct completion free;
};
@@ -488,6 +489,7 @@ struct mlx5_fc_stats {
struct delayed_work work;
unsigned long next_query;
unsigned long sampling_interval; /* jiffies */
+ u32 *bulk_query_out;
};
struct mlx5_events;
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index f049af3f3cd8..f16fe96c9acc 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -75,6 +75,7 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_SNIFFER_TX,
MLX5_FLOW_NAMESPACE_EGRESS,
MLX5_FLOW_NAMESPACE_RDMA_RX,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
};
enum {
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index b8b570c30b5e..66b60afd5e06 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -172,6 +172,8 @@ enum {
MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725,
MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726,
MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727,
+ MLX5_CMD_OP_RELEASE_XRQ_ERROR = 0x729,
+ MLX5_CMD_OP_MODIFY_XRQ = 0x72a,
MLX5_CMD_OP_QUERY_ESW_FUNCTIONS = 0x740,
MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
@@ -806,7 +808,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 swp_csum[0x1];
u8 swp_lso[0x1];
u8 cqe_checksum_full[0x1];
- u8 reserved_at_24[0xc];
+ u8 reserved_at_24[0x5];
+ u8 tunnel_stateless_ip_over_ip[0x1];
+ u8 reserved_at_2a[0x6];
u8 max_vxlan_udp_ports[0x8];
u8 reserved_at_38[0x6];
u8 max_geneve_opt_len[0x1];
@@ -944,7 +948,9 @@ struct mlx5_ifc_odp_cap_bits {
struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps;
- u8 reserved_at_100[0x700];
+ struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps;
+
+ u8 reserved_at_120[0x6E0];
};
struct mlx5_ifc_calc_op {
@@ -1040,6 +1046,21 @@ enum {
MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
};
+#define MLX5_FC_BULK_SIZE_FACTOR 128
+
+enum mlx5_fc_bulk_alloc_bitmask {
+ MLX5_FC_BULK_128 = (1 << 0),
+ MLX5_FC_BULK_256 = (1 << 1),
+ MLX5_FC_BULK_512 = (1 << 2),
+ MLX5_FC_BULK_1024 = (1 << 3),
+ MLX5_FC_BULK_2048 = (1 << 4),
+ MLX5_FC_BULK_4096 = (1 << 5),
+ MLX5_FC_BULK_8192 = (1 << 6),
+ MLX5_FC_BULK_16384 = (1 << 7),
+};
+
+#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+
struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x30];
u8 vhca_id[0x10];
@@ -1099,7 +1120,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cache_line_128byte[0x1];
u8 reserved_at_165[0x4];
u8 rts2rts_qp_counters_set_id[0x1];
- u8 reserved_at_16a[0x5];
+ u8 reserved_at_16a[0x2];
+ u8 vnic_env_int_rq_oob[0x1];
+ u8 reserved_at_16d[0x2];
u8 qcam_reg[0x1];
u8 gid_table_size[0x10];
@@ -1228,7 +1251,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_263[0x8];
u8 log_bf_reg_size[0x5];
- u8 reserved_at_270[0xb];
+ u8 reserved_at_270[0x8];
+ u8 lag_tx_port_affinity[0x1];
+ u8 reserved_at_279[0x2];
u8 lag_master[0x1];
u8 num_lag_ports[0x4];
@@ -1244,7 +1269,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_2e0[0x7];
u8 max_qp_mcg[0x19];
- u8 reserved_at_300[0x18];
+ u8 reserved_at_300[0x10];
+ u8 flow_counter_bulk_alloc[0x8];
u8 log_max_mcg[0x8];
u8 reserved_at_320[0x3];
@@ -2754,7 +2780,11 @@ struct mlx5_ifc_vnic_diagnostic_statistics_bits {
u8 transmit_discard_vport_down[0x40];
- u8 reserved_at_140[0xec0];
+ u8 reserved_at_140[0xa0];
+
+ u8 internal_rq_out_of_buffer[0x20];
+
+ u8 reserved_at_200[0xe00];
};
struct mlx5_ifc_traffic_counter_bits {
@@ -2766,7 +2796,7 @@ struct mlx5_ifc_traffic_counter_bits {
struct mlx5_ifc_tisc_bits {
u8 strict_lag_tx_port_affinity[0x1];
u8 tls_en[0x1];
- u8 reserved_at_1[0x2];
+ u8 reserved_at_2[0x2];
u8 lag_tx_port_affinity[0x04];
u8 reserved_at_8[0x4];
@@ -2941,6 +2971,13 @@ enum {
SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
};
+enum {
+ ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0,
+ ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1,
+ ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2,
+ ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3,
+};
+
struct mlx5_ifc_scheduling_context_bits {
u8 element_type[0x8];
u8 reserved_at_8[0x18];
@@ -7817,7 +7854,8 @@ struct mlx5_ifc_alloc_flow_counter_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x38];
+ u8 flow_counter_bulk[0x8];
};
struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
@@ -9570,8 +9608,6 @@ struct mlx5_ifc_query_lag_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
-
struct mlx5_ifc_lagc_bits ctx;
};
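The new flow_counter_bulk field and the MLX5_FC_BULK_* bitmask encode the allocation size as a power of two times 128 counters, which MLX5_FC_BULK_NUM_FCS() expands. A quick sketch of the mapping:

#include <stdio.h>

#define FC_BULK_SIZE_FACTOR 128
/* fc_enum is the raw bitmask value, e.g. MLX5_FC_BULK_512 == (1 << 2) */
#define FC_BULK_NUM_FCS(fc_enum) (FC_BULK_SIZE_FACTOR * (fc_enum))

int main(void)
{
	int bit;

	for (bit = 0; bit < 8; bit++)
		printf("bit %d -> bulk of %5d counters\n",
		       bit, FC_BULK_NUM_FCS(1 << bit));
	/* bit 0 -> 128, bit 1 -> 256, ..., bit 7 -> 16384 */
	return 0;
}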
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index 898f595ea3d6..74efca15fde7 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -225,7 +225,7 @@ struct qed_rdma_start_in_params {
struct qed_rdma_add_user_out_params {
u16 dpi;
- u64 dpi_addr;
+ void __iomem *dpi_addr;
u64 dpi_phys_addr;
u32 dpi_size;
u16 wid_count;
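Typing dpi_addr as void __iomem * lets qed compute the doorbell address with plain pointer arithmetic instead of casting through u64. A user-space sketch of the same arithmetic, with invented offsets and sizes:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	unsigned char doorbells[4096];	/* stands in for the mapped BAR */
	unsigned int dpi = 3, dpi_size = 64, dpi_start_offset = 512;

	/* byte-granular pointer arithmetic, no round trip through u64 */
	unsigned char *dpi_addr = doorbells + dpi_start_offset +
				  dpi * dpi_size;

	printf("dpi_addr offset = %td\n",
	       (ptrdiff_t)(dpi_addr - doorbells));	/* prints 704 */
	return 0;
}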
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index 4f385ec54f80..fe2fc9e91588 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -36,6 +36,8 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/cred.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
struct ib_addr {
union {
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 479db5c98ff6..b37c674b7fe6 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -37,11 +37,6 @@
#include <rdma/ib_verbs.h>
#include <linux/interval_tree.h>
-struct umem_odp_node {
- u64 __subtree_last;
- struct rb_node rb;
-};
-
struct ib_umem_odp {
struct ib_umem umem;
struct ib_ucontext_per_mm *per_mm;
@@ -72,7 +67,15 @@ struct ib_umem_odp {
int npages;
/* Tree tracking */
- struct umem_odp_node interval_tree;
+ struct interval_tree_node interval_tree;
+
+ /*
+ * An implicit odp umem cannot be DMA mapped, has 0 length, and serves
+ * only as an anchor for the driver to hold onto the per_mm. FIXME:
+ * This should be removed and drivers should work with the per_mm
+ * directly.
+ */
+ bool is_implicit_odp;
struct completion notifier_completion;
int dying;
@@ -88,14 +91,13 @@ static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
/* Returns the first page of an ODP umem. */
static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
{
- return ALIGN_DOWN(umem_odp->umem.address, 1UL << umem_odp->page_shift);
+ return umem_odp->interval_tree.start;
}
/* Returns the address of the page after the last one of an ODP umem. */
static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
{
- return ALIGN(umem_odp->umem.address + umem_odp->umem.length,
- 1UL << umem_odp->page_shift);
+ return umem_odp->interval_tree.last + 1;
}
static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
@@ -136,9 +138,12 @@ struct ib_ucontext_per_mm {
struct rcu_head rcu;
};
-int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);
-struct ib_umem_odp *ib_alloc_odp_umem(struct ib_umem_odp *root_umem,
- unsigned long addr, size_t size);
+struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
+ size_t size, int access);
+struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
+ int access);
+struct ib_umem_odp *ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem,
+ unsigned long addr, size_t size);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
@@ -163,8 +168,17 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
* Find first region intersecting with address range.
* Return NULL if not found
*/
-struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
- u64 addr, u64 length);
+static inline struct ib_umem_odp *
+rbt_ib_umem_lookup(struct rb_root_cached *root, u64 addr, u64 length)
+{
+ struct interval_tree_node *node;
+
+ node = interval_tree_iter_first(root, addr, addr + length - 1);
+ if (!node)
+ return NULL;
+ return container_of(node, struct ib_umem_odp, interval_tree);
+}
static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
unsigned long mmu_seq)
@@ -185,9 +199,11 @@ static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
-static inline int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
+static inline struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata,
+ unsigned long addr,
+ size_t size, int access)
{
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}
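Once ib_umem_odp embeds a generic interval_tree_node, the start/end helpers read straight off the node fields; interval trees store an inclusive last address, hence the +1 in ib_umem_end(). A standalone sketch of the convention, with a stand-in struct for the kernel's:

#include <stdio.h>

/* Stand-in for struct interval_tree_node: [start, last], last inclusive */
struct itree_node {
	unsigned long start;
	unsigned long last;
};

static unsigned long umem_start(const struct itree_node *n)
{
	return n->start;
}

/* Address of the page after the last one, hence the + 1 */
static unsigned long umem_end(const struct itree_node *n)
{
	return n->last + 1;
}

int main(void)
{
	struct itree_node n = { .start = 0x1000, .last = 0x2fff };

	printf("start 0x%lx end 0x%lx len 0x%lx\n",
	       umem_start(&n), umem_end(&n),
	       umem_end(&n) - umem_start(&n));	/* len 0x2000 */
	return 0;
}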
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 4f225175cb91..de5bc352f473 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -98,15 +98,54 @@ void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
#if defined(CONFIG_DYNAMIC_DEBUG)
#define ibdev_dbg(__dev, format, args...) \
dynamic_ibdev_dbg(__dev, format, ##args)
-#elif defined(DEBUG)
-#define ibdev_dbg(__dev, format, args...) \
- ibdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif
+#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) \
+ ibdev_level(ibdev, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
+ ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
+ ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
+ ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_err_ratelimited(ibdev, fmt, ...) \
+ ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
+ ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
+ ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_info_ratelimited(ibdev, fmt, ...) \
+ ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+/* descriptor check is first to prevent flooding with "callbacks suppressed" */
+#define ibdev_dbg_ratelimited(ibdev, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs)) \
+ __dynamic_ibdev_dbg(&descriptor, ibdev, fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+#else
+__printf(2, 3) __cold
+static inline
+void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
+#endif
+
union ib_gid {
u8 raw[16];
struct {
@@ -451,6 +490,16 @@ enum ib_port_state {
IB_PORT_ACTIVE_DEFER = 5
};
+enum ib_port_phys_state {
+ IB_PORT_PHYS_STATE_SLEEP = 1,
+ IB_PORT_PHYS_STATE_POLLING = 2,
+ IB_PORT_PHYS_STATE_DISABLED = 3,
+ IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
+ IB_PORT_PHYS_STATE_LINK_UP = 5,
+ IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
+ IB_PORT_PHYS_STATE_PHY_TEST = 7,
+};
+
enum ib_port_width {
IB_WIDTH_1X = 1,
IB_WIDTH_2X = 16,
@@ -1417,8 +1466,6 @@ struct ib_ucontext {
bool cleanup_retryable;
- void (*invalidate_range)(struct ib_umem_odp *umem_odp,
- unsigned long start, unsigned long end);
struct mutex per_mm_list_lock;
struct list_head per_mm_list;
@@ -2378,6 +2425,8 @@ struct ib_device_ops {
u64 iova);
int (*unmap_fmr)(struct list_head *fmr_list);
int (*dealloc_fmr)(struct ib_fmr *fmr);
+ void (*invalidate_range)(struct ib_umem_odp *umem_odp,
+ unsigned long start, unsigned long end);
int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
@@ -3713,6 +3762,25 @@ static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
NULL);
}
+struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
+ int nr_cqe, enum ib_poll_context poll_ctx,
+ const char *caller);
+
+/**
+ * ib_alloc_cq_any: Allocate kernel CQ
+ * @dev: The IB device
+ * @private: Private data attached to the CQE
+ * @nr_cqe: Number of CQEs in the CQ
+ * @poll_ctx: Context used for polling the CQ
+ */
+static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
+ void *private, int nr_cqe,
+ enum ib_poll_context poll_ctx)
+{
+ return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
+ KBUILD_MODNAME);
+}
+
/**
* ib_free_cq_user - Free kernel/user CQ
* @cq: The CQ to free
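The ibdev_*_ratelimited() family relies on each macro expansion owning a static ratelimit state, so every call site gets an independent budget. A simplified, runnable sketch of that per-call-site trick, where a plain countdown stands in for the kernel's time-windowed __ratelimit():

#include <stdio.h>

/* Each expansion owns its own static budget, one per call site */
#define log_ratelimited(fmt, ...)				\
do {								\
	static int budget = 3;					\
	if (budget > 0) {					\
		budget--;					\
		printf(fmt, ##__VA_ARGS__);			\
	}							\
} while (0)

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		log_ratelimited("flood %d\n", i);	/* prints 3 lines */
	log_ratelimited("other site %d\n", 0);	/* separate budget, prints */
	return 0;
}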
diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h
index b9fee7feeeb5..c89535047c42 100644
--- a/include/rdma/iw_portmap.h
+++ b/include/rdma/iw_portmap.h
@@ -33,6 +33,9 @@
#ifndef _IW_PORTMAP_H
#define _IW_PORTMAP_H
+#include <linux/socket.h>
+#include <linux/netlink.h>
+
#define IWPM_ULIBNAME_SIZE 32
#define IWPM_DEVNAME_SIZE 32
#define IWPM_IFNAME_SIZE 16
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
index 7147a9263011..bdbfe25d3854 100644
--- a/include/rdma/opa_port_info.h
+++ b/include/rdma/opa_port_info.h
@@ -33,6 +33,8 @@
#if !defined(OPA_PORT_INFO_H)
#define OPA_PORT_INFO_H
+#include <rdma/opa_smi.h>
+
#define OPA_PORT_LINK_MODE_NOP 0 /* No change */
#define OPA_PORT_LINK_MODE_OPA 4 /* Port mode is OPA */
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index 6631624e4d7c..ab22759de7ea 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -76,28 +76,32 @@ int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
/**
* Send the supplied skb to a specific userspace PID.
+ * @net: Net namespace in which to send the skb
* @skb: The netlink skb
* @pid: Userspace netlink process ID
* Returns 0 on success or a negative error code.
*/
-int rdma_nl_unicast(struct sk_buff *skb, u32 pid);
+int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid);
/**
* Send, with wait/1 retry, the supplied skb to a specific userspace PID.
+ * @net: Net namespace in which to send the skb
* @skb: The netlink skb
* @pid: Userspace netlink process ID
* Returns 0 on success or a negative error code.
*/
-int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid);
+int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid);
/**
* Send the supplied skb to a netlink group.
+ * @net: Net namespace in which to send the skb
* @skb: The netlink skb
* @group: Netlink group ID
* @flags: allocation flags
* Returns 0 on success or a negative error code.
*/
-int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags);
+int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
+ unsigned int group, gfp_t flags);
/**
* Check if there are any listeners to the netlink group
diff --git a/include/rdma/rdmavt_cq.h b/include/rdma/rdmavt_cq.h
index 04c519ef6d71..574eb7278f46 100644
--- a/include/rdma/rdmavt_cq.h
+++ b/include/rdma/rdmavt_cq.h
@@ -53,6 +53,7 @@
#include <linux/kthread.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
/*
* Define an ib_cq_notify value that is not valid so we know when CQ
diff --git a/include/rdma/signature.h b/include/rdma/signature.h
index f24cc2a1d3c5..d16b0fcc8344 100644
--- a/include/rdma/signature.h
+++ b/include/rdma/signature.h
@@ -6,6 +6,8 @@
#ifndef _RDMA_SIGNATURE_H_
#define _RDMA_SIGNATURE_H_
+#include <linux/types.h>
+
enum ib_signature_prot_cap {
IB_PROT_T10DIF_TYPE_1 = 1,
IB_PROT_T10DIF_TYPE_2 = 1 << 1,
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
index 7e9900b0e746..88b6ca70c2fe 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -43,6 +43,7 @@ enum mlx5_ib_uapi_flow_table_type {
MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX = 0x0,
MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1,
MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB = 0x2,
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX = 0x3,
};
enum mlx5_ib_uapi_flow_action_packet_reformat_type {
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index bac8dad5dd69..b21c3c209815 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -685,9 +685,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
goto error;
/* Create the Completion Queue */
- rdma->cq = ib_alloc_cq(rdma->cm_id->device, client,
- opts.sq_depth + opts.rq_depth + 1,
- 0, IB_POLL_SOFTIRQ);
+ rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client,
+ opts.sq_depth + opts.rq_depth + 1,
+ IB_POLL_SOFTIRQ);
if (IS_ERR(rdma->cq))
goto error;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 3fe665152d95..4d3db6ee7f09 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -454,14 +454,14 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
dprintk("svcrdma: error creating PD for connect request\n");
goto errout;
}
- newxprt->sc_sq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_sq_depth,
- 0, IB_POLL_WORKQUEUE);
+ newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, newxprt->sc_sq_depth,
+ IB_POLL_WORKQUEUE);
if (IS_ERR(newxprt->sc_sq_cq)) {
dprintk("svcrdma: error creating SQ CQ for connect request\n");
goto errout;
}
- newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, rq_depth,
- 0, IB_POLL_WORKQUEUE);
+ newxprt->sc_rq_cq =
+ ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);
if (IS_ERR(newxprt->sc_rq_cq)) {
dprintk("svcrdma: error creating RQ CQ for connect request\n");
goto errout;
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 805b1f35e1ca..b10aa16557f0 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -521,18 +521,17 @@ int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
init_waitqueue_head(&ep->rep_connect_wait);
ep->rep_receive_count = 0;
- sendcq = ib_alloc_cq(ia->ri_id->device, NULL,
- ep->rep_attr.cap.max_send_wr + 1,
- ia->ri_id->device->num_comp_vectors > 1 ? 1 : 0,
- IB_POLL_WORKQUEUE);
+ sendcq = ib_alloc_cq_any(ia->ri_id->device, NULL,
+ ep->rep_attr.cap.max_send_wr + 1,
+ IB_POLL_WORKQUEUE);
if (IS_ERR(sendcq)) {
rc = PTR_ERR(sendcq);
goto out1;
}
- recvcq = ib_alloc_cq(ia->ri_id->device, NULL,
- ep->rep_attr.cap.max_recv_wr + 1,
- 0, IB_POLL_WORKQUEUE);
+ recvcq = ib_alloc_cq_any(ia->ri_id->device, NULL,
+ ep->rep_attr.cap.max_recv_wr + 1,
+ IB_POLL_WORKQUEUE);
if (IS_ERR(recvcq)) {
rc = PTR_ERR(recvcq);
goto out2;