Diffstat (limited to 'drivers/infiniband/hw/mlx5/main.c')
-rw-r--r--	drivers/infiniband/hw/mlx5/main.c	680
1 file changed, 443 insertions, 237 deletions
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d3dd290ae1b1..340290b883fe 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -156,6 +156,34 @@ static int get_port_state(struct ib_device *ibdev,
return ret;
}
+static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
+ struct net_device *ndev,
+ u8 *port_num)
+{
+ struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+ struct net_device *rep_ndev;
+ struct mlx5_ib_port *port;
+ int i;
+
+ for (i = 0; i < dev->num_ports; i++) {
+ port = &dev->port[i];
+ if (!port->rep)
+ continue;
+
+ read_lock(&port->roce.netdev_lock);
+ rep_ndev = mlx5_ib_get_rep_netdev(esw,
+ port->rep->vport);
+ if (rep_ndev == ndev) {
+ read_unlock(&port->roce.netdev_lock);
+ *port_num = i + 1;
+ return &port->roce;
+ }
+ read_unlock(&port->roce.netdev_lock);
+ }
+
+ return NULL;
+}
+
static int mlx5_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -172,22 +200,17 @@ static int mlx5_netdev_event(struct notifier_block *this,
switch (event) {
case NETDEV_REGISTER:
+ /* Should already be registered during the load */
+ if (ibdev->is_rep)
+ break;
write_lock(&roce->netdev_lock);
- if (ibdev->rep) {
- struct mlx5_eswitch *esw = ibdev->mdev->priv.eswitch;
- struct net_device *rep_ndev;
-
- rep_ndev = mlx5_ib_get_rep_netdev(esw,
- ibdev->rep->vport);
- if (rep_ndev == ndev)
- roce->netdev = ndev;
- } else if (ndev->dev.parent == &mdev->pdev->dev) {
+ if (ndev->dev.parent == mdev->device)
roce->netdev = ndev;
- }
write_unlock(&roce->netdev_lock);
break;
case NETDEV_UNREGISTER:
+ /* In case of reps, ib device goes away before the netdevs */
write_lock(&roce->netdev_lock);
if (roce->netdev == ndev)
roce->netdev = NULL;
@@ -205,6 +228,10 @@ static int mlx5_netdev_event(struct notifier_block *this,
dev_put(lag_ndev);
}
+ if (ibdev->is_rep)
+ roce = mlx5_get_rep_roce(ibdev, ndev, &port_num);
+ if (!roce)
+ return NOTIFY_DONE;
if ((upper == ndev || (!upper && ndev == roce->netdev))
&& ibdev->ib_active) {
struct ib_event ibev = { };
@@ -257,11 +284,11 @@ static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
/* Ensure ndev does not disappear before we invoke dev_hold()
*/
- read_lock(&ibdev->roce[port_num - 1].netdev_lock);
- ndev = ibdev->roce[port_num - 1].netdev;
+ read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
+ ndev = ibdev->port[port_num - 1].roce.netdev;
if (ndev)
dev_hold(ndev);
- read_unlock(&ibdev->roce[port_num - 1].netdev_lock);
+ read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
out:
mlx5_ib_put_native_port_mdev(ibdev, port_num);
@@ -479,9 +506,14 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
/* Possible bad flows are checked before filling out props so in case
* of an error it will still be zeroed out.
+ * Use native port (port 1) in case of reps
*/
- err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
- mdev_port_num);
+ if (dev->is_rep)
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
+ 1);
+ else
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
+ mdev_port_num);
if (err)
goto out;
ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
@@ -542,52 +574,22 @@ out:
return err;
}
-struct mlx5_ib_vlan_info {
- u16 vlan_id;
- bool vlan;
-};
-
-static int get_lower_dev_vlan(struct net_device *lower_dev, void *data)
-{
- struct mlx5_ib_vlan_info *vlan_info = data;
-
- if (is_vlan_dev(lower_dev)) {
- vlan_info->vlan = true;
- vlan_info->vlan_id = vlan_dev_vlan_id(lower_dev);
- }
- /* We are interested only in first level vlan device, so
- * always return 1 to stop iterating over next level devices.
- */
- return 1;
-}
-
static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
unsigned int index, const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
enum ib_gid_type gid_type = IB_GID_TYPE_IB;
- struct mlx5_ib_vlan_info vlan_info = { };
+ u16 vlan_id = 0xffff;
u8 roce_version = 0;
u8 roce_l3_type = 0;
u8 mac[ETH_ALEN];
+ int ret;
if (gid) {
gid_type = attr->gid_type;
- ether_addr_copy(mac, attr->ndev->dev_addr);
-
- if (is_vlan_dev(attr->ndev)) {
- vlan_info.vlan = true;
- vlan_info.vlan_id = vlan_dev_vlan_id(attr->ndev);
- } else {
- /* If the netdev is upper device and if it's lower
- * lower device is vlan device, consider vlan id of
- * the lower vlan device for this gid entry.
- */
- rcu_read_lock();
- netdev_walk_all_lower_dev_rcu(attr->ndev,
- get_lower_dev_vlan, &vlan_info);
- rcu_read_unlock();
- }
+ ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
+ if (ret)
+ return ret;
}
switch (gid_type) {
@@ -608,7 +610,7 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
roce_l3_type, gid->raw, mac,
- vlan_info.vlan, vlan_info.vlan_id,
+ vlan_id < VLAN_CFI_MASK, vlan_id,
port_num);
}
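The VLAN sentinel convention above is compact but easy to misread: rdma_read_gid_l2_fields() leaves vlan_id at its 0xffff initializer when the GID's netdev carries no VLAN, and a real 802.1Q ID is a 12-bit value, so the comparison against VLAN_CFI_MASK (0x1000 in include/linux/if_vlan.h) doubles as a presence test. A minimal sketch of that test, using a hypothetical helper name:
	/* vlan_id < VLAN_CFI_MASK is true only for a real 12-bit VLAN
	 * ID; the 0xffff sentinel reads as "no VLAN present". */
	static bool vlan_present(u16 vlan_id)
	{
		return vlan_id < VLAN_CFI_MASK;
	}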
@@ -1407,7 +1409,9 @@ static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
{
int ret;
- /* Only link layer == ethernet is valid for representors */
+ /* Only link layer == ethernet is valid for representors
+ * and we always use port 1
+ */
ret = mlx5_query_port_roce(ibdev, port, props);
if (ret || !props)
return ret;
@@ -1954,11 +1958,11 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
print_lib_caps(dev, context->lib_caps);
if (dev->lag_active) {
- u8 port = mlx5_core_native_port_num(dev->mdev);
+ u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
atomic_set(&context->tx_port_affinity,
atomic_add_return(
- 1, &dev->roce[port].tx_port_affinity));
+ 1, &dev->port[port].roce.tx_port_affinity));
}
return 0;
@@ -2011,7 +2015,7 @@ static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
- return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
+ return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
}
static int get_command(unsigned long offset)
@@ -2060,21 +2064,22 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
struct vm_area_struct *vma,
struct mlx5_ib_ucontext *context)
{
- if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
+ !(vma->vm_flags & VM_SHARED))
return -EINVAL;
if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
return -EOPNOTSUPP;
- if (vma->vm_flags & VM_WRITE)
+ if (vma->vm_flags & (VM_WRITE | VM_EXEC))
return -EPERM;
vma->vm_flags &= ~VM_MAYWRITE;
- if (!dev->mdev->clock_info_page)
+ if (!dev->mdev->clock_info)
return -EOPNOTSUPP;
- return rdma_user_mmap_page(&context->ibucontext, vma,
- dev->mdev->clock_info_page, PAGE_SIZE);
+ return vm_insert_page(vma, vma->vm_start,
+ virt_to_page(dev->mdev->clock_info));
}
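The checks above pin the clock-info mapping down to exactly one page, shared, read-only, and non-executable, with VM_MAYWRITE cleared so the mapping can never be upgraded later via mprotect(). A hypothetical userspace counterpart under those constraints (the offset value is assumed; it must encode the MLX5_IB_CLOCK_INFO_V1 index for the driver's get_command()/get_index() decoding):
	#include <err.h>
	#include <sys/mman.h>
	#include <unistd.h>

	/* uverbs_cmd_fd and clock_info_offset are assumed inputs. */
	static void *map_clock_info(int uverbs_cmd_fd, off_t clock_info_offset)
	{
		void *p = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ,
			       MAP_SHARED, uverbs_cmd_fd, clock_info_offset);

		if (p == MAP_FAILED)
			err(1, "mmap clock info");
		return p;
	}
PROT_READ with MAP_SHARED matches the kernel side: VM_SHARED is required, and VM_WRITE or VM_EXEC in the request returns -EPERM.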
static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
@@ -2202,7 +2207,7 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
page_idx + npages)
return -EINVAL;
- pfn = ((pci_resource_start(dev->mdev->pdev, 0) +
+ pfn = ((dev->mdev->bar_addr +
MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
PAGE_SHIFT) +
page_idx;
@@ -2259,89 +2264,200 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
return 0;
}
-struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_dm_alloc_attr *attr,
- struct uverbs_attr_bundle *attrs)
+static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
+ u32 type)
{
- u64 act_size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
- struct mlx5_memic *memic = &to_mdev(ibdev)->memic;
- phys_addr_t memic_addr;
- struct mlx5_ib_dm *dm;
+ switch (type) {
+ case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+ if (!MLX5_CAP_DEV_MEM(dev->mdev, memic))
+ return -EOPNOTSUPP;
+ break;
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ if (!capable(CAP_SYS_RAWIO) ||
+ !capable(CAP_NET_RAW))
+ return -EPERM;
+
+ if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner)))
+ return -EOPNOTSUPP;
+ break;
+ }
+
+ return 0;
+}
+
+static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
+ struct mlx5_ib_dm *dm,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
u64 start_offset;
u32 page_idx;
int err;
- dm = kzalloc(sizeof(*dm), GFP_KERNEL);
- if (!dm)
- return ERR_PTR(-ENOMEM);
-
- mlx5_ib_dbg(to_mdev(ibdev), "alloc_memic req: user_length=0x%llx act_length=0x%llx log_alignment=%d\n",
- attr->length, act_size, attr->alignment);
+ dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
- err = mlx5_cmd_alloc_memic(memic, &memic_addr,
- act_size, attr->alignment);
+ err = mlx5_cmd_alloc_memic(dm_db, &dm->dev_addr,
+ dm->size, attr->alignment);
if (err)
- goto err_free;
+ return err;
- start_offset = memic_addr & ~PAGE_MASK;
- page_idx = (memic_addr - pci_resource_start(memic->dev->pdev, 0) -
- MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
+ page_idx = (dm->dev_addr - pci_resource_start(dm_db->dev->pdev, 0) -
+ MLX5_CAP64_DEV_MEM(dm_db->dev, memic_bar_start_addr)) >>
PAGE_SHIFT;
err = uverbs_copy_to(attrs,
+ MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+ &page_idx, sizeof(page_idx));
+ if (err)
+ goto err_dealloc;
+
+ start_offset = dm->dev_addr & ~PAGE_MASK;
+ err = uverbs_copy_to(attrs,
MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
&start_offset, sizeof(start_offset));
if (err)
goto err_dealloc;
+ bitmap_set(to_mucontext(ctx)->dm_pages, page_idx,
+ DIV_ROUND_UP(dm->size, PAGE_SIZE));
+
+ return 0;
+
+err_dealloc:
+ mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
+
+ return err;
+}
+
+static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
+ struct mlx5_ib_dm *dm,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs,
+ int type)
+{
+ struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
+ u64 act_size;
+ int err;
+
+ /* Allocation size must be a multiple of the basic block size
+ * and a power of 2.
+ */
+ act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
+ act_size = roundup_pow_of_two(act_size);
+
+ dm->size = act_size;
+ err = mlx5_cmd_alloc_sw_icm(dm_db, type, act_size,
+ to_mucontext(ctx)->devx_uid, &dm->dev_addr,
+ &dm->icm_dm.obj_id);
+ if (err)
+ return err;
+
err = uverbs_copy_to(attrs,
- MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
- &page_idx, sizeof(page_idx));
+ MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+ &dm->dev_addr, sizeof(dm->dev_addr));
if (err)
- goto err_dealloc;
+ mlx5_cmd_dealloc_sw_icm(dm_db, type, dm->size,
+ to_mucontext(ctx)->devx_uid,
+ dm->dev_addr, dm->icm_dm.obj_id);
- bitmap_set(to_mucontext(context)->dm_pages, page_idx,
- DIV_ROUND_UP(act_size, PAGE_SIZE));
+ return err;
+}
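A worked example of the two-step SW ICM sizing above, assuming a 64-byte block size for illustration (the real value comes from MLX5_SW_ICM_BLOCK_SIZE(dev)):
	/* attr->length = 150:
	 *   round_up(150, 64)       = 192   (block multiple)
	 *   roundup_pow_of_two(192) = 256   (final dm->size)
	 * Both steps can change the result; a request that already
	 * satisfies both (e.g. 128) passes through unchanged. */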
+
+struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_dm *dm;
+ enum mlx5_ib_uapi_dm_type type;
+ int err;
- dm->dev_addr = memic_addr;
+ err = uverbs_get_const_default(&type, attrs,
+ MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+ MLX5_IB_UAPI_DM_TYPE_MEMIC);
+ if (err)
+ return ERR_PTR(err);
+
+ mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
+ type, attr->length, attr->alignment);
+
+ err = check_dm_type_support(to_mdev(ibdev), type);
+ if (err)
+ return ERR_PTR(err);
+
+ dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return ERR_PTR(-ENOMEM);
+
+ dm->type = type;
+
+ switch (type) {
+ case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+ err = handle_alloc_dm_memic(context, dm,
+ attr,
+ attrs);
+ break;
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ err = handle_alloc_dm_sw_icm(context, dm, attr, attrs, type);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ if (err)
+ goto err_free;
return &dm->ibdm;
-err_dealloc:
- mlx5_cmd_dealloc_memic(memic, memic_addr,
- act_size);
err_free:
kfree(dm);
return ERR_PTR(err);
}
-int mlx5_ib_dealloc_dm(struct ib_dm *ibdm)
+int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
{
- struct mlx5_memic *memic = &to_mdev(ibdm->device)->memic;
+ struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm;
struct mlx5_ib_dm *dm = to_mdm(ibdm);
- u64 act_size = roundup(dm->ibdm.length, MLX5_MEMIC_BASE_SIZE);
u32 page_idx;
int ret;
- ret = mlx5_cmd_dealloc_memic(memic, dm->dev_addr, act_size);
- if (ret)
- return ret;
+ switch (dm->type) {
+ case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+ ret = mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
+ if (ret)
+ return ret;
- page_idx = (dm->dev_addr - pci_resource_start(memic->dev->pdev, 0) -
- MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
- PAGE_SHIFT;
- bitmap_clear(to_mucontext(ibdm->uobject->context)->dm_pages,
- page_idx,
- DIV_ROUND_UP(act_size, PAGE_SIZE));
+ page_idx = (dm->dev_addr -
+ pci_resource_start(dm_db->dev->pdev, 0) -
+ MLX5_CAP64_DEV_MEM(dm_db->dev,
+ memic_bar_start_addr)) >>
+ PAGE_SHIFT;
+ bitmap_clear(ctx->dm_pages, page_idx,
+ DIV_ROUND_UP(dm->size, PAGE_SIZE));
+ break;
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ ret = mlx5_cmd_dealloc_sw_icm(dm_db, dm->type, dm->size,
+ ctx->devx_uid, dm->dev_addr,
+ dm->icm_dm.obj_id);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
kfree(dm);
return 0;
}
-static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
- struct ib_udata *udata)
+static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct mlx5_ib_pd *pd = to_mpd(ibpd);
struct ib_device *ibdev = ibpd->device;
@@ -2350,8 +2466,10 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
u16 uid = 0;
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
- uid = context ? to_mucontext(context)->devx_uid : 0;
+ uid = context ? context->devx_uid : 0;
MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
MLX5_SET(alloc_pd_in, in, uid, uid);
err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
@@ -2361,7 +2479,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
pd->uid = uid;
- if (context) {
+ if (udata) {
resp.pdn = pd->pdn;
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
@@ -2372,7 +2490,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
return 0;
}
-static void mlx5_ib_dealloc_pd(struct ib_pd *pd)
+static void mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct mlx5_ib_dev *mdev = to_mdev(pd->device);
struct mlx5_ib_pd *mpd = to_mpd(pd);
@@ -3151,10 +3269,10 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
if (ft_type == MLX5_IB_FT_RX) {
fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
prio = &dev->flow_db->prios[priority];
- if (!dev->rep &&
+ if (!dev->is_rep &&
MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
- if (!dev->rep &&
+ if (!dev->is_rep &&
MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
reformat_l3_tunnel_to_l2))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
@@ -3164,7 +3282,7 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
log_max_ft_size));
fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
prio = &dev->flow_db->egress_prios[priority];
- if (!dev->rep &&
+ if (!dev->is_rep &&
MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
}
@@ -3197,12 +3315,11 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
if (!ns)
return ERR_PTR(-ENOTSUPP);
- if (num_entries > max_table_size)
- return ERR_PTR(-ENOMEM);
+ max_table_size = min_t(int, num_entries, max_table_size);
ft = prio->flow_table;
if (!ft)
- return _get_prio(ns, prio, priority, num_entries, num_groups,
+ return _get_prio(ns, prio, priority, max_table_size, num_groups,
flags);
return prio;
@@ -3370,7 +3487,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
if (!is_valid_attr(dev->mdev, flow_attr))
return ERR_PTR(-EINVAL);
- if (dev->rep && is_egress)
+ if (dev->is_rep && is_egress)
return ERR_PTR(-EINVAL);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
@@ -3401,13 +3518,17 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
if (!flow_is_multicast_only(flow_attr))
set_underlay_qp(dev, spec, underlay_qpn);
- if (dev->rep) {
+ if (dev->is_rep) {
void *misc;
+ if (!dev->port[flow_attr->port - 1].rep) {
+ err = -EINVAL;
+ goto free;
+ }
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port,
- dev->rep->vport);
+ dev->port[flow_attr->port - 1].rep->vport);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
@@ -3769,11 +3890,16 @@ _get_flow_table(struct mlx5_ib_dev *dev,
bool mcast)
{
struct mlx5_flow_namespace *ns = NULL;
- struct mlx5_ib_flow_prio *prio;
- int max_table_size;
+ struct mlx5_ib_flow_prio *prio = NULL;
+ int max_table_size = 0;
u32 flags = 0;
int priority;
+ if (mcast)
+ priority = MLX5_IB_FLOW_MCAST_PRIO;
+ else
+ priority = ib_prio_to_core_prio(fs_matcher->priority, false);
+
if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
log_max_ft_size));
@@ -3782,20 +3908,18 @@ _get_flow_table(struct mlx5_ib_dev *dev,
if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
reformat_l3_tunnel_to_l2))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- } else { /* Can only be MLX5_FLOW_NAMESPACE_EGRESS */
- max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
- log_max_ft_size));
+ } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) {
+ max_table_size = BIT(
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+ max_table_size = BIT(
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
+ priority = FDB_BYPASS_PATH;
}
- if (max_table_size < MLX5_FS_MAX_ENTRIES)
- return ERR_PTR(-ENOMEM);
-
- if (mcast)
- priority = MLX5_IB_FLOW_MCAST_PRIO;
- else
- priority = ib_prio_to_core_prio(fs_matcher->priority, false);
+ max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
if (!ns)
@@ -3803,13 +3927,18 @@ _get_flow_table(struct mlx5_ib_dev *dev,
if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS)
prio = &dev->flow_db->prios[priority];
- else
+ else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
prio = &dev->flow_db->egress_prios[priority];
+ else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ prio = &dev->flow_db->fdb;
+
+ if (!prio)
+ return ERR_PTR(-EINVAL);
if (prio->flow_table)
return prio;
- return _get_prio(ns, prio, priority, MLX5_FS_MAX_ENTRIES,
+ return _get_prio(ns, prio, priority, max_table_size,
MLX5_FS_MAX_TYPES, flags);
}
@@ -4356,9 +4485,13 @@ static void delay_drop_handler(struct work_struct *work)
static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
struct ib_event *ibev)
{
+ u8 port = (eqe->data.port.port >> 4) & 0xf;
+
switch (eqe->sub_type) {
case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
- schedule_work(&ibdev->delay_drop.delay_drop_work);
+ if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
+ IB_LINK_LAYER_ETHERNET)
+ schedule_work(&ibdev->delay_drop.delay_drop_work);
break;
default: /* do nothing */
return;
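The port number here comes from the high nibble of the EQE's port byte; a hypothetical decode helper makes the arithmetic explicit:
	/* e.g. a raw value of 0x20 decodes to port 2; the delay-drop
	 * work is then scheduled only if that port is Ethernet. */
	static u8 eqe_port_num(u8 raw)
	{
		return (raw >> 4) & 0xf;
	}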
@@ -4505,7 +4638,7 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
int err;
int port;
- for (port = 1; port <= dev->num_ports; port++) {
+ for (port = 1; port <= ARRAY_SIZE(dev->mdev->port_caps); port++) {
dev->mdev->port_caps[port - 1].has_smi = false;
if (MLX5_CAP_GEN(dev->mdev, port_type) ==
MLX5_CAP_PORT_TYPE_IB) {
@@ -4536,7 +4669,7 @@ static void get_ext_port_caps(struct mlx5_ib_dev *dev)
mlx5_query_ext_port_caps(dev, port);
}
-static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
+static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
struct ib_device_attr *dprops = NULL;
struct ib_port_attr *pprops = NULL;
@@ -4551,10 +4684,6 @@ static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
if (!dprops)
goto out;
- err = set_has_smi_cap(dev);
- if (err)
- goto out;
-
err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
if (err) {
mlx5_ib_warn(dev, "query_device failed %d\n", err);
@@ -4583,6 +4712,16 @@ out:
return err;
}
+static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
+{
+ /* For representors use port 1, as this is the only native
+ * port
+ */
+ if (dev->is_rep)
+ return __get_port_caps(dev, 1);
+ return __get_port_caps(dev, port);
+}
+
static void destroy_umrc_res(struct mlx5_ib_dev *dev)
{
int err;
@@ -4592,7 +4731,7 @@ static void destroy_umrc_res(struct mlx5_ib_dev *dev)
mlx5_ib_warn(dev, "mr cache cleanup failed\n");
if (dev->umrc.qp)
- mlx5_ib_destroy_qp(dev->umrc.qp);
+ mlx5_ib_destroy_qp(dev->umrc.qp, NULL);
if (dev->umrc.cq)
ib_free_cq(dev->umrc.cq);
if (dev->umrc.pd)
@@ -4697,7 +4836,7 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
return 0;
error_4:
- mlx5_ib_destroy_qp(qp);
+ mlx5_ib_destroy_qp(qp, NULL);
dev->umrc.qp = NULL;
error_3:
@@ -4748,11 +4887,11 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
devr->p0->uobject = NULL;
atomic_set(&devr->p0->usecnt, 0);
- ret = mlx5_ib_alloc_pd(devr->p0, NULL, NULL);
+ ret = mlx5_ib_alloc_pd(devr->p0, NULL);
if (ret)
goto error0;
- devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
+ devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL);
if (IS_ERR(devr->c0)) {
ret = PTR_ERR(devr->c0);
goto error1;
@@ -4764,7 +4903,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
devr->c0->cq_context = NULL;
atomic_set(&devr->c0->usecnt, 0);
- devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
+ devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
if (IS_ERR(devr->x0)) {
ret = PTR_ERR(devr->x0);
goto error2;
@@ -4775,7 +4914,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
mutex_init(&devr->x0->tgt_qp_mutex);
INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
- devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
+ devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
if (IS_ERR(devr->x1)) {
ret = PTR_ERR(devr->x1);
goto error3;
@@ -4793,19 +4932,21 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
attr.ext.cq = devr->c0;
attr.ext.xrc.xrcd = devr->x0;
- devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
- if (IS_ERR(devr->s0)) {
- ret = PTR_ERR(devr->s0);
+ devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq);
+ if (!devr->s0) {
+ ret = -ENOMEM;
goto error4;
}
+
devr->s0->device = &dev->ib_dev;
devr->s0->pd = devr->p0;
- devr->s0->uobject = NULL;
- devr->s0->event_handler = NULL;
- devr->s0->srq_context = NULL;
devr->s0->srq_type = IB_SRQT_XRC;
devr->s0->ext.xrc.xrcd = devr->x0;
devr->s0->ext.cq = devr->c0;
+ ret = mlx5_ib_create_srq(devr->s0, &attr, NULL);
+ if (ret)
+ goto err_create;
+
atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
atomic_inc(&devr->s0->ext.cq->usecnt);
atomic_inc(&devr->p0->usecnt);
@@ -4815,18 +4956,21 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
attr.attr.max_sge = 1;
attr.attr.max_wr = 1;
attr.srq_type = IB_SRQT_BASIC;
- devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
- if (IS_ERR(devr->s1)) {
- ret = PTR_ERR(devr->s1);
+ devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq);
+ if (!devr->s1) {
+ ret = -ENOMEM;
goto error5;
}
+
devr->s1->device = &dev->ib_dev;
devr->s1->pd = devr->p0;
- devr->s1->uobject = NULL;
- devr->s1->event_handler = NULL;
- devr->s1->srq_context = NULL;
devr->s1->srq_type = IB_SRQT_BASIC;
devr->s1->ext.cq = devr->c0;
+
+ ret = mlx5_ib_create_srq(devr->s1, &attr, NULL);
+ if (ret)
+ goto error6;
+
atomic_inc(&devr->p0->usecnt);
atomic_set(&devr->s1->usecnt, 0);
@@ -4838,16 +4982,20 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
return 0;
+error6:
+ kfree(devr->s1);
error5:
- mlx5_ib_destroy_srq(devr->s0);
+ mlx5_ib_destroy_srq(devr->s0, NULL);
+err_create:
+ kfree(devr->s0);
error4:
- mlx5_ib_dealloc_xrcd(devr->x1);
+ mlx5_ib_dealloc_xrcd(devr->x1, NULL);
error3:
- mlx5_ib_dealloc_xrcd(devr->x0);
+ mlx5_ib_dealloc_xrcd(devr->x0, NULL);
error2:
- mlx5_ib_destroy_cq(devr->c0);
+ mlx5_ib_destroy_cq(devr->c0, NULL);
error1:
- mlx5_ib_dealloc_pd(devr->p0);
+ mlx5_ib_dealloc_pd(devr->p0, NULL);
error0:
kfree(devr->p0);
return ret;
@@ -4855,20 +5003,20 @@ error0:
static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
- struct mlx5_ib_dev *dev =
- container_of(devr, struct mlx5_ib_dev, devr);
int port;
- mlx5_ib_destroy_srq(devr->s1);
- mlx5_ib_destroy_srq(devr->s0);
- mlx5_ib_dealloc_xrcd(devr->x0);
- mlx5_ib_dealloc_xrcd(devr->x1);
- mlx5_ib_destroy_cq(devr->c0);
- mlx5_ib_dealloc_pd(devr->p0);
+ mlx5_ib_destroy_srq(devr->s1, NULL);
+ kfree(devr->s1);
+ mlx5_ib_destroy_srq(devr->s0, NULL);
+ kfree(devr->s0);
+ mlx5_ib_dealloc_xrcd(devr->x0, NULL);
+ mlx5_ib_dealloc_xrcd(devr->x1, NULL);
+ mlx5_ib_destroy_cq(devr->c0, NULL);
+ mlx5_ib_dealloc_pd(devr->p0, NULL);
kfree(devr->p0);
/* Make sure no change P_Key work items are still executing */
- for (port = 0; port < dev->num_ports; ++port)
+ for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
cancel_work_sync(&devr->ports[port].pkey_change_work);
}
@@ -5011,10 +5159,10 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
int err;
- dev->roce[port_num].nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier(&dev->roce[port_num].nb);
+ dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
+ err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
if (err) {
- dev->roce[port_num].nb.notifier_call = NULL;
+ dev->port[port_num].roce.nb.notifier_call = NULL;
return err;
}
@@ -5023,9 +5171,9 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
- if (dev->roce[port_num].nb.notifier_call) {
- unregister_netdevice_notifier(&dev->roce[port_num].nb);
- dev->roce[port_num].nb.notifier_call = NULL;
+ if (dev->port[port_num].roce.nb.notifier_call) {
+ unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
+ dev->port[port_num].roce.nb.notifier_call = NULL;
}
}
@@ -5574,7 +5722,7 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
port_num + 1);
- ibdev->roce[port_num].last_port_state = IB_PORT_DOWN;
+ ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
}
/* The mlx5_ib_multiport_mutex should be held when calling this function */
@@ -5675,7 +5823,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
}
if (bound) {
- dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n");
+ dev_dbg(mpi->mdev->device,
+ "removing port from unaffiliated list.\n");
mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
list_del(&mpi->list);
break;
@@ -5733,7 +5882,10 @@ ADD_UVERBS_ATTRIBUTES_SIMPLE(
UA_MANDATORY),
UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
UVERBS_ATTR_TYPE(u16),
- UA_MANDATORY));
+ UA_OPTIONAL),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+ enum mlx5_ib_uapi_dm_type,
+ UA_OPTIONAL));
ADD_UVERBS_ATTRIBUTES_SIMPLE(
mlx5_ib_flow_action,
@@ -5824,35 +5976,58 @@ static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
return &mcounters->ibcntrs;
}
-void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
+ struct mlx5_core_dev *mdev = dev->mdev;
+
mlx5_ib_cleanup_multiport_master(dev);
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
srcu_barrier(&dev->mr_srcu);
cleanup_srcu_struct(&dev->mr_srcu);
}
- kfree(dev->port);
+
+ WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
+
+ WARN_ON(dev->dm.steering_sw_icm_alloc_blocks &&
+ !bitmap_empty(
+ dev->dm.steering_sw_icm_alloc_blocks,
+ BIT(MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));
+
+ kfree(dev->dm.steering_sw_icm_alloc_blocks);
+
+ WARN_ON(dev->dm.header_modify_sw_icm_alloc_blocks &&
+ !bitmap_empty(dev->dm.header_modify_sw_icm_alloc_blocks,
+ BIT(MLX5_CAP_DEV_MEM(
+ mdev, log_header_modify_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));
+
+ kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
}
-int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
+ u64 header_modify_icm_blocks = 0;
+ u64 steering_icm_blocks = 0;
int err;
int i;
- dev->port = kcalloc(dev->num_ports, sizeof(*dev->port),
- GFP_KERNEL);
- if (!dev->port)
- return -ENOMEM;
-
for (i = 0; i < dev->num_ports; i++) {
spin_lock_init(&dev->port[i].mp.mpi_lock);
- rwlock_init(&dev->roce[i].netdev_lock);
+ rwlock_init(&dev->port[i].roce.netdev_lock);
+ dev->port[i].roce.dev = dev;
+ dev->port[i].roce.native_port_num = i + 1;
+ dev->port[i].roce.last_port_state = IB_PORT_DOWN;
}
err = mlx5_ib_init_multiport_master(dev);
if (err)
- goto err_free_port;
+ return err;
+
+ err = set_has_smi_cap(dev);
+ if (err)
+ return err;
if (!mlx5_core_mp_enabled(mdev)) {
for (i = 1; i <= dev->num_ports; i++) {
@@ -5874,28 +6049,60 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
- dev->ib_dev.dev.parent = &mdev->pdev->dev;
+ dev->ib_dev.dev.parent = mdev->device;
mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list);
spin_lock_init(&dev->reset_flow_resource_lock);
- spin_lock_init(&dev->memic.memic_lock);
- dev->memic.dev = mdev;
+ if (MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM) {
+ if (MLX5_CAP64_DEV_MEM(mdev, steering_sw_icm_start_address)) {
+ steering_icm_blocks =
+ BIT(MLX5_CAP_DEV_MEM(mdev,
+ log_steering_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));
+
+ dev->dm.steering_sw_icm_alloc_blocks =
+ kcalloc(BITS_TO_LONGS(steering_icm_blocks),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!dev->dm.steering_sw_icm_alloc_blocks)
+ goto err_mp;
+ }
+
+ if (MLX5_CAP64_DEV_MEM(mdev,
+ header_modify_sw_icm_start_address)) {
+ header_modify_icm_blocks = BIT(
+ MLX5_CAP_DEV_MEM(
+ mdev, log_header_modify_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));
+
+ dev->dm.header_modify_sw_icm_alloc_blocks =
+ kcalloc(BITS_TO_LONGS(header_modify_icm_blocks),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!dev->dm.header_modify_sw_icm_alloc_blocks)
+ goto err_dm;
+ }
+ }
+
+ spin_lock_init(&dev->dm.lock);
+ dev->dm.dev = mdev;
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
err = init_srcu_struct(&dev->mr_srcu);
if (err)
- goto err_mp;
+ goto err_dm;
}
return 0;
+
+err_dm:
+ kfree(dev->dm.steering_sw_icm_alloc_blocks);
+ kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
+
err_mp:
mlx5_ib_cleanup_multiport_master(dev);
-err_free_port:
- kfree(dev->port);
-
return -ENOMEM;
}
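The bitmap sizing in the ICM setup above follows directly from the capability logs: the number of allocatable blocks is BIT(log_total_size - log_block_size). A worked example with assumed capability values:
	/* log_steering_sw_icm_size   = 20  (1 MiB of ICM, assumed)
	 * MLX5_LOG_SW_ICM_BLOCK_SIZE = 12  (4 KiB blocks, assumed)
	 * steering_icm_blocks = BIT(20 - 12) = 256 blocks,
	 * so kcalloc(BITS_TO_LONGS(256), sizeof(unsigned long), ...)
	 * allocates four 64-bit words on a 64-bit kernel. */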
@@ -5911,20 +6118,6 @@ static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
return 0;
}
-int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev)
-{
- struct mlx5_ib_dev *nic_dev;
-
- nic_dev = mlx5_ib_get_uplink_ibdev(dev->mdev->priv.eswitch);
-
- if (!nic_dev)
- return -EINVAL;
-
- dev->flow_db = nic_dev->flow_db;
-
- return 0;
-}
-
static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
{
kfree(dev->flow_db);
@@ -5984,7 +6177,10 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.req_notify_cq = mlx5_ib_arm_cq,
.rereg_user_mr = mlx5_ib_rereg_user_mr,
.resize_cq = mlx5_ib_resize_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
};
@@ -6020,7 +6216,7 @@ static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
.reg_dm_mr = mlx5_ib_reg_dm_mr,
};
-int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
int err;
@@ -6086,7 +6282,9 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
}
- if (MLX5_CAP_DEV_MEM(mdev, memic))
+ if (MLX5_CAP_DEV_MEM(mdev, memic) ||
+ MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
if (mlx5_accel_ipsec_device_caps(dev->mdev) &
@@ -6126,7 +6324,7 @@ static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
.query_port = mlx5_ib_rep_query_port,
};
-int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
{
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
return 0;
@@ -6144,13 +6342,6 @@ static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
{
u8 port_num;
- int i;
-
- for (i = 0; i < dev->num_ports; i++) {
- dev->roce[i].dev = dev;
- dev->roce[i].native_port_num = i + 1;
- dev->roce[i].last_port_state = IB_PORT_DOWN;
- }
dev->ib_dev.uverbs_ex_cmd_mask |=
(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
@@ -6162,6 +6353,7 @@ static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ /* Register only for native ports */
return mlx5_add_netdev_notifier(dev, port_num);
}
@@ -6172,7 +6364,7 @@ static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
mlx5_remove_netdev_notifier(dev, port_num);
}
-int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
enum rdma_link_layer ll;
@@ -6188,7 +6380,7 @@ int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
return err;
}
-void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_ib_stage_common_roce_cleanup(dev);
}
@@ -6235,12 +6427,12 @@ static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
}
}
-int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
{
return create_dev_resources(&dev->devr);
}
-void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
{
destroy_dev_resources(&dev->devr);
}
@@ -6262,7 +6454,7 @@ static const struct ib_device_ops mlx5_ib_dev_hw_stats_ops = {
.get_hw_stats = mlx5_ib_get_hw_stats,
};
-int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_hw_stats_ops);
@@ -6273,7 +6465,7 @@ int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
return 0;
}
-void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
mlx5_ib_dealloc_counters(dev);
@@ -6303,7 +6495,7 @@ static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
}
-int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
{
int err;
@@ -6318,13 +6510,13 @@ int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
return err;
}
-void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
mlx5_free_bfreg(dev->mdev, &dev->bfreg);
}
-int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
{
const char *name;
@@ -6336,17 +6528,17 @@ int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
return ib_register_device(&dev->ib_dev, name);
}
-void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
{
destroy_umrc_res(dev);
}
-void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
{
ib_unregister_device(&dev->ib_dev);
}
-int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
{
return create_umr_res(dev);
}
@@ -6401,6 +6593,9 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
if (profile->stage[stage].cleanup)
profile->stage[stage].cleanup(dev);
}
+
+ kfree(dev->port);
+ ib_dealloc_device(&dev->ib_dev);
}
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
@@ -6522,6 +6717,9 @@ const struct mlx5_ib_profile uplink_rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
NULL,
mlx5_ib_stage_pre_ib_reg_umr_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
+ mlx5_ib_stage_devx_init,
+ mlx5_ib_stage_devx_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
mlx5_ib_stage_ib_reg_init,
mlx5_ib_stage_ib_reg_cleanup),
@@ -6563,7 +6761,8 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
if (!bound) {
list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
- dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
+ dev_dbg(mdev->device,
+ "no suitable IB device found to bind to, added to unaffiliated list.\n");
}
mutex_unlock(&mlx5_ib_multiport_mutex);
@@ -6575,12 +6774,14 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
enum rdma_link_layer ll;
struct mlx5_ib_dev *dev;
int port_type_cap;
+ int num_ports;
printk_once(KERN_INFO "%s", mlx5_version);
if (MLX5_ESWITCH_MANAGER(mdev) &&
mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
- mlx5_ib_register_vport_reps(mdev);
+ if (!mlx5_core_mp_enabled(mdev))
+ mlx5_ib_register_vport_reps(mdev);
return mdev;
}
@@ -6590,13 +6791,20 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
return mlx5_ib_add_slave_port(mdev);
+ num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
+ MLX5_CAP_GEN(mdev, num_vhca_ports));
dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
if (!dev)
return NULL;
+ dev->port = kcalloc(num_ports, sizeof(*dev->port),
+ GFP_KERNEL);
+ if (!dev->port) {
+ ib_dealloc_device((struct ib_device *)dev);
+ return NULL;
+ }
dev->mdev = mdev;
- dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
- MLX5_CAP_GEN(mdev, num_vhca_ports));
+ dev->num_ports = num_ports;
return __mlx5_ib_add(dev, &pf_profile);
}
@@ -6623,8 +6831,6 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
dev = context;
__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
-
- ib_dealloc_device((struct ib_device *)dev);
}
static struct mlx5_interface mlx5_ib_interface = {