Diffstat (limited to 'drivers/infiniband/hw/mlx5/main.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 177
1 file changed, 114 insertions, 63 deletions
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 94fe253d4956..76d6c2557d0c 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -476,24 +476,51 @@ out:
 	return err;
 }
 
+struct mlx5_ib_vlan_info {
+	u16 vlan_id;
+	bool vlan;
+};
+
+static int get_lower_dev_vlan(struct net_device *lower_dev, void *data)
+{
+	struct mlx5_ib_vlan_info *vlan_info = data;
+
+	if (is_vlan_dev(lower_dev)) {
+		vlan_info->vlan = true;
+		vlan_info->vlan_id = vlan_dev_vlan_id(lower_dev);
+	}
+	/* We are interested only in first level vlan device, so
+	 * always return 1 to stop iterating over next level devices.
+	 */
+	return 1;
+}
+
 static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
 			 unsigned int index, const union ib_gid *gid,
 			 const struct ib_gid_attr *attr)
 {
 	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
+	struct mlx5_ib_vlan_info vlan_info = { };
 	u8 roce_version = 0;
 	u8 roce_l3_type = 0;
-	bool vlan = false;
 	u8 mac[ETH_ALEN];
-	u16 vlan_id = 0;
 
 	if (gid) {
 		gid_type = attr->gid_type;
 		ether_addr_copy(mac, attr->ndev->dev_addr);
 
 		if (is_vlan_dev(attr->ndev)) {
-			vlan = true;
-			vlan_id = vlan_dev_vlan_id(attr->ndev);
+			vlan_info.vlan = true;
+			vlan_info.vlan_id = vlan_dev_vlan_id(attr->ndev);
+		} else {
+			/* If the netdev is an upper device and its lower
+			 * device is a vlan device, consider the vlan id of
+			 * the lower vlan device for this gid entry.
+			 */
+			rcu_read_lock();
+			netdev_walk_all_lower_dev_rcu(attr->ndev,
+					get_lower_dev_vlan, &vlan_info);
+			rcu_read_unlock();
 		}
 	}
 
@@ -514,8 +541,9 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
 	}
 
 	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
-				      roce_l3_type, gid->raw, mac, vlan,
-				      vlan_id, port_num);
+				      roce_l3_type, gid->raw, mac,
+				      vlan_info.vlan, vlan_info.vlan_id,
+				      port_num);
 }
 
 static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
@@ -923,11 +951,11 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
 	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
 
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	if (MLX5_CAP_GEN(mdev, pg))
-		props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
-	props->odp_caps = dev->odp_caps;
-#endif
+	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+		if (MLX5_CAP_GEN(mdev, pg))
+			props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
+		props->odp_caps = dev->odp_caps;
+	}
 
 	if (MLX5_CAP_GEN(mdev, cd))
 		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
@@ -1763,9 +1791,9 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (err)
 		goto out_sys_pages;
 
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
-#endif
+	if (ibdev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING)
+		context->ibucontext.invalidate_range =
+			&mlx5_ib_invalidate_range;
 
 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
 		err = mlx5_ib_devx_create(dev, true);
@@ -1897,12 +1925,10 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
 	struct mlx5_bfreg_info *bfregi;
 
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	/* All umem's must be destroyed before destroying the ucontext. */
 	mutex_lock(&ibcontext->per_mm_list_lock);
 	WARN_ON(!list_empty(&ibcontext->per_mm_list));
 	mutex_unlock(&ibcontext->per_mm_list_lock);
-#endif
 
 	bfregi = &context->bfregi;
 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
@@ -2265,7 +2291,7 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
 	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
 	u16 uid = 0;
 
-	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 	if (!pd)
 		return ERR_PTR(-ENOMEM);
 
@@ -2335,10 +2361,29 @@ static u8 get_match_criteria_enable(u32 *match_criteria)
 	return match_criteria_enable;
 }
 
-static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
+static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
 {
-	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
-	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
+	u8 entry_mask;
+	u8 entry_val;
+	int err = 0;
+
+	if (!mask)
+		goto out;
+
+	entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c,
+			      ip_protocol);
+	entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v,
+			     ip_protocol);
+	if (!entry_mask) {
+		MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
+		MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
+		goto out;
+	}
+	/* Don't override existing ip protocol */
+	if (mask != entry_mask || val != entry_val)
+		err = -EINVAL;
+out:
+	return err;
 }
 
 static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
@@ -2572,8 +2617,10 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
 		set_tos(headers_c, headers_v,
 			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
 
-		set_proto(headers_c, headers_v,
-			  ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
+		if (set_proto(headers_c, headers_v,
+			      ib_spec->ipv4.mask.proto,
+			      ib_spec->ipv4.val.proto))
+			return -EINVAL;
 		break;
 	case IB_FLOW_SPEC_IPV6:
 		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
@@ -2612,9 +2659,10 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
 			ib_spec->ipv6.mask.traffic_class,
 			ib_spec->ipv6.val.traffic_class);
 
-		set_proto(headers_c, headers_v,
-			  ib_spec->ipv6.mask.next_hdr,
-			  ib_spec->ipv6.val.next_hdr);
+		if (set_proto(headers_c, headers_v,
+			      ib_spec->ipv6.mask.next_hdr,
+			      ib_spec->ipv6.val.next_hdr))
+			return -EINVAL;
 
 		set_flow_label(misc_params_c, misc_params_v,
 			       ntohl(ib_spec->ipv6.mask.flow_label),
@@ -2635,10 +2683,8 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
 					 LAST_TCP_UDP_FIELD))
 			return -EOPNOTSUPP;
 
-		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
-			 0xff);
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
-			 IPPROTO_TCP);
+		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP))
+			return -EINVAL;
 
 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
 			 ntohs(ib_spec->tcp_udp.mask.src_port));
@@ -2655,10 +2701,8 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
 					 LAST_TCP_UDP_FIELD))
 			return -EOPNOTSUPP;
 
-		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
-			 0xff);
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
-			 IPPROTO_UDP);
+		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP))
+			return -EINVAL;
 
 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
 			 ntohs(ib_spec->tcp_udp.mask.src_port));
@@ -2674,6 +2718,9 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
 		if (ib_spec->gre.mask.c_ks_res0_ver)
 			return -EOPNOTSUPP;
 
+		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE))
+			return -EINVAL;
+
 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
 			 0xff);
 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
@@ -3825,7 +3872,7 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
 	if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
 		return ERR_PTR(-ENOMEM);
 
-	dst = kzalloc(sizeof(*dst) * 2, GFP_KERNEL);
+	dst = kcalloc(2, sizeof(*dst), GFP_KERNEL);
 	if (!dst)
 		return ERR_PTR(-ENOMEM);
 
@@ -4106,7 +4153,7 @@ static ssize_t fw_pages_show(struct device *device,
 			     struct device_attribute *attr, char *buf)
 {
 	struct mlx5_ib_dev *dev =
-		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
 
 	return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
 }
@@ -4116,7 +4163,7 @@ static ssize_t reg_pages_show(struct device *device,
 			      struct device_attribute *attr, char *buf)
 {
 	struct mlx5_ib_dev *dev =
-		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
 
 	return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
 }
@@ -4126,7 +4173,8 @@ static ssize_t hca_type_show(struct device *device,
 			     struct device_attribute *attr, char *buf)
 {
 	struct mlx5_ib_dev *dev =
-		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
+
 	return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
 }
 static DEVICE_ATTR_RO(hca_type);
@@ -4135,7 +4183,8 @@ static ssize_t hw_rev_show(struct device *device,
 			   struct device_attribute *attr, char *buf)
 {
 	struct mlx5_ib_dev *dev =
-		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
+
 	return sprintf(buf, "%x\n", dev->mdev->rev_id);
 }
 static DEVICE_ATTR_RO(hw_rev);
@@ -4144,7 +4193,8 @@ static ssize_t board_id_show(struct device *device,
 			     struct device_attribute *attr, char *buf)
 {
 	struct mlx5_ib_dev *dev =
-		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
+
 	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN, dev->mdev->board_id);
 }
 
@@ -5508,9 +5558,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
 	mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
 	mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
 
-	err = mlx5_ib_init_cong_debugfs(ibdev, port_num);
-	if (err)
-		goto unbind;
+	mlx5_ib_init_cong_debugfs(ibdev, port_num);
 
 	return true;
 
@@ -5722,11 +5770,12 @@ static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
 
 void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
 {
 	mlx5_ib_cleanup_multiport_master(dev);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	cleanup_srcu_struct(&dev->mr_srcu);
-	drain_workqueue(dev->advise_mr_wq);
-	destroy_workqueue(dev->advise_mr_wq);
-#endif
+	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+		srcu_barrier(&dev->mr_srcu);
+		cleanup_srcu_struct(&dev->mr_srcu);
+		drain_workqueue(dev->advise_mr_wq);
+		destroy_workqueue(dev->advise_mr_wq);
+	}
 	kfree(dev->port);
 }
 
@@ -5779,19 +5828,20 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 	spin_lock_init(&dev->memic.memic_lock);
 	dev->memic.dev = mdev;
 
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	dev->advise_mr_wq = alloc_ordered_workqueue("mlx5_ib_advise_mr_wq", 0);
-	if (!dev->advise_mr_wq) {
-		err = -ENOMEM;
-		goto err_mp;
-	}
+	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+		dev->advise_mr_wq =
+			alloc_ordered_workqueue("mlx5_ib_advise_mr_wq", 0);
+		if (!dev->advise_mr_wq) {
+			err = -ENOMEM;
+			goto err_mp;
+		}
 
-	err = init_srcu_struct(&dev->mr_srcu);
-	if (err) {
-		destroy_workqueue(dev->advise_mr_wq);
-		goto err_mp;
+		err = init_srcu_struct(&dev->mr_srcu);
+		if (err) {
+			destroy_workqueue(dev->advise_mr_wq);
+			goto err_mp;
+		}
 	}
-#endif
 
 	return 0;
 err_mp:
@@ -6154,7 +6204,7 @@ static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
 	return mlx5_ib_odp_init_one(dev);
 }
 
-void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
 {
 	mlx5_ib_odp_cleanup_one(dev);
 }
@@ -6183,8 +6233,9 @@ void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
 
 static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
 {
-	return mlx5_ib_init_cong_debugfs(dev,
-					 mlx5_core_native_port_num(dev->mdev) - 1);
+	mlx5_ib_init_cong_debugfs(dev,
+				  mlx5_core_native_port_num(dev->mdev) - 1);
+	return 0;
 }
 
 static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
@@ -6234,7 +6285,7 @@ int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
 		name = "mlx5_%d";
 	else
 		name = "mlx5_bond_%d";
-	return ib_register_device(&dev->ib_dev, name, NULL);
+	return ib_register_device(&dev->ib_dev, name);
 }
 
 void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
@@ -6485,7 +6536,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
 		return mlx5_ib_add_slave_port(mdev);
 
-	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
+	dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
 	if (!dev)
 		return NULL;
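
A few review notes on the changes above. The main functional change is that set_proto() now refuses to program a conflicting ip_protocol match: the first spec to set it wins, an identical re-set is accepted, and anything else fails with -EINVAL, which parse_flow_attr() propagates to the caller. Below is a minimal userspace sketch of that first-writer-wins logic, using a plain struct in place of the device's fte_match_set_lyr_2_4 layout (the struct and the standalone set_proto() here are illustrative stand-ins, not the mlx5 API):

#include <errno.h>
#include <netinet/in.h>	/* IPPROTO_TCP, IPPROTO_UDP */
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the hardware match-parameter layout. */
struct match_params {
	uint8_t ip_protocol;
};

static int set_proto(struct match_params *mask, struct match_params *val,
		     uint8_t m, uint8_t v)
{
	if (!m)
		return 0;		/* nothing requested */

	if (!mask->ip_protocol) {	/* unset: first writer wins */
		mask->ip_protocol = m;
		val->ip_protocol = v;
		return 0;
	}
	/* Don't override an existing, different ip protocol. */
	if (m != mask->ip_protocol || v != val->ip_protocol)
		return -EINVAL;
	return 0;			/* identical re-set is a no-op */
}

int main(void)
{
	struct match_params mask = { 0 }, val = { 0 };

	printf("tcp: %d\n", set_proto(&mask, &val, 0xff, IPPROTO_TCP)); /* 0 */
	printf("tcp: %d\n", set_proto(&mask, &val, 0xff, IPPROTO_TCP)); /* 0 */
	printf("udp: %d\n", set_proto(&mask, &val, 0xff, IPPROTO_UDP)); /* -EINVAL */
	return 0;
}

Note that in the GRE case the new set_proto() call runs before the pre-existing MLX5_SET() pair, which then rewrites the same mask/value, so the two coexist.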
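The new get_lower_dev_vlan() callback leans on the contract of netdev_walk_all_lower_dev_rcu(): the walk stops as soon as the callback returns nonzero, so returning 1 unconditionally restricts the search to first-level lower devices, exactly as its comment says. A toy model of that stop-on-nonzero contract (the walker, device struct, and names below are simplified stand-ins for illustration, not the real net_device API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_dev {
	const char *name;
	bool is_vlan;
	uint16_t vlan_id;
};

struct vlan_info {
	uint16_t vlan_id;
	bool vlan;
};

/* Visit lower devices in order, stopping as soon as the callback
 * returns nonzero -- the same contract as
 * netdev_walk_all_lower_dev_rcu().
 */
static void walk_lower_devs(struct toy_dev *lowers, int n,
			    int (*fn)(struct toy_dev *, void *), void *data)
{
	for (int i = 0; i < n; i++)
		if (fn(&lowers[i], data))
			return;
}

/* Mirror of get_lower_dev_vlan(): record the vlan id if this lower
 * device is a vlan device, and always return 1 so the walk never
 * looks past the first level.
 */
static int get_lower_dev_vlan(struct toy_dev *lower, void *data)
{
	struct vlan_info *vlan_info = data;

	if (lower->is_vlan) {
		vlan_info->vlan = true;
		vlan_info->vlan_id = lower->vlan_id;
	}
	return 1;
}

int main(void)
{
	struct toy_dev lowers[] = {
		{ .name = "eth0.100", .is_vlan = true, .vlan_id = 100 },
		{ .name = "eth1", .is_vlan = false },
	};
	struct vlan_info vlan_info = { 0 };

	walk_lower_devs(lowers, 2, get_lower_dev_vlan, &vlan_info);
	printf("vlan=%d vlan_id=%u\n", vlan_info.vlan,
	       (unsigned int)vlan_info.vlan_id);
	return 0;
}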
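The #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING blocks become if (IS_ENABLED(...)) blocks. The gain is that the ODP code is now parsed and type-checked in every configuration and merely discarded as dead code when the option is off, so disabled-config breakage shows up at compile time. A compilable sketch of the pattern with a simplified stand-in for the kernel's IS_ENABLED() (the real macro in <linux/kconfig.h> also copes with options that are undefined or set to m):

#include <stdio.h>

/* Simplified stand-in: the option must expand to 0 or 1 here. */
#define IS_ENABLED(option) (option)

#define CONFIG_DEMO_ODP 1	/* flip to 0 to compile the branch out */

static void setup_odp(void)
{
	puts("ODP resources initialized");
}

int main(void)
{
	/* Unlike an #ifdef block, this branch is always seen by the
	 * compiler; with a constant-0 condition it is eliminated as
	 * dead code, so the binary cost is the same.
	 */
	if (IS_ENABLED(CONFIG_DEMO_ODP))
		setup_odp();
	return 0;
}

The one site that is not converted this way, mlx5_ib_alloc_ucontext(), instead checks IB_DEVICE_ON_DEMAND_PAGING in the device capability flags at runtime, so invalidate_range is only wired up when the device actually advertises ODP.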
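Finally, kzalloc(sizeof(*dst) * 2, ...) becoming kcalloc(2, sizeof(*dst), ...) is the usual hardening swap: the count/size form lets the allocator check the multiplication for overflow instead of trusting a caller-computed product. A userspace illustration of the same idea with calloc(), whose mainstream implementations (glibc, musl) likewise reject an overflowing count * size (the sizes are contrived to force the wrap):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = (size_t)-1 / 8;	/* n * 16 wraps past SIZE_MAX */

	/* malloc(n * 16) would compute the wrapped, tiny size at the
	 * call site and happily allocate it; calloc() sees the count
	 * and element size separately and can refuse.
	 */
	void *p = calloc(n, 16);
	if (!p)
		puts("calloc rejected the overflowing request");
	free(p);
	return 0;
}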