From fccec5b89ac61ebe2f353feecd08a16621f2418b Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Thu, 1 Mar 2018 13:58:13 -0800 Subject: RDMA/nldev: provide detailed MR information Implement the RDMA nldev netlink interface for dumping detailed MR information. Signed-off-by: Steve Wise Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford --- include/rdma/ib_verbs.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/rdma/ib_verbs.h') diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 73b2387e3f74..7df3274818f9 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1772,6 +1772,11 @@ struct ib_mr { struct ib_uobject *uobject; /* user */ struct list_head qp_entry; /* FR */ }; + + /* + * Implementation details of the RDMA core, don't use in drivers: + */ + struct rdma_restrack_entry res; }; struct ib_mw { -- cgit v1.2.3-59-g8ed1b From 19b1f54099b6ee334acbfbcfbdffd1d1f057216d Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Sun, 11 Mar 2018 13:51:35 +0200 Subject: RDMA/verbs: Simplify modify QP check All callers to ib_modify_qp_is_ok() provides enum ib_qp_state makes the checks of out-of-scope redundant. Let's remove them together with updating function signature to return boolean result. Signed-off-by: Leon Romanovsky Reviewed-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/core/verbs.c | 20 ++++++++------------ include/rdma/ib_verbs.h | 6 +++--- 2 files changed, 11 insertions(+), 15 deletions(-) (limited to 'include/rdma/ib_verbs.h') diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 873b7aa9e8dd..f7de886da430 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1263,34 +1263,30 @@ static const struct { } }; -int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, - enum ib_qp_type type, enum ib_qp_attr_mask mask, - enum rdma_link_layer ll) +bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, + enum ib_qp_type type, enum ib_qp_attr_mask mask, + enum rdma_link_layer ll) { enum ib_qp_attr_mask req_param, opt_param; - if (cur_state < 0 || cur_state > IB_QPS_ERR || - next_state < 0 || next_state > IB_QPS_ERR) - return 0; - if (mask & IB_QP_CUR_STATE && cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS && cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE) - return 0; + return false; if (!qp_state_table[cur_state][next_state].valid) - return 0; + return false; req_param = qp_state_table[cur_state][next_state].req_param[type]; opt_param = qp_state_table[cur_state][next_state].opt_param[type]; if ((mask & req_param) != req_param) - return 0; + return false; if (mask & ~(req_param | opt_param | IB_QP_STATE)) - return 0; + return false; - return 1; + return true; } EXPORT_SYMBOL(ib_modify_qp_is_ok); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 7df3274818f9..5eb10c2470f0 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2480,9 +2480,9 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata, * transition from cur_state to next_state is allowed by the IB spec, * and that the attribute mask supplied is allowed for the transition. 
*/ -int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, - enum ib_qp_type type, enum ib_qp_attr_mask mask, - enum rdma_link_layer ll); +bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, + enum ib_qp_type type, enum ib_qp_attr_mask mask, + enum rdma_link_layer ll); void ib_register_event_handler(struct ib_event_handler *event_handler); void ib_unregister_event_handler(struct ib_event_handler *event_handler); -- cgit v1.2.3-59-g8ed1b From b26c4a1138dff34cff507bafa4c87e365f4145a6 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Tue, 13 Mar 2018 16:06:12 +0200 Subject: IB/{core, ipoib}: Simplify ib_find_gid() for unused ndev ib_find_gid() is only used by IPoIB driver. For IB link layer, GID table entries are not based on netdevice. Netdevice parameter is unused here. Therefore, it is removed. Reviewed-by: Daniel Jurgens Reviewed-by: Mark Bloch Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Reviewed-by: Yuval Shaia Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/device.c | 3 +-- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 2 +- include/rdma/ib_verbs.h | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) (limited to 'include/rdma/ib_verbs.h') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index bb065c9449be..0ab99e62cc5c 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -1050,13 +1050,12 @@ EXPORT_SYMBOL(ib_modify_port); * a specified GID value occurs. Its searches only for IB link layer. * @device: The device to query. * @gid: The GID value to search for. - * @ndev: The ndev related to the GID to search for. * @port_num: The port number of the device where the GID value was found. * @index: The index into the GID table where the GID was found. This * parameter may be NULL. */ int ib_find_gid(struct ib_device *device, union ib_gid *gid, - struct net_device *ndev, u8 *port_num, u16 *index) + u8 *port_num, u16 *index) { union ib_gid tmp_gid; int ret, port, i; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 10384ea50bed..f47f9ace1f48 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -1085,7 +1085,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) netif_addr_unlock_bh(priv->dev); - err = ib_find_gid(priv->ca, &search_gid, priv->dev, &port, &index); + err = ib_find_gid(priv->ca, &search_gid, &port, &index); netif_addr_lock_bh(priv->dev); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 5eb10c2470f0..ac3791e056cf 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2857,7 +2857,7 @@ int ib_modify_port(struct ib_device *device, struct ib_port_modify *port_modify); int ib_find_gid(struct ib_device *device, union ib_gid *gid, - struct net_device *ndev, u8 *port_num, u16 *index); + u8 *port_num, u16 *index); int ib_find_pkey(struct ib_device *device, u8 port_num, u16 pkey, u16 *index); -- cgit v1.2.3-59-g8ed1b From b19744e965abed7ad0167c25097f405b88ce5d13 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Mon, 19 Mar 2018 08:07:14 +0200 Subject: IB/core: Remove unimplemented ib_peek_cq ib_peek_cq() verb doesn't seem be implemented in current code. There is some past reference to it at [1] about it being unimplemented. Lot of user documentation created out of kdoc refers to this unimplemented API. Therefore, remove unimplemented API. 
[1] http://lists.openfabrics.org/pipermail/ofw/2008-May/002465.html Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- include/rdma/ib_verbs.h | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'include/rdma/ib_verbs.h') diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index ac3791e056cf..3cc48f34e3e4 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -3225,18 +3225,6 @@ static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, return cq->device->poll_cq(cq, num_entries, wc); } -/** - * ib_peek_cq - Returns the number of unreaped completions currently - * on the specified CQ. - * @cq: The CQ to peek. - * @wc_cnt: A minimum number of unreaped completions to check for. - * - * If the number of unreaped completions is greater than or equal to wc_cnt, - * this function returns wc_cnt, otherwise, it returns the actual number of - * unreaped completions. - */ -int ib_peek_cq(struct ib_cq *cq, int wc_cnt); - /** * ib_req_notify_cq - Request completion notification on a CQ. * @cq: The CQ to generate an event for. -- cgit v1.2.3-59-g8ed1b From 0ede73bc012c98fba244b33efbc42e48dd23ee9a Mon Sep 17 00:00:00 2001 From: Matan Barak Date: Mon, 19 Mar 2018 15:02:34 +0200 Subject: IB/uverbs: Extend uverbs_ioctl header with driver_id Extending uverbs_ioctl header with driver_id and another reserved field. driver_id should be used in order to identify the driver. Since every driver could have its own parsing tree, this is necessary for strace support. Downstream patches take off the EXPERIMENTAL flag from the ioctl() IB support and thus we add some reserved fields for future usage. Reviewed-by: Yishai Hadas Signed-off-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/uverbs_ioctl.c | 5 ++++- drivers/infiniband/hw/bnxt_re/main.c | 1 + drivers/infiniband/hw/cxgb3/iwch_provider.c | 1 + drivers/infiniband/hw/cxgb4/provider.c | 1 + drivers/infiniband/hw/hfi1/verbs.c | 2 +- drivers/infiniband/hw/hns/hns_roce_main.c | 1 + drivers/infiniband/hw/i40iw/i40iw_verbs.c | 1 + drivers/infiniband/hw/mlx4/main.c | 1 + drivers/infiniband/hw/mlx5/main.c | 1 + drivers/infiniband/hw/mthca/mthca_provider.c | 1 + drivers/infiniband/hw/nes/nes_verbs.c | 1 + drivers/infiniband/hw/ocrdma/ocrdma_main.c | 1 + drivers/infiniband/hw/qedr/main.c | 1 + drivers/infiniband/hw/qib/qib_verbs.c | 2 +- drivers/infiniband/hw/usnic/usnic_ib_main.c | 1 + drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 1 + drivers/infiniband/sw/rdmavt/vt.c | 3 ++- drivers/infiniband/sw/rxe/rxe_verbs.c | 1 + include/rdma/ib_verbs.h | 2 ++ include/rdma/rdma_vt.h | 2 +- include/uapi/rdma/rdma_user_ioctl_cmds.h | 24 +++++++++++++++++++++++- 21 files changed, 48 insertions(+), 6 deletions(-) (limited to 'include/rdma/ib_verbs.h') diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index 339b85145044..7016e729f139 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c @@ -246,6 +246,9 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev, size_t ctx_size; uintptr_t data[UVERBS_OPTIMIZE_USING_STACK_SZ / sizeof(uintptr_t)]; + if (hdr->driver_id != ib_dev->driver_id) + return -EINVAL; + object_spec = uverbs_get_object(ib_dev, hdr->object_id); if (!object_spec) return -EPROTONOSUPPORT; @@ -350,7 +353,7 @@ long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) goto out; } - if (hdr.reserved) { + if (hdr.reserved1 || 
hdr.reserved2) { err = -EPROTONOSUPPORT; goto out; } diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index f6e361750466..abe0be8b5ddc 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -619,6 +619,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) ibdev->get_hw_stats = bnxt_re_ib_get_hw_stats; ibdev->alloc_hw_stats = bnxt_re_ib_alloc_hw_stats; + ibdev->driver_id = RDMA_DRIVER_BNXT_RE; return ib_register_device(ibdev, NULL); } diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index a578ca559e11..1804b6c4a6ec 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -1439,6 +1439,7 @@ int iwch_register_device(struct iwch_dev *dev) memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name, sizeof(dev->ibdev.iwcm->ifname)); + dev->ibdev.driver_id = RDMA_DRIVER_CXGB3; ret = ib_register_device(&dev->ibdev, NULL); if (ret) goto bail1; diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 42568a4df3f8..dc4eabd85f54 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -629,6 +629,7 @@ void c4iw_register_device(struct work_struct *work) memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name, sizeof(dev->ibdev.iwcm->ifname)); + dev->ibdev.driver_id = RDMA_DRIVER_CXGB4; ret = ib_register_device(&dev->ibdev, NULL); if (ret) goto err_kfree_iwcm; diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 471d55c50066..c8cf4d4984d3 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1960,7 +1960,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) i, ppd->pkeys); - ret = rvt_register_device(&dd->verbs_dev.rdi); + ret = rvt_register_device(&dd->verbs_dev.rdi, RDMA_DRIVER_HFI1); if (ret) goto err_verbs_txreq; diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 6e48b1f507cf..83e21f696bbc 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -526,6 +526,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) /* OTHERS */ ib_dev->get_port_immutable = hns_roce_port_immutable; + ib_dev->driver_id = RDMA_DRIVER_HNS; ret = ib_register_device(ib_dev, NULL); if (ret) { dev_err(dev, "ib_register_device failed!\n"); diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 60e004d2100e..40e4f5ab2b46 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -2927,6 +2927,7 @@ int i40iw_register_rdma_device(struct i40iw_device *iwdev) return -ENOMEM; iwibdev = iwdev->iwibdev; + iwibdev->ibdev.driver_id = RDMA_DRIVER_I40IW; ret = ib_register_device(&iwibdev->ibdev, NULL); if (ret) goto error; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index b9befda1eb27..d1be3231f4f0 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -2955,6 +2955,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) if (mlx4_ib_alloc_diag_counters(ibdev)) goto err_steer_free_bitmap; + ibdev->ib_dev.driver_id = RDMA_DRIVER_MLX4; if (ib_register_device(&ibdev->ib_dev, NULL)) goto err_diag_counters; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 
d06aae9aa600..6b50711df786 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -4779,6 +4779,7 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) dev->ib_dev.uverbs_ex_cmd_mask |= (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) | (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW); + dev->ib_dev.driver_id = RDMA_DRIVER_MLX5; err = init_node_data(dev); if (err) diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 6fee7795d1c8..541f237965c7 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -1295,6 +1295,7 @@ int mthca_register_device(struct mthca_dev *dev) mutex_init(&dev->cap_mask_mutex); + dev->ib_dev.driver_id = RDMA_DRIVER_MTHCA; ret = ib_register_device(&dev->ib_dev, NULL); if (ret) return ret; diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 162475aeeedd..1040a6e34230 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -3854,6 +3854,7 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev) struct nes_adapter *nesadapter = nesdev->nesadapter; int i, ret; + nesvnic->nesibdev->ibdev.driver_id = RDMA_DRIVER_NES; ret = ib_register_device(&nesvnic->nesibdev->ibdev, NULL); if (ret) { return ret; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 42dc0de54cb8..4547aa28d4ae 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -215,6 +215,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) dev->ibdev.destroy_srq = ocrdma_destroy_srq; dev->ibdev.post_srq_recv = ocrdma_post_srq_recv; } + dev->ibdev.driver_id = RDMA_DRIVER_OCRDMA; return ib_register_device(&dev->ibdev, NULL); } diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index db4bf97c0e15..f865c0991ad9 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -257,6 +257,7 @@ static int qedr_register_device(struct qedr_dev *dev) dev->ibdev.get_link_layer = qedr_link_layer; dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str; + dev->ibdev.driver_id = RDMA_DRIVER_QEDR; return ib_register_device(&dev->ibdev, NULL); } diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index fabee760407e..3977abbc83ad 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -1646,7 +1646,7 @@ int qib_register_ib_device(struct qib_devdata *dd) dd->rcd[ctxt]->pkeys); } - ret = rvt_register_device(&dd->verbs_dev.rdi); + ret = rvt_register_device(&dd->verbs_dev.rdi, RDMA_DRIVER_QIB); if (ret) goto err_tx; diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index f45e99a938e0..aed1ca390e30 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -433,6 +433,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev) us_ibdev->ib_dev.get_dev_fw_str = usnic_get_dev_fw_str; + us_ibdev->ib_dev.driver_id = RDMA_DRIVER_USNIC; if (ib_register_device(&us_ibdev->ib_dev, NULL)) goto err_fwd_dealloc; diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index d650a9fcde24..4834460e2a0b 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -276,6 +276,7 @@ 
static int pvrdma_register_device(struct pvrdma_dev *dev) if (!dev->srq_tbl) goto err_qp_free; } + dev->ib_dev.driver_id = RDMA_DRIVER_VMW_PVRDMA; spin_lock_init(&dev->srq_tbl_lock); ret = ib_register_device(&dev->ib_dev, NULL); diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c index a67b0ddc2230..434199d0bc96 100644 --- a/drivers/infiniband/sw/rdmavt/vt.c +++ b/drivers/infiniband/sw/rdmavt/vt.c @@ -730,7 +730,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb) * * Return: 0 on success otherwise an errno. */ -int rvt_register_device(struct rvt_dev_info *rdi) +int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id) { int ret = 0, i; @@ -831,6 +831,7 @@ int rvt_register_device(struct rvt_dev_info *rdi) rdi->ibdev.node_type = RDMA_NODE_IB_CA; rdi->ibdev.num_comp_vectors = 1; + rdi->ibdev.driver_id = driver_id; /* We are now good to announce we exist */ ret = ib_register_device(&rdi->ibdev, rdi->driver_f.port_callback); if (ret) { diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index ced79e49234b..5ef8c3333e43 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -1335,6 +1335,7 @@ int rxe_register_device(struct rxe_dev *rxe) } rxe->tfm = tfm; + dev->driver_id = RDMA_DRIVER_RXE; err = ib_register_device(dev, NULL); if (err) { pr_warn("%s failed with error %d\n", __func__, err); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 3cc48f34e3e4..2357f2b29610 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -64,6 +64,7 @@ #include #include #include +#include #define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN @@ -2385,6 +2386,7 @@ struct ib_device { int comp_vector); struct uverbs_root_spec *specs_root; + enum rdma_driver_id driver_id; }; struct ib_client { diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index 4118324a0310..3f4c187e435d 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -538,7 +538,7 @@ static inline void rvt_mod_retry_timer(struct rvt_qp *qp) struct rvt_dev_info *rvt_alloc_device(size_t size, int nports); void rvt_dealloc_device(struct rvt_dev_info *rdi); -int rvt_register_device(struct rvt_dev_info *rvd); +int rvt_register_device(struct rvt_dev_info *rvd, u32 driver_id); void rvt_unregister_device(struct rvt_dev_info *rvd); int rvt_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr); int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port, diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h index aa1fffe3620b..40063cf970aa 100644 --- a/include/uapi/rdma/rdma_user_ioctl_cmds.h +++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h @@ -64,8 +64,30 @@ struct ib_uverbs_ioctl_hdr { __u16 object_id; __u16 method_id; __u16 num_attrs; - __aligned_u64 reserved; + __aligned_u64 reserved1; + __u32 driver_id; + __u32 reserved2; struct ib_uverbs_attr attrs[0]; }; +enum rdma_driver_id { + RDMA_DRIVER_UNKNOWN, + RDMA_DRIVER_MLX5, + RDMA_DRIVER_MLX4, + RDMA_DRIVER_CXGB3, + RDMA_DRIVER_CXGB4, + RDMA_DRIVER_MTHCA, + RDMA_DRIVER_BNXT_RE, + RDMA_DRIVER_OCRDMA, + RDMA_DRIVER_NES, + RDMA_DRIVER_I40IW, + RDMA_DRIVER_VMW_PVRDMA, + RDMA_DRIVER_QEDR, + RDMA_DRIVER_HNS, + RDMA_DRIVER_USNIC, + RDMA_DRIVER_RXE, + RDMA_DRIVER_HFI1, + RDMA_DRIVER_QIB, +}; + #endif -- cgit v1.2.3-59-g8ed1b From c66db31113948ba61682f55265df8d032e793fcc Mon Sep 17 00:00:00 2001 From: Matan Barak Date: Mon, 19 Mar 2018 15:02:36 +0200 Subject: IB/uverbs: 
Safely extend existing attributes Previously, we've used UVERBS_ATTR_SPEC_F_MIN_SZ for extending existing attributes. The behavior of this flag was the kernel accepts anything bigger than the minimum size it specified. This is unsafe, since in order to safely extend an attribute, we need to make sure unknown size is zeroed. Replacing UVERBS_ATTR_SPEC_F_MIN_SZ with UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO, which essentially checks that the unknown size is zero. In addition, attributes are now decorated with UVERBS_ATTR_TYPE and UVERBS_ATTR_STRUCT, so we can provide the minimum and known length. Users of this flag needs to use copy_from_or_zero functions/macros. Reviewed-by: Yishai Hadas Signed-off-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/uverbs_ioctl.c | 26 +++++++++- drivers/infiniband/core/uverbs_ioctl_merge.c | 2 +- drivers/infiniband/core/uverbs_std_types.c | 20 +++++--- include/rdma/ib_verbs.h | 13 +++-- include/rdma/uverbs_ioctl.h | 74 ++++++++++++++++++++++------ 5 files changed, 104 insertions(+), 31 deletions(-) (limited to 'include/rdma/ib_verbs.h') diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index 82a1775ba657..1e6bf2488584 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c @@ -35,6 +35,17 @@ #include "rdma_core.h" #include "uverbs.h" +static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr, + u16 len) +{ + if (uattr->len > sizeof(((struct ib_uverbs_attr *)0)->data)) + return ib_is_buffer_cleared(u64_to_user_ptr(uattr->data) + len, + uattr->len - len); + + return !memchr_inv((const void *)&uattr->data + len, + 0, uattr->len - len); +} + static int uverbs_process_attr(struct ib_device *ibdev, struct ib_ucontext *ucontext, const struct ib_uverbs_attr *uattr, @@ -68,9 +79,20 @@ static int uverbs_process_attr(struct ib_device *ibdev, switch (spec->type) { case UVERBS_ATTR_TYPE_PTR_IN: + /* Ensure that any data provided by userspace beyond the known + * struct is zero. Userspace that knows how to use some future + * longer struct will fail here if used with an old kernel and + * non-zero content, making ABI compat/discovery simpler. 
+ */ + if (uattr->len > spec->ptr.len && + spec->flags & UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO && + !uverbs_is_attr_cleared(uattr, spec->ptr.len)) + return -EOPNOTSUPP; + + /* fall through */ case UVERBS_ATTR_TYPE_PTR_OUT: - if (uattr->len < spec->ptr.len || - (!(spec->flags & UVERBS_ATTR_SPEC_F_MIN_SZ) && + if (uattr->len < spec->ptr.min_len || + (!(spec->flags & UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO) && uattr->len > spec->ptr.len)) return -EINVAL; diff --git a/drivers/infiniband/core/uverbs_ioctl_merge.c b/drivers/infiniband/core/uverbs_ioctl_merge.c index 62e1eb1d2a28..0f88a1919d51 100644 --- a/drivers/infiniband/core/uverbs_ioctl_merge.c +++ b/drivers/infiniband/core/uverbs_ioctl_merge.c @@ -379,7 +379,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy access but isn't mandatory\n", min_id) || WARN(IS_ATTR_OBJECT(attr) && - attr->flags & UVERBS_ATTR_SPEC_F_MIN_SZ, + attr->flags & UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO, "ib_uverbs: Tried to merge attr (%d) but it's an object with min_sz flag\n", min_id)) { res = -EINVAL; diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c index e4a4b184a6bc..0a2d8532de21 100644 --- a/drivers/infiniband/core/uverbs_std_types.c +++ b/drivers/infiniband/core/uverbs_std_types.c @@ -216,9 +216,11 @@ static int uverbs_hot_unplug_completion_event_file(struct ib_uobject_file *uobj_ * spec. */ static const struct uverbs_attr_def uverbs_uhw_compat_in = - UVERBS_ATTR_PTR_IN_SZ(UVERBS_ATTR_UHW_IN, 0, UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ)); + UVERBS_ATTR_PTR_IN_SZ(UVERBS_ATTR_UHW_IN, UVERBS_ATTR_SIZE(0, USHRT_MAX), + UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO)); static const struct uverbs_attr_def uverbs_uhw_compat_out = - UVERBS_ATTR_PTR_OUT_SZ(UVERBS_ATTR_UHW_OUT, 0, UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ)); + UVERBS_ATTR_PTR_OUT_SZ(UVERBS_ATTR_UHW_OUT, UVERBS_ATTR_SIZE(0, USHRT_MAX), + UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO)); static void create_udata(struct uverbs_attr_bundle *ctx, struct ib_udata *udata) @@ -350,17 +352,19 @@ static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_CQ_CREATE, &UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_CQ_HANDLE, UVERBS_OBJECT_CQ, UVERBS_ACCESS_NEW, UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), - &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_CQE, u32, + &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_CQE, + UVERBS_ATTR_TYPE(u32), UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), - &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_USER_HANDLE, u64, + &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_USER_HANDLE, + UVERBS_ATTR_TYPE(u64), UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), &UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL, UVERBS_OBJECT_COMP_CHANNEL, UVERBS_ACCESS_READ), - &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_COMP_VECTOR, u32, + &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_COMP_VECTOR, UVERBS_ATTR_TYPE(u32), UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), - &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_FLAGS, u32), - &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_CQ_RESP_CQE, u32, + &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_FLAGS, UVERBS_ATTR_TYPE(u32)), + &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_CQ_RESP_CQE, UVERBS_ATTR_TYPE(u32), UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), &uverbs_uhw_compat_in, &uverbs_uhw_compat_out); @@ -394,7 +398,7 @@ static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_CQ_DESTROY, UVERBS_ACCESS_DESTROY, UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_CQ_RESP, - struct ib_uverbs_destroy_cq_resp, + 
UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_cq_resp), UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY))); DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_COMP_CHANNEL, diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 2357f2b29610..e9288d0f627e 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2446,11 +2446,9 @@ static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; } -static inline bool ib_is_udata_cleared(struct ib_udata *udata, - size_t offset, - size_t len) +static inline bool ib_is_buffer_cleared(const void __user *p, + size_t len) { - const void __user *p = udata->inbuf + offset; bool ret; u8 *buf; @@ -2466,6 +2464,13 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata, return ret; } +static inline bool ib_is_udata_cleared(struct ib_udata *udata, + size_t offset, + size_t len) +{ + return ib_is_buffer_cleared(udata->inbuf + offset, len); +} + /** * ib_modify_qp_is_ok - Check that the supplied attribute mask * contains all required attributes and no attributes not allowed for diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h index cd7c3e40c6cc..c4ee65b20bb7 100644 --- a/include/rdma/uverbs_ioctl.h +++ b/include/rdma/uverbs_ioctl.h @@ -62,8 +62,8 @@ enum uverbs_obj_access { enum { UVERBS_ATTR_SPEC_F_MANDATORY = 1U << 0, - /* Support extending attributes by length */ - UVERBS_ATTR_SPEC_F_MIN_SZ = 1U << 1, + /* Support extending attributes by length, validate all unknown size == zero */ + UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO = 1U << 1, }; /* Specification of a single attribute inside the ioctl message */ @@ -79,7 +79,10 @@ struct uverbs_attr_spec { enum uverbs_attr_type type; /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */ u8 flags; + /* Current known size to kernel */ u16 len; + /* User isn't allowed to provide something < min_len */ + u16 min_len; } ptr; struct { enum uverbs_attr_type type; @@ -177,30 +180,41 @@ struct uverbs_object_tree_def { }; #define UA_FLAGS(_flags) .flags = _flags -#define __UVERBS_ATTR0(_id, _len, _type, ...) \ +#define __UVERBS_ATTR0(_id, _type, _fld, _attr, ...) \ ((const struct uverbs_attr_def) \ - {.id = _id, .attr = {{.ptr = {.type = _type, .len = _len, .flags = 0, } }, } }) -#define __UVERBS_ATTR1(_id, _len, _type, _flags) \ + {.id = _id, .attr = {{._fld = {.type = _type, _attr, .flags = 0, } }, } }) +#define __UVERBS_ATTR1(_id, _type, _fld, _attr, _extra1, ...) \ ((const struct uverbs_attr_def) \ - {.id = _id, .attr = {{.ptr = {.type = _type, .len = _len, _flags } },} }) -#define __UVERBS_ATTR(_id, _len, _type, _flags, _n, ...) \ - __UVERBS_ATTR##_n(_id, _len, _type, _flags) + {.id = _id, .attr = {{._fld = {.type = _type, _attr, _extra1 } },} }) +#define __UVERBS_ATTR2(_id, _type, _fld, _attr, _extra1, _extra2) \ + ((const struct uverbs_attr_def) \ + {.id = _id, .attr = {{._fld = {.type = _type, _attr, _extra1, _extra2 } },} }) +#define __UVERBS_ATTR(_id, _type, _fld, _attr, _extra1, _extra2, _n, ...) 
\ + __UVERBS_ATTR##_n(_id, _type, _fld, _attr, _extra1, _extra2) + +#define UVERBS_ATTR_TYPE(_type) \ + .min_len = sizeof(_type), .len = sizeof(_type) +#define UVERBS_ATTR_STRUCT(_type, _last) \ + .min_len = ((uintptr_t)(&((_type *)0)->_last + 1)), .len = sizeof(_type) +#define UVERBS_ATTR_SIZE(_min_len, _len) \ + .min_len = _min_len, .len = _len + /* * In new compiler, UVERBS_ATTR could be simplified by declaring it as * [_id] = {.type = _type, .len = _len, ##__VA_ARGS__} * But since we support older compilers too, we need the more complex code. */ -#define UVERBS_ATTR(_id, _len, _type, ...) \ - __UVERBS_ATTR(_id, _len, _type, ##__VA_ARGS__, 1, 0) +#define UVERBS_ATTR(_id, _type, _fld, _attr, ...) \ + __UVERBS_ATTR(_id, _type, _fld, _attr, ##__VA_ARGS__, 2, 1, 0) #define UVERBS_ATTR_PTR_IN_SZ(_id, _len, ...) \ - UVERBS_ATTR(_id, _len, UVERBS_ATTR_TYPE_PTR_IN, ##__VA_ARGS__) + UVERBS_ATTR(_id, UVERBS_ATTR_TYPE_PTR_IN, ptr, _len, ##__VA_ARGS__) /* If sizeof(_type) <= sizeof(u64), this will be inlined rather than a pointer */ #define UVERBS_ATTR_PTR_IN(_id, _type, ...) \ - UVERBS_ATTR_PTR_IN_SZ(_id, sizeof(_type), ##__VA_ARGS__) + UVERBS_ATTR_PTR_IN_SZ(_id, _type, ##__VA_ARGS__) #define UVERBS_ATTR_PTR_OUT_SZ(_id, _len, ...) \ - UVERBS_ATTR(_id, _len, UVERBS_ATTR_TYPE_PTR_OUT, ##__VA_ARGS__) + UVERBS_ATTR(_id, UVERBS_ATTR_TYPE_PTR_OUT, ptr, _len, ##__VA_ARGS__) #define UVERBS_ATTR_PTR_OUT(_id, _type, ...) \ - UVERBS_ATTR_PTR_OUT_SZ(_id, sizeof(_type), ##__VA_ARGS__) + UVERBS_ATTR_PTR_OUT_SZ(_id, _type, ##__VA_ARGS__) /* * In new compiler, UVERBS_ATTR_IDR (and FD) could be simplified by declaring @@ -396,8 +410,8 @@ static inline int _uverbs_copy_from(void *to, /* * Validation ensures attr->ptr_attr.len >= size. If the caller is - * using UVERBS_ATTR_SPEC_F_MIN_SZ then it must call copy_from with - * the right size. + * using UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO then it must call + * uverbs_copy_from_or_zero. */ if (unlikely(size < attr->ptr_attr.len)) return -EINVAL; @@ -411,9 +425,37 @@ static inline int _uverbs_copy_from(void *to, return 0; } +static inline int _uverbs_copy_from_or_zero(void *to, + const struct uverbs_attr_bundle *attrs_bundle, + size_t idx, + size_t size) +{ + const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx); + size_t min_size; + + if (IS_ERR(attr)) + return PTR_ERR(attr); + + min_size = min_t(size_t, size, attr->ptr_attr.len); + + if (uverbs_attr_ptr_is_inline(attr)) + memcpy(to, &attr->ptr_attr.data, min_size); + else if (copy_from_user(to, u64_to_user_ptr(attr->ptr_attr.data), + min_size)) + return -EFAULT; + + if (size > min_size) + memset(to + min_size, 0, size - min_size); + + return 0; +} + #define uverbs_copy_from(to, attrs_bundle, idx) \ _uverbs_copy_from(to, attrs_bundle, idx, sizeof(*to)) +#define uverbs_copy_from_or_zero(to, attrs_bundle, idx) \ + _uverbs_copy_from_or_zero(to, attrs_bundle, idx, sizeof(*to)) + /* ================================================= * Definitions -> Specs infrastructure * ================================================= -- cgit v1.2.3-59-g8ed1b From e945130b52bea65d15f9bdf54949d4cb7a88db7f Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 27 Mar 2018 15:51:05 +0300 Subject: IB/core: Protect against concurrent access to hardware stats Currently access to hardware stats buffer isn't protected, this can result in multiple writes and reads at the same time to the same memory location. This can lead to providing an incorrect value to the user. Add a mutex to protect against it. 
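In sketch form, the locking rule this introduces looks roughly like the following (read_one_counter() is a hypothetical wrapper used only for illustration; update_hw_stats() and print_hw_stat() are the existing sysfs helpers touched in the hunk below, and error handling is simplified):

    /* Sketch: every reader of the 64-bit counters takes the new mutex
     * around both the refresh and the read, so a concurrent writer
     * cannot hand back a torn or half-updated value.
     */
    static ssize_t read_one_counter(struct ib_device *dev,
                                    struct rdma_hw_stats *stats,
                                    u8 port_num, int index, char *buf)
    {
            ssize_t ret;

            mutex_lock(&stats->lock);
            ret = update_hw_stats(dev, stats, port_num, index);
            if (!ret)
                    ret = print_hw_stat(stats, index, buf);
            mutex_unlock(&stats->lock);
            return ret;
    }

The lifespan accessors follow the same pattern, taking stats->lock around the single field read or write.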
Fixes: b40f4757daa1 ("IB/core: Make device counter infrastructure dynamic") Signed-off-by: Mark Bloch Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/sysfs.c | 34 ++++++++++++++++++++++++++++------ include/rdma/ib_verbs.h | 4 ++++ 2 files changed, 32 insertions(+), 6 deletions(-) (limited to 'include/rdma/ib_verbs.h') diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index cf36ff1f0068..9b0fbab41dc6 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -811,10 +811,15 @@ static ssize_t show_hw_stats(struct kobject *kobj, struct attribute *attr, dev = port->ibdev; stats = port->hw_stats; } + mutex_lock(&stats->lock); ret = update_hw_stats(dev, stats, hsa->port_num, hsa->index); if (ret) - return ret; - return print_hw_stat(stats, hsa->index, buf); + goto unlock; + ret = print_hw_stat(stats, hsa->index, buf); +unlock: + mutex_unlock(&stats->lock); + + return ret; } static ssize_t show_stats_lifespan(struct kobject *kobj, @@ -822,17 +827,25 @@ static ssize_t show_stats_lifespan(struct kobject *kobj, char *buf) { struct hw_stats_attribute *hsa; + struct rdma_hw_stats *stats; int msecs; hsa = container_of(attr, struct hw_stats_attribute, attr); if (!hsa->port_num) { struct ib_device *dev = container_of((struct device *)kobj, struct ib_device, dev); - msecs = jiffies_to_msecs(dev->hw_stats->lifespan); + + stats = dev->hw_stats; } else { struct ib_port *p = container_of(kobj, struct ib_port, kobj); - msecs = jiffies_to_msecs(p->hw_stats->lifespan); + + stats = p->hw_stats; } + + mutex_lock(&stats->lock); + msecs = jiffies_to_msecs(stats->lifespan); + mutex_unlock(&stats->lock); + return sprintf(buf, "%d\n", msecs); } @@ -841,6 +854,7 @@ static ssize_t set_stats_lifespan(struct kobject *kobj, const char *buf, size_t count) { struct hw_stats_attribute *hsa; + struct rdma_hw_stats *stats; int msecs; int jiffies; int ret; @@ -855,11 +869,18 @@ static ssize_t set_stats_lifespan(struct kobject *kobj, if (!hsa->port_num) { struct ib_device *dev = container_of((struct device *)kobj, struct ib_device, dev); - dev->hw_stats->lifespan = jiffies; + + stats = dev->hw_stats; } else { struct ib_port *p = container_of(kobj, struct ib_port, kobj); - p->hw_stats->lifespan = jiffies; + + stats = p->hw_stats; } + + mutex_lock(&stats->lock); + stats->lifespan = jiffies; + mutex_unlock(&stats->lock); + return count; } @@ -952,6 +973,7 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, sysfs_attr_init(hsag->attrs[i]); } + mutex_init(&stats->lock); /* treat an error here as non-fatal */ hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num); if (hsag->attrs[i]) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index e9288d0f627e..48f416fabe0c 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -470,6 +470,9 @@ enum ib_port_speed { /** * struct rdma_hw_stats + * @lock - Mutex to protect parallel write access to lifespan and values + * of counters, which are 64bits and not guaranteeed to be written + * atomicaly on 32bits systems. * @timestamp - Used by the core code to track when the last update was * @lifespan - Used by the core code to determine how old the counters * should be before being updated again. 
Stored in jiffies, defaults @@ -485,6 +488,7 @@ enum ib_port_speed { * filled in by the drivers get_stats routine */ struct rdma_hw_stats { + struct mutex lock; /* Protect lifespan and values[] */ unsigned long timestamp; unsigned long lifespan; const char * const *names; -- cgit v1.2.3-59-g8ed1b From 72e1ff0fb7e09c34956e4b3f619481da4d9787c1 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sun, 1 Apr 2018 15:08:18 +0300 Subject: RDMA/core: Update query_gid documentation for HCA drivers query_gid() should return right GID value for iWarp and IB link layers. It is a no-op for RoCE link layer. Update the documentation to reflect this. Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- include/rdma/ib_verbs.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/rdma/ib_verbs.h') diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 48f416fabe0c..a2f658125795 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2141,6 +2141,10 @@ struct ib_device { */ struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num); + /* query_gid should be return GID value for @device, when @port_num + * link layer is either IB or iWarp. It is no-op if @port_num port + * is RoCE link layer. + */ int (*query_gid)(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); -- cgit v1.2.3-59-g8ed1b From 598ff6bae689453aa894bc38f3f1bb78eb131a61 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sun, 1 Apr 2018 15:08:21 +0300 Subject: IB/core: Refactor GID modify code for RoCE Code is refactored to prepare separate functions for RoCE which can do more complex operations related to reference counting, while still maintainining code readability. This includes (a) Simplification to not perform netdevice checks and modifications for IB link layer. (b) Do not add RoCE GID entry which has NULL netdevice; instead return an error. (c) If GID addition fails at provider level add_gid(), do not add the entry in the cache and keep the entry marked as INVALID. (d) Simplify and reuse the ib_cache_gid_add()/del() routines so that they can be used even for modifying default GIDs. This avoid some code duplication in modifying default GIDs. (e) find_gid() routine refers to the data entry flags to qualify a GID as valid or invalid GID rather than depending on attributes and zeroness of the GID content. (f) gid_table_reserve_default() sets the GID default attribute at beginning while setting up the GID table. There is no need to use default_gid flag in low level functions such as write_gid(), add_gid(), del_gid(), as they never need to update the DEFAULT property of the GID entry while during GID table update. As as result of this refactor, reserved GID 0:0:0:0:0:0:0:0 is no longer searchable as described below. A unicast GID entry of 0:0:0:0:0:0:0:0 is Reserved GID as per the IB spec version 1.3 section 4.1.1, point (6) whose snippet is below. "The unicast GID address 0:0:0:0:0:0:0:0 is reserved - referred to as the Reserved GID. It shall never be assigned to any endport. It shall not be used as a destination address or in a global routing header (GRH)." GID table cache now only stores valid GID entries. Before this patch, Reserved GID 0:0:0:0:0:0:0:0 was searchable in the GID table using ib_find_cached_gid_by_port() and other similar find routines. 
Zero GID is no longer searchable as it shall not to be present in GRH or path recored entry as described in IB spec version 1.3 section 4.1.1, point (6), section 12.7.10 and section 12.7.20. ib_cache_update() is simplified to check link layer once, use unified locking scheme for all link layers, removed temporary gid table allocation/free logic. Additionally, (a) Expand ib_gid_attr to store port and index so that GID query routines can get port and index information from the attribute structure. (b) Expand ib_gid_attr to store device as well so that in future code when GID reference counting is done, device is used to reach back to the GID table entry. Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cache.c | 484 +++++++++++++++++++++------------------- drivers/infiniband/core/sysfs.c | 18 +- include/rdma/ib_verbs.h | 5 +- 3 files changed, 272 insertions(+), 235 deletions(-) (limited to 'include/rdma/ib_verbs.h') diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index e03eaf0c7527..045ca11fa135 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -59,8 +59,6 @@ struct ib_update_work { union ib_gid zgid; EXPORT_SYMBOL(zgid); -static const struct ib_gid_attr zattr; - enum gid_attr_find_mask { GID_ATTR_FIND_MASK_GID = 1UL << 0, GID_ATTR_FIND_MASK_NETDEV = 1UL << 1, @@ -73,15 +71,6 @@ enum gid_table_entry_props { GID_TABLE_ENTRY_DEFAULT = 1UL << 1, }; -enum gid_table_write_action { - GID_TABLE_WRITE_ACTION_ADD, - GID_TABLE_WRITE_ACTION_DEL, - /* MODIFY only updates the GID table. Currently only used by - * ib_cache_update. - */ - GID_TABLE_WRITE_ACTION_MODIFY -}; - struct ib_gid_table_entry { unsigned long props; union ib_gid gid; @@ -100,16 +89,13 @@ struct ib_gid_table { * (a) Find the GID * (b) Delete it. * - * Add/delete should be carried out atomically. - * This is done by locking this mutex from multiple - * writers. We don't need this lock for IB, as the MAD - * layer replaces all entries. All data_vec entries - * are locked by this lock. **/ - struct mutex lock; - /* This lock protects the table entries from being - * read and written simultaneously. + /* Any writer to data_vec must hold this lock and the write side of + * rwlock. readers must hold only rwlock. All writers must be in a + * sleepable context. */ + struct mutex lock; + /* rwlock protects data_vec[ix]->props. */ rwlock_t rwlock; struct ib_gid_table_entry *data_vec; }; @@ -163,94 +149,128 @@ int ib_cache_gid_parse_type_str(const char *buf) } EXPORT_SYMBOL(ib_cache_gid_parse_type_str); -/* This function expects that rwlock will be write locked in all - * scenarios and that lock will be locked in sleep-able (RoCE) - * scenarios. 
- */ -static int write_gid(struct ib_device *ib_dev, u8 port, - struct ib_gid_table *table, int ix, - const union ib_gid *gid, - const struct ib_gid_attr *attr, - enum gid_table_write_action action, - bool default_gid) - __releases(&table->rwlock) __acquires(&table->rwlock) +static void del_roce_gid(struct ib_device *device, u8 port_num, + struct ib_gid_table *table, int ix) { - int ret = 0; - struct net_device *old_net_dev; - enum ib_gid_type old_gid_type; + pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__, + device->name, port_num, ix, + table->data_vec[ix].gid.raw); + + if (rdma_cap_roce_gid_table(device, port_num)) + device->del_gid(device, port_num, ix, + &table->data_vec[ix].context); + dev_put(table->data_vec[ix].attr.ndev); +} - /* in rdma_cap_roce_gid_table, this funciton should be protected by a - * sleep-able lock. - */ +static int add_roce_gid(struct ib_gid_table *table, + const union ib_gid *gid, + const struct ib_gid_attr *attr) +{ + struct ib_gid_table_entry *entry; + int ix = attr->index; + int ret = 0; - if (rdma_cap_roce_gid_table(ib_dev, port)) { - table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID; - write_unlock_irq(&table->rwlock); - /* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by - * RoCE providers and thus only updates the cache. - */ - if (action == GID_TABLE_WRITE_ACTION_ADD) - ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr, - &table->data_vec[ix].context); - else if (action == GID_TABLE_WRITE_ACTION_DEL) - ret = ib_dev->del_gid(ib_dev, port, ix, - &table->data_vec[ix].context); - write_lock_irq(&table->rwlock); + if (!attr->ndev) { + pr_err("%s NULL netdev device=%s port=%d index=%d\n", + __func__, attr->device->name, attr->port_num, + attr->index); + return -EINVAL; } - old_net_dev = table->data_vec[ix].attr.ndev; - old_gid_type = table->data_vec[ix].attr.gid_type; - if (old_net_dev && old_net_dev != attr->ndev) - dev_put(old_net_dev); - /* if modify_gid failed, just delete the old gid */ - if (ret || action == GID_TABLE_WRITE_ACTION_DEL) { - gid = &zgid; - attr = &zattr; - table->data_vec[ix].context = NULL; + entry = &table->data_vec[ix]; + if ((entry->props & GID_TABLE_ENTRY_INVALID) == 0) { + WARN(1, "GID table corruption device=%s port=%d index=%d\n", + attr->device->name, attr->port_num, + attr->index); + return -EINVAL; } - memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid)); - memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr)); - if (default_gid) { - table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT; - if (action == GID_TABLE_WRITE_ACTION_DEL) - table->data_vec[ix].attr.gid_type = old_gid_type; + if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) { + ret = attr->device->add_gid(attr->device, attr->port_num, + ix, gid, attr, &entry->context); + if (ret) { + pr_err("%s GID add failed device=%s port=%d index=%d\n", + __func__, attr->device->name, attr->port_num, + attr->index); + goto add_err; + } } - if (table->data_vec[ix].attr.ndev && - table->data_vec[ix].attr.ndev != old_net_dev) - dev_hold(table->data_vec[ix].attr.ndev); - - table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID; + dev_hold(attr->ndev); +add_err: + if (!ret) + pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__, + attr->device->name, attr->port_num, ix, gid->raw); return ret; } -static int add_gid(struct ib_device *ib_dev, u8 port, - struct ib_gid_table *table, int ix, - const union ib_gid *gid, - const struct ib_gid_attr *attr, - bool default_gid) { - return write_gid(ib_dev, port, table, ix, gid, attr, - 
GID_TABLE_WRITE_ACTION_ADD, default_gid); -} +/** + * add_modify_gid - Add or modify GID table entry + * + * @table: GID table in which GID to be added or modified + * @gid: GID content + * @attr: Attributes of the GID + * + * Returns 0 on success or appropriate error code. It accepts zero + * GID addition for non RoCE ports for HCA's who report them as valid + * GID. However such zero GIDs are not added to the cache. + */ +static int add_modify_gid(struct ib_gid_table *table, + const union ib_gid *gid, + const struct ib_gid_attr *attr) +{ + int ret; + + if (rdma_protocol_roce(attr->device, attr->port_num)) { + ret = add_roce_gid(table, gid, attr); + if (ret) + return ret; + } else { + /* + * Some HCA's report multiple GID entries with only one + * valid GID, but remaining as zero GID. + * So ignore such behavior for IB link layer and don't + * fail the call, but don't add such entry to GID cache. + */ + if (!memcmp(gid, &zgid, sizeof(*gid))) + return 0; + } + + lockdep_assert_held(&table->lock); + memcpy(&table->data_vec[attr->index].gid, gid, sizeof(*gid)); + memcpy(&table->data_vec[attr->index].attr, attr, sizeof(*attr)); -static int modify_gid(struct ib_device *ib_dev, u8 port, - struct ib_gid_table *table, int ix, - const union ib_gid *gid, - const struct ib_gid_attr *attr, - bool default_gid) { - return write_gid(ib_dev, port, table, ix, gid, attr, - GID_TABLE_WRITE_ACTION_MODIFY, default_gid); + write_lock_irq(&table->rwlock); + table->data_vec[attr->index].props &= ~GID_TABLE_ENTRY_INVALID; + write_unlock_irq(&table->rwlock); + return 0; } -static int del_gid(struct ib_device *ib_dev, u8 port, - struct ib_gid_table *table, int ix, - bool default_gid) { - return write_gid(ib_dev, port, table, ix, &zgid, &zattr, - GID_TABLE_WRITE_ACTION_DEL, default_gid); +/** + * del_gid - Delete GID table entry + * + * @ib_dev: IB device whose GID entry to be deleted + * @port: Port number of the IB device + * @table: GID table of the IB device for a port + * @ix: GID entry index to delete + * + */ +static void del_gid(struct ib_device *ib_dev, u8 port, + struct ib_gid_table *table, int ix) +{ + lockdep_assert_held(&table->lock); + write_lock_irq(&table->rwlock); + table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID; + write_unlock_irq(&table->rwlock); + + if (rdma_protocol_roce(ib_dev, port)) + del_roce_gid(ib_dev, port, table, ix); + memcpy(&table->data_vec[ix].gid, &zgid, sizeof(zgid)); + memset(&table->data_vec[ix].attr, 0, sizeof(table->data_vec[ix].attr)); + table->data_vec[ix].context = NULL; } -/* rwlock should be read locked */ +/* rwlock should be read locked, or lock should be held */ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid, const struct ib_gid_attr *val, bool default_gid, unsigned long mask, int *pempty) @@ -266,15 +286,32 @@ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid, i++; + /* find_gid() is used during GID addition where it is expected + * to return a free entry slot which is not duplicate. + * Free entry slot is requested and returned if pempty is set, + * so lookup free slot only if requested. + */ + if (pempty && empty < 0) { + if (data->props & GID_TABLE_ENTRY_INVALID) { + /* Found an invalid (free) entry; allocate it */ + if (data->props & GID_TABLE_ENTRY_DEFAULT) { + if (default_gid) + empty = curr_index; + } else { + empty = curr_index; + } + } + } + + /* + * Additionally find_gid() is used to find valid entry during + * lookup operation, where validity needs to be checked. 
So + * find the empty entry first to continue to search for a free + * slot and ignore its INVALID flag. + */ if (data->props & GID_TABLE_ENTRY_INVALID) continue; - if (empty < 0) - if (!memcmp(&data->gid, &zgid, sizeof(*gid)) && - !memcmp(attr, &zattr, sizeof(*attr)) && - !data->props) - empty = curr_index; - if (found >= 0) continue; @@ -310,20 +347,56 @@ static void make_default_gid(struct net_device *dev, union ib_gid *gid) addrconf_ifid_eui48(&gid->raw[8], dev); } -int ib_cache_gid_add(struct ib_device *ib_dev, u8 port, - union ib_gid *gid, struct ib_gid_attr *attr) +static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port, + union ib_gid *gid, struct ib_gid_attr *attr, + unsigned long mask, bool default_gid) { struct ib_gid_table *table; - int ix; int ret = 0; - struct net_device *idev; int empty; + int ix; - table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid; - + /* Do not allow adding zero GID in support of + * IB spec version 1.3 section 4.1.1 point (6) and + * section 12.7.10 and section 12.7.20 + */ if (!memcmp(gid, &zgid, sizeof(*gid))) return -EINVAL; + table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid; + + mutex_lock(&table->lock); + + ix = find_gid(table, gid, attr, default_gid, mask, &empty); + if (ix >= 0) + goto out_unlock; + + if (empty < 0) { + ret = -ENOSPC; + goto out_unlock; + } + attr->device = ib_dev; + attr->index = empty; + attr->port_num = port; + ret = add_modify_gid(table, gid, attr); + if (!ret) + dispatch_gid_change_event(ib_dev, port); + +out_unlock: + mutex_unlock(&table->lock); + if (ret) + pr_warn("%s: unable to add gid %pI6 error=%d\n", + __func__, gid->raw, ret); + return ret; +} + +int ib_cache_gid_add(struct ib_device *ib_dev, u8 port, + union ib_gid *gid, struct ib_gid_attr *attr) +{ + struct net_device *idev; + unsigned long mask; + int ret; + if (ib_dev->get_netdev) { idev = ib_dev->get_netdev(ib_dev, port); if (idev && attr->ndev != idev) { @@ -340,27 +413,11 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port, dev_put(idev); } - mutex_lock(&table->lock); - write_lock_irq(&table->rwlock); - - ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID | - GID_ATTR_FIND_MASK_GID_TYPE | - GID_ATTR_FIND_MASK_NETDEV, &empty); - if (ix >= 0) - goto out_unlock; - - if (empty < 0) { - ret = -ENOSPC; - goto out_unlock; - } - - ret = add_gid(ib_dev, port, table, empty, gid, attr, false); - if (!ret) - dispatch_gid_change_event(ib_dev, port); + mask = GID_ATTR_FIND_MASK_GID | + GID_ATTR_FIND_MASK_GID_TYPE | + GID_ATTR_FIND_MASK_NETDEV; -out_unlock: - write_unlock_irq(&table->rwlock); - mutex_unlock(&table->lock); + ret = __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false); return ret; } @@ -368,29 +425,32 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, union ib_gid *gid, struct ib_gid_attr *attr) { struct ib_gid_table *table; + int ret = 0; int ix; table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid; mutex_lock(&table->lock); - write_lock_irq(&table->rwlock); ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID | GID_ATTR_FIND_MASK_GID_TYPE | - GID_ATTR_FIND_MASK_NETDEV | - GID_ATTR_FIND_MASK_DEFAULT, + GID_ATTR_FIND_MASK_NETDEV, NULL); - if (ix < 0) + if (ix < 0) { + ret = -EINVAL; goto out_unlock; + } - if (!del_gid(ib_dev, port, table, ix, false)) - dispatch_gid_change_event(ib_dev, port); + del_gid(ib_dev, port, table, ix); + dispatch_gid_change_event(ib_dev, port); out_unlock: - write_unlock_irq(&table->rwlock); mutex_unlock(&table->lock); - return 0; + if (ret) + 
pr_debug("%s: can't delete gid %pI6 error=%d\n", + __func__, gid->raw, ret); + return ret; } int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, @@ -403,16 +463,14 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid; mutex_lock(&table->lock); - write_lock_irq(&table->rwlock); - for (ix = 0; ix < table->sz; ix++) - if (table->data_vec[ix].attr.ndev == ndev) - if (!del_gid(ib_dev, port, table, ix, - !!(table->data_vec[ix].props & - GID_TABLE_ENTRY_DEFAULT))) - deleted = true; + for (ix = 0; ix < table->sz; ix++) { + if (table->data_vec[ix].attr.ndev == ndev) { + del_gid(ib_dev, port, table, ix); + deleted = true; + } + } - write_unlock_irq(&table->rwlock); mutex_unlock(&table->lock); if (deleted) @@ -609,6 +667,7 @@ static struct ib_gid_table *alloc_gid_table(int sz) { struct ib_gid_table *table = kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL); + int i; if (!table) return NULL; @@ -622,6 +681,11 @@ static struct ib_gid_table *alloc_gid_table(int sz) table->sz = sz; rwlock_init(&table->rwlock); + /* Mark all entries as invalid so that allocator can allocate + * one of the invalid (free) entry. + */ + for (i = 0; i < sz; i++) + table->data_vec[i].props |= GID_TABLE_ENTRY_INVALID; return table; err_free_table: @@ -646,16 +710,15 @@ static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port, if (!table) return; - write_lock_irq(&table->rwlock); + mutex_lock(&table->lock); for (i = 0; i < table->sz; ++i) { if (memcmp(&table->data_vec[i].gid, &zgid, - sizeof(table->data_vec[i].gid))) - if (!del_gid(ib_dev, port, table, i, - table->data_vec[i].props & - GID_ATTR_FIND_MASK_DEFAULT)) - deleted = true; + sizeof(table->data_vec[i].gid))) { + del_gid(ib_dev, port, table, i); + deleted = true; + } } - write_unlock_irq(&table->rwlock); + mutex_unlock(&table->lock); if (deleted) dispatch_gid_change_event(ib_dev, port); @@ -668,9 +731,9 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, { union ib_gid gid; struct ib_gid_attr gid_attr; - struct ib_gid_attr zattr_type = zattr; struct ib_gid_table *table; unsigned int gid_type; + unsigned long mask; table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid; @@ -679,60 +742,19 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, gid_attr.ndev = ndev; for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) { - int ix; - union ib_gid current_gid; - struct ib_gid_attr current_gid_attr = {}; - if (1UL << gid_type & ~gid_type_mask) continue; gid_attr.gid_type = gid_type; - mutex_lock(&table->lock); - write_lock_irq(&table->rwlock); - ix = find_gid(table, NULL, &gid_attr, true, - GID_ATTR_FIND_MASK_GID_TYPE | - GID_ATTR_FIND_MASK_DEFAULT, - NULL); - - /* Coudn't find default GID location */ - if (WARN_ON(ix < 0)) - goto release; - - zattr_type.gid_type = gid_type; - - if (!__ib_cache_gid_get(ib_dev, port, ix, - ¤t_gid, ¤t_gid_attr) && - mode == IB_CACHE_GID_DEFAULT_MODE_SET && - !memcmp(&gid, ¤t_gid, sizeof(gid)) && - !memcmp(&gid_attr, ¤t_gid_attr, sizeof(gid_attr))) - goto release; - - if (memcmp(¤t_gid, &zgid, sizeof(current_gid)) || - memcmp(¤t_gid_attr, &zattr_type, - sizeof(current_gid_attr))) { - if (del_gid(ib_dev, port, table, ix, true)) { - pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n", - ix, gid.raw); - goto release; - } else { - dispatch_gid_change_event(ib_dev, port); - } - } - if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) { - if (add_gid(ib_dev, port, table, ix, &gid, 
&gid_attr, true)) - pr_warn("ib_cache_gid: unable to add default gid %pI6\n", - gid.raw); - else - dispatch_gid_change_event(ib_dev, port); + mask = GID_ATTR_FIND_MASK_GID_TYPE | + GID_ATTR_FIND_MASK_DEFAULT; + __ib_cache_gid_add(ib_dev, port, &gid, + &gid_attr, mask, true); + } else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) { + ib_cache_gid_del(ib_dev, port, &gid, &gid_attr); } - -release: - if (current_gid_attr.ndev) - dev_put(current_gid_attr.ndev); - write_unlock_irq(&table->rwlock); - mutex_unlock(&table->lock); } } @@ -1057,25 +1079,50 @@ int ib_get_cached_port_state(struct ib_device *device, } EXPORT_SYMBOL(ib_get_cached_port_state); +static int config_non_roce_gid_cache(struct ib_device *device, + u8 port, int gid_tbl_len) +{ + struct ib_gid_attr gid_attr = {}; + struct ib_gid_table *table; + union ib_gid gid; + int ret = 0; + int i; + + gid_attr.device = device; + gid_attr.port_num = port; + table = device->cache.ports[port - rdma_start_port(device)].gid; + + mutex_lock(&table->lock); + for (i = 0; i < gid_tbl_len; ++i) { + if (!device->query_gid) + continue; + ret = device->query_gid(device, port, i, &gid); + if (ret) { + pr_warn("query_gid failed (%d) for %s (index %d)\n", + ret, device->name, i); + goto err; + } + gid_attr.index = i; + add_modify_gid(table, &gid, &gid_attr); + } +err: + mutex_unlock(&table->lock); + return ret; +} + static void ib_cache_update(struct ib_device *device, u8 port, bool enforce_security) { struct ib_port_attr *tprops = NULL; struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache; - struct ib_gid_cache { - int table_len; - union ib_gid table[0]; - } *gid_cache = NULL; int i; int ret; struct ib_gid_table *table; - bool use_roce_gid_table; if (!rdma_is_port_valid(device, port)) return; - use_roce_gid_table = rdma_protocol_roce(device, port); table = device->cache.ports[port - rdma_start_port(device)].gid; tprops = kmalloc(sizeof *tprops, GFP_KERNEL); @@ -1089,6 +1136,13 @@ static void ib_cache_update(struct ib_device *device, goto err; } + if (!rdma_protocol_roce(device, port)) { + ret = config_non_roce_gid_cache(device, port, + tprops->gid_tbl_len); + if (ret) + goto err; + } + pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len * sizeof *pkey_cache->table, GFP_KERNEL); if (!pkey_cache) @@ -1096,15 +1150,6 @@ static void ib_cache_update(struct ib_device *device, pkey_cache->table_len = tprops->pkey_tbl_len; - if (!use_roce_gid_table) { - gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len * - sizeof(*gid_cache->table), GFP_KERNEL); - if (!gid_cache) - goto err; - - gid_cache->table_len = tprops->gid_tbl_len; - } - for (i = 0; i < pkey_cache->table_len; ++i) { ret = ib_query_pkey(device, port, i, pkey_cache->table + i); if (ret) { @@ -1114,33 +1159,12 @@ static void ib_cache_update(struct ib_device *device, } } - if (!use_roce_gid_table) { - for (i = 0; i < gid_cache->table_len; ++i) { - ret = device->query_gid(device, port, i, - gid_cache->table + i); - if (ret) { - pr_warn("ib_query_gid failed (%d) for %s (index %d)\n", - ret, device->name, i); - goto err; - } - } - } - write_lock_irq(&device->cache.lock); old_pkey_cache = device->cache.ports[port - rdma_start_port(device)].pkey; device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache; - if (!use_roce_gid_table) { - write_lock(&table->rwlock); - for (i = 0; i < gid_cache->table_len; i++) { - modify_gid(device, port, table, i, gid_cache->table + i, - &zattr, false); - } - write_unlock(&table->rwlock); - } - device->cache.ports[port - 
rdma_start_port(device)].lmc = tprops->lmc; device->cache.ports[port - rdma_start_port(device)].port_state = tprops->state; @@ -1154,14 +1178,12 @@ static void ib_cache_update(struct ib_device *device, port, tprops->subnet_prefix); - kfree(gid_cache); kfree(old_pkey_cache); kfree(tprops); return; err: kfree(pkey_cache); - kfree(gid_cache); kfree(tprops); } diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 9b0fbab41dc6..31c7efaf8e7a 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -389,14 +389,26 @@ static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr, { struct port_table_attribute *tab_attr = container_of(attr, struct port_table_attribute, attr); + union ib_gid *pgid; union ib_gid gid; ssize_t ret; ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid, NULL); - if (ret) - return ret; - return sprintf(buf, "%pI6\n", gid.raw); + /* If reading GID fails, it is likely due to GID entry being empty + * (invalid) or reserved GID in the table. + * User space expects to read GID table entries as long as it given + * index is within GID table size. + * Administrative/debugging tool fails to query rest of the GID entries + * if it hits error while querying a GID of the given index. + * To avoid user space throwing such error on fail to read gid, return + * zero GID as before. This maintains backward compatibility. + */ + if (ret) + pgid = &zgid; + else + pgid = &gid; + return sprintf(buf, "%pI6\n", pgid->raw); } static ssize_t show_port_gid_attr_ndev(struct ib_port *p, diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index a2f658125795..dc2541f13d7f 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -91,8 +91,11 @@ enum ib_gid_type { #define ROCE_V2_UDP_DPORT 4791 struct ib_gid_attr { - enum ib_gid_type gid_type; struct net_device *ndev; + struct ib_device *device; + enum ib_gid_type gid_type; + u16 index; + u8 port_num; }; enum rdma_node_type { -- cgit v1.2.3-59-g8ed1b From 414448d249d82c9be93b35e61e0303e84ef2f959 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sun, 1 Apr 2018 15:08:24 +0300 Subject: RDMA: Use ib_gid_attr during GID modification Now that ib_gid_attr contains device, port and index, simplify the provider APIs add_gid() and del_gid() to use device, port and index fields from the ib_gid_attr attributes structure. 
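To illustrate the simplified callback shape described above, a minimal provider implementation might now look like the sketch below. This is a sketch only: the example_dev type, the to_edev() helper and the example_hw_*() calls are hypothetical placeholders, while the add_gid()/del_gid() signatures follow the diff in this patch.

static int example_add_gid(const union ib_gid *gid,
                           const struct ib_gid_attr *attr, void **context)
{
        /* Device, port and index all come from the attribute now. */
        struct example_dev *dev = to_edev(attr->device);

        return example_hw_set_gid(dev, attr->port_num, attr->index, gid);
}

static int example_del_gid(const struct ib_gid_attr *attr, void **context)
{
        struct example_dev *dev = to_edev(attr->device);

        return example_hw_clear_gid(dev, attr->port_num, attr->index);
}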
Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cache.c | 5 ++-- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 10 ++++---- drivers/infiniband/hw/bnxt_re/ib_verbs.h | 6 ++--- drivers/infiniband/hw/hns/hns_roce_main.c | 20 +++++++--------- drivers/infiniband/hw/mlx4/main.c | 30 ++++++++++------------- drivers/infiniband/hw/mlx5/main.c | 13 +++++----- drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 31 +++++++----------------- drivers/infiniband/sw/rxe/rxe_verbs.c | 10 ++++---- include/rdma/ib_verbs.h | 33 +++++++++++--------------- 9 files changed, 63 insertions(+), 95 deletions(-) (limited to 'include/rdma/ib_verbs.h') diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 045ca11fa135..e337b08de2ff 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -157,7 +157,7 @@ static void del_roce_gid(struct ib_device *device, u8 port_num, table->data_vec[ix].gid.raw); if (rdma_cap_roce_gid_table(device, port_num)) - device->del_gid(device, port_num, ix, + device->del_gid(&table->data_vec[ix].attr, &table->data_vec[ix].context); dev_put(table->data_vec[ix].attr.ndev); } @@ -186,8 +186,7 @@ static int add_roce_gid(struct ib_gid_table *table, } if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) { - ret = attr->device->add_gid(attr->device, attr->port_num, - ix, gid, attr, &entry->context); + ret = attr->device->add_gid(gid, attr, &entry->context); if (ret) { pr_err("%s GID add failed device=%s port=%d index=%d\n", __func__, attr->device->name, attr->port_num, diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 63a0e08dd6fe..a76e206704d4 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -314,12 +314,11 @@ int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num, return rc; } -int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num, - unsigned int index, void **context) +int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context) { int rc = 0; struct bnxt_re_gid_ctx *ctx, **ctx_tbl; - struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev); struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; struct bnxt_qplib_gid *gid_to_del; @@ -365,15 +364,14 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num, return rc; } -int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num, - unsigned int index, const union ib_gid *gid, +int bnxt_re_add_gid(const union ib_gid *gid, const struct ib_gid_attr *attr, void **context) { int rc; u32 tbl_idx = 0; u16 vlan_id = 0xFFFF; struct bnxt_re_gid_ctx *ctx, **ctx_tbl; - struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev); struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; if ((attr->ndev) && is_vlan_dev(attr->ndev)) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index e62b7c2c7da6..5c6414cad4af 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -157,10 +157,8 @@ int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num, void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str); int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index, u16 *pkey); -int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num, - unsigned int index, void 
**context); -int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num, - unsigned int index, const union ib_gid *gid, +int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context); +int bnxt_re_add_gid(const union ib_gid *gid, const struct ib_gid_attr *attr, void **context); int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num, int index, union ib_gid *gid); diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 76e2e5b41895..9d48bc07a9e6 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -74,12 +74,11 @@ static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr) return hr_dev->hw->set_mac(hr_dev, phy_port, addr); } -static int hns_roce_add_gid(struct ib_device *device, u8 port_num, - unsigned int index, const union ib_gid *gid, +static int hns_roce_add_gid(const union ib_gid *gid, const struct ib_gid_attr *attr, void **context) { - struct hns_roce_dev *hr_dev = to_hr_dev(device); - u8 port = port_num - 1; + struct hns_roce_dev *hr_dev = to_hr_dev(attr->device); + u8 port = attr->port_num - 1; unsigned long flags; int ret; @@ -88,21 +87,20 @@ static int hns_roce_add_gid(struct ib_device *device, u8 port_num, spin_lock_irqsave(&hr_dev->iboe.lock, flags); - ret = hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid, - attr); + ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, + (union ib_gid *)gid, attr); spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); return ret; } -static int hns_roce_del_gid(struct ib_device *device, u8 port_num, - unsigned int index, void **context) +static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context) { - struct hns_roce_dev *hr_dev = to_hr_dev(device); + struct hns_roce_dev *hr_dev = to_hr_dev(attr->device); struct ib_gid_attr zattr = { }; union ib_gid zgid = { {0} }; - u8 port = port_num - 1; + u8 port = attr->port_num - 1; unsigned long flags; int ret; @@ -111,7 +109,7 @@ static int hns_roce_del_gid(struct ib_device *device, u8 port_num, spin_lock_irqsave(&hr_dev->iboe.lock, flags); - ret = hr_dev->hw->set_gid(hr_dev, port, index, &zgid, &zattr); + ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &zgid, &zattr); spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index a31a3edfbf28..8eca09b53fe8 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -246,14 +246,11 @@ static int mlx4_ib_update_gids(struct gid_entry *gids, return mlx4_ib_update_gids_v1(gids, ibdev, port_num); } -static int mlx4_ib_add_gid(struct ib_device *device, - u8 port_num, - unsigned int index, - const union ib_gid *gid, +static int mlx4_ib_add_gid(const union ib_gid *gid, const struct ib_gid_attr *attr, void **context) { - struct mlx4_ib_dev *ibdev = to_mdev(device); + struct mlx4_ib_dev *ibdev = to_mdev(attr->device); struct mlx4_ib_iboe *iboe = &ibdev->iboe; struct mlx4_port_gid_table *port_gid_table; int free = -1, found = -1; @@ -262,16 +259,16 @@ static int mlx4_ib_add_gid(struct ib_device *device, int i; struct gid_entry *gids = NULL; - if (!rdma_cap_roce_gid_table(device, port_num)) + if (!rdma_cap_roce_gid_table(attr->device, attr->port_num)) return -EINVAL; - if (port_num > MLX4_MAX_PORTS) + if (attr->port_num > MLX4_MAX_PORTS) return -EINVAL; if (!context) return -EINVAL; - port_gid_table = &iboe->gids[port_num - 1]; + port_gid_table = &iboe->gids[attr->port_num - 1]; spin_lock_bh(&iboe->lock); 
for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) { if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid)) && @@ -318,33 +315,30 @@ static int mlx4_ib_add_gid(struct ib_device *device, spin_unlock_bh(&iboe->lock); if (!ret && hw_update) { - ret = mlx4_ib_update_gids(gids, ibdev, port_num); + ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num); kfree(gids); } return ret; } -static int mlx4_ib_del_gid(struct ib_device *device, - u8 port_num, - unsigned int index, - void **context) +static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context) { struct gid_cache_context *ctx = *context; - struct mlx4_ib_dev *ibdev = to_mdev(device); + struct mlx4_ib_dev *ibdev = to_mdev(attr->device); struct mlx4_ib_iboe *iboe = &ibdev->iboe; struct mlx4_port_gid_table *port_gid_table; int ret = 0; int hw_update = 0; struct gid_entry *gids = NULL; - if (!rdma_cap_roce_gid_table(device, port_num)) + if (!rdma_cap_roce_gid_table(attr->device, attr->port_num)) return -EINVAL; - if (port_num > MLX4_MAX_PORTS) + if (attr->port_num > MLX4_MAX_PORTS) return -EINVAL; - port_gid_table = &iboe->gids[port_num - 1]; + port_gid_table = &iboe->gids[attr->port_num - 1]; spin_lock_bh(&iboe->lock); if (ctx) { ctx->refcount--; @@ -376,7 +370,7 @@ static int mlx4_ib_del_gid(struct ib_device *device, spin_unlock_bh(&iboe->lock); if (!ret && hw_update) { - ret = mlx4_ib_update_gids(gids, ibdev, port_num); + ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num); kfree(gids); } return ret; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 9e6780eadd1e..bc9eabd95948 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -502,18 +502,19 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num, vlan_id, port_num); } -static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num, - unsigned int index, const union ib_gid *gid, +static int mlx5_ib_add_gid(const union ib_gid *gid, const struct ib_gid_attr *attr, __always_unused void **context) { - return set_roce_addr(to_mdev(device), port_num, index, gid, attr); + return set_roce_addr(to_mdev(attr->device), attr->port_num, + attr->index, gid, attr); } -static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num, - unsigned int index, __always_unused void **context) +static int mlx5_ib_del_gid(const struct ib_gid_attr *attr, + __always_unused void **context) { - return set_roce_addr(to_mdev(device), port_num, index, NULL, NULL); + return set_roce_addr(to_mdev(attr->device), attr->port_num, + attr->index, NULL, NULL); } __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num, diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index 4834460e2a0b..0be33a81bbe6 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -62,17 +62,10 @@ static DEFINE_MUTEX(pvrdma_device_list_lock); static LIST_HEAD(pvrdma_device_list); static struct workqueue_struct *event_wq; -static int pvrdma_add_gid(struct ib_device *ibdev, - u8 port_num, - unsigned int index, - const union ib_gid *gid, +static int pvrdma_add_gid(const union ib_gid *gid, const struct ib_gid_attr *attr, void **context); -static int pvrdma_del_gid(struct ib_device *ibdev, - u8 port_num, - unsigned int index, - void **context); - +static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context); static ssize_t show_hca(struct device *device, struct device_attribute *attr, char *buf) @@ -657,18 +650,15 @@ 
static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev, return 0; } -static int pvrdma_add_gid(struct ib_device *ibdev, - u8 port_num, - unsigned int index, - const union ib_gid *gid, +static int pvrdma_add_gid(const union ib_gid *gid, const struct ib_gid_attr *attr, void **context) { - struct pvrdma_dev *dev = to_vdev(ibdev); + struct pvrdma_dev *dev = to_vdev(attr->device); return pvrdma_add_gid_at_index(dev, gid, ib_gid_type_to_pvrdma(attr->gid_type), - index); + attr->index); } static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index) @@ -698,17 +688,14 @@ static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index) return 0; } -static int pvrdma_del_gid(struct ib_device *ibdev, - u8 port_num, - unsigned int index, - void **context) +static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context) { - struct pvrdma_dev *dev = to_vdev(ibdev); + struct pvrdma_dev *dev = to_vdev(attr->device); dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s", - index, dev->netdev->name); + attr->index, dev->netdev->name); - return pvrdma_del_gid_at_index(dev, index); + return pvrdma_del_gid_at_index(dev, attr->index); } static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev, diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 0661c2783b14..08f3e0618b81 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -77,19 +77,17 @@ out: return rc; } -static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int - index, const union ib_gid *gid, +static int rxe_add_gid(const union ib_gid *gid, const struct ib_gid_attr *attr, void **context) { - if (index >= RXE_PORT_GID_TBL_LEN) + if (attr->index >= RXE_PORT_GID_TBL_LEN) return -EINVAL; return 0; } -static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int - index, void **context) +static int rxe_del_gid(const struct ib_gid_attr *attr, void **context) { - if (index >= RXE_PORT_GID_TBL_LEN) + if (attr->index >= RXE_PORT_GID_TBL_LEN) return -EINVAL; return 0; } diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index dc2541f13d7f..1e3059ce73b6 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2151,34 +2151,29 @@ struct ib_device { int (*query_gid)(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); - /* When calling add_gid, the HW vendor's driver should - * add the gid of device @device at gid index @index of - * port @port_num to be @gid. Meta-info of that gid (for example, - * the network device related to this gid is available - * at @attr. @context allows the HW vendor driver to store extra - * information together with a GID entry. The HW vendor may allocate - * memory to contain this information and store it in @context when a - * new GID entry is written to. Params are consistent until the next - * call of add_gid or delete_gid. The function should return 0 on + /* When calling add_gid, the HW vendor's driver should add the gid + * of device of port at gid index available at @attr. Meta-info of + * that gid (for example, the network device related to this gid) is + * available at @attr. @context allows the HW vendor driver to store + * extra information together with a GID entry. The HW vendor driver may + * allocate memory to contain this information and store it in @context + * when a new GID entry is written to. Params are consistent until the + * next call of add_gid or delete_gid. 
The function should return 0 on + * success or error otherwise. The function could be called - * concurrently for different ports. This function is only called - * when roce_gid_table is used. + * concurrently for different ports. This function is only called when + * roce_gid_table is used. */ - int (*add_gid)(struct ib_device *device, - u8 port_num, - unsigned int index, - const union ib_gid *gid, + int (*add_gid)(const union ib_gid *gid, const struct ib_gid_attr *attr, void **context); /* When calling del_gid, the HW vendor's driver should delete the - * gid of device @device at gid index @index of port @port_num. + * gid of device @device at gid index gid_index of port port_num + * available in @attr. * Upon the deletion of a GID entry, the HW vendor must free any * allocated memory. The caller will clear @context afterwards. * This function is only called when roce_gid_table is used. */ - int (*del_gid)(struct ib_device *device, - u8 port_num, - unsigned int index, + int (*del_gid)(const struct ib_gid_attr *attr, void **context); int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index, u16 *pkey); -- cgit v1.2.3-59-g8ed1b From 2eb9beaee5d73130d28c54e91eecb8a186581e08 Mon Sep 17 00:00:00 2001 From: Matan Barak Date: Wed, 28 Mar 2018 09:27:45 +0300 Subject: IB/uverbs: Add flow_action create and destroy verbs A verbs application may receive and transmit packets using a data path pipeline. Sometimes, the first stage in the receive pipeline or the last stage in the transmit pipeline involves transforming a packet, either in order to make it easier for later stages to process it or to prepare it for transmission over the wire. Such a transformation could be stripping/encapsulating the packet (e.g. vxlan), decrypting/encrypting it (e.g. ipsec), altering headers, doing some complex FPGA changes, etc. Some hardware could do such transformations without software data path intervention at all. The flow steering API supports steering a packet (either to a QP or dropping it) and some simple packet-immutable actions (e.g. tagging a packet). Complex actions that may change the packet could bloat the flow steering API extensively. Sometimes the same action should be applied to several flows. In this case, it's easier to bind several flows to the same action and modify it than to change all matching flows. Introduce a new flow_action object that abstracts any packet transformation (out of a standard and well-defined set of actions). This flow_action object could be tied to a flow steering rule via a new specification. Currently, we support the esp flow_action, which encrypts or decrypts a packet according to the given parameters. However, we present a flexible schema that could be used for other transformation actions tied to flow rules.
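As a rough sketch of what the provider-side hook-up could look like under this model, consider the example below. The example_esp_action type and the hardware programming step are hypothetical; the two callback signatures match the ib_device additions in this patch, and the uverbs handler shown later fills in device, type, uobject and usecnt on the returned object.

struct example_esp_action {
        struct ib_flow_action   ibact;
        u32                     spi;
};

static struct ib_flow_action *
example_create_flow_action_esp(struct ib_device *device,
                               const struct ib_flow_action_attrs_esp *attr,
                               struct uverbs_attr_bundle *attrs)
{
        struct example_esp_action *act;

        act = kzalloc(sizeof(*act), GFP_KERNEL);
        if (!act)
                return ERR_PTR(-ENOMEM);

        act->spi = attr->spi;   /* program SPI/keymat into hardware here */
        return &act->ibact;
}

static int example_destroy_flow_action(struct ib_flow_action *action)
{
        kfree(container_of(action, struct example_esp_action, ibact));
        return 0;
}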
Reviewed-by: Yishai Hadas Signed-off-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/Makefile | 3 +- drivers/infiniband/core/uverbs.h | 4 + drivers/infiniband/core/uverbs_std_types.c | 10 +- .../infiniband/core/uverbs_std_types_flow_action.c | 348 +++++++++++++++++++++ include/rdma/ib_verbs.h | 65 ++++ include/uapi/rdma/ib_user_ioctl_cmds.h | 19 ++ include/uapi/rdma/ib_user_ioctl_verbs.h | 59 ++++ 7 files changed, 506 insertions(+), 2 deletions(-) create mode 100644 drivers/infiniband/core/uverbs_std_types_flow_action.c (limited to 'include/rdma/ib_verbs.h') diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index 4d6260fd2f52..445c5504f605 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile @@ -34,4 +34,5 @@ ib_ucm-y := ucm.o ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \ rdma_core.o uverbs_std_types.o uverbs_ioctl.o \ - uverbs_ioctl_merge.o uverbs_std_types_cq.o + uverbs_ioctl_merge.o uverbs_std_types_cq.o \ + uverbs_std_types_flow_action.o diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index 0fabedf446fb..a94b5e7ee02a 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -234,6 +234,9 @@ void create_udata(struct uverbs_attr_bundle *ctx, struct ib_udata *udata); extern const struct uverbs_attr_def uverbs_uhw_compat_in; extern const struct uverbs_attr_def uverbs_uhw_compat_out; long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +int uverbs_destroy_def_handler(struct ib_device *ib_dev, + struct ib_uverbs_file *file, + struct uverbs_attr_bundle *attrs); struct ib_uverbs_flow_spec { union { @@ -273,6 +276,7 @@ extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_FLOW); extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_WQ); extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL); extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_XRCD); +extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION); #define IB_UVERBS_DECLARE_CMD(name) \ ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \ diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c index 2ed8d9203f3b..47b9a85f3854 100644 --- a/drivers/infiniband/core/uverbs_std_types.c +++ b/drivers/infiniband/core/uverbs_std_types.c @@ -191,6 +191,13 @@ static int uverbs_hot_unplug_completion_event_file(struct ib_uobject_file *uobj_ return 0; }; +int uverbs_destroy_def_handler(struct ib_device *ib_dev, + struct ib_uverbs_file *file, + struct uverbs_attr_bundle *attrs) +{ + return 0; +} + /* * This spec is used in order to pass information to the hardware driver in a * legacy way. 
Every verb that could get driver specific data should get this @@ -293,7 +300,8 @@ static DECLARE_UVERBS_OBJECT_TREE(uverbs_default_objects, &UVERBS_OBJECT(UVERBS_OBJECT_FLOW), &UVERBS_OBJECT(UVERBS_OBJECT_WQ), &UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL), - &UVERBS_OBJECT(UVERBS_OBJECT_XRCD)); + &UVERBS_OBJECT(UVERBS_OBJECT_XRCD), + &UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION)); const struct uverbs_object_tree_def *uverbs_default_get_objects(void) { diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c new file mode 100644 index 000000000000..3d4c7b8dff48 --- /dev/null +++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c @@ -0,0 +1,348 @@ +/* + * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "uverbs.h" +#include + +static int uverbs_free_flow_action(struct ib_uobject *uobject, + enum rdma_remove_reason why) +{ + struct ib_flow_action *action = uobject->object; + + if (why == RDMA_REMOVE_DESTROY && + atomic_read(&action->usecnt)) + return -EBUSY; + + return action->device->destroy_flow_action(action); +} + +static u64 esp_flags_uverbs_to_verbs(struct uverbs_attr_bundle *attrs, + u32 flags) +{ + u64 verbs_flags = flags; + + if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ESN)) + verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED; + + return verbs_flags; +}; + +static int validate_flow_action_esp_keymat_aes_gcm(struct ib_flow_action_attrs_esp_keymats *keymat) +{ + struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm = + &keymat->keymat.aes_gcm; + + if (aes_gcm->iv_algo > IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ) + return -EOPNOTSUPP; + + if (aes_gcm->key_len != 32 && + aes_gcm->key_len != 24 && + aes_gcm->key_len != 16) + return -EINVAL; + + if (aes_gcm->icv_len != 16 && + aes_gcm->icv_len != 8 && + aes_gcm->icv_len != 12) + return -EINVAL; + + return 0; +} + +static int (* const flow_action_esp_keymat_validate[])(struct ib_flow_action_attrs_esp_keymats *keymat) = { + [IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = validate_flow_action_esp_keymat_aes_gcm, +}; + +static int parse_esp_ip(enum ib_flow_spec_type proto, + const void __user *val_ptr, + size_t len, union ib_flow_spec *out) +{ + int ret; + const struct ib_uverbs_flow_ipv4_filter ipv4 = { + .src_ip = cpu_to_be32(0xffffffffUL), + .dst_ip = cpu_to_be32(0xffffffffUL), + .proto = 0xff, + .tos = 0xff, + .ttl = 0xff, + .flags = 0xff, + }; + const struct ib_uverbs_flow_ipv6_filter ipv6 = { + .src_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .dst_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .flow_label = cpu_to_be32(0xffffffffUL), + .next_hdr = 0xff, + .traffic_class = 0xff, + .hop_limit = 0xff, + }; + union { + struct ib_uverbs_flow_ipv4_filter ipv4; + struct ib_uverbs_flow_ipv6_filter ipv6; + } user_val = {}; + const void *user_pmask; + size_t val_len; + + /* If the flow IPv4/IPv6 flow specifications are extended, the mask + * should be changed as well. 
+ */ + BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv4_filter, flags) + + sizeof(ipv4.flags) != sizeof(ipv4)); + BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv6_filter, reserved) + + sizeof(ipv6.reserved) != sizeof(ipv6)); + + switch (proto) { + case IB_FLOW_SPEC_IPV4: + if (len > sizeof(user_val.ipv4) && + !ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv4), + len - sizeof(user_val.ipv4))) + return -EOPNOTSUPP; + + val_len = min_t(size_t, len, sizeof(user_val.ipv4)); + ret = copy_from_user(&user_val.ipv4, val_ptr, + val_len); + if (ret) + return -EFAULT; + + user_pmask = &ipv4; + break; + case IB_FLOW_SPEC_IPV6: + if (len > sizeof(user_val.ipv6) && + !ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv6), + len - sizeof(user_val.ipv6))) + return -EOPNOTSUPP; + + val_len = min_t(size_t, len, sizeof(user_val.ipv6)); + ret = copy_from_user(&user_val.ipv6, val_ptr, + val_len); + if (ret) + return -EFAULT; + + user_pmask = &ipv6; + break; + default: + return -EOPNOTSUPP; + } + + return ib_uverbs_kern_spec_to_ib_spec_filter(proto, user_pmask, + &user_val, + val_len, out); +} + +static int flow_action_esp_get_encap(struct ib_flow_spec_list *out, + struct uverbs_attr_bundle *attrs) +{ + struct ib_uverbs_flow_action_esp_encap uverbs_encap; + int ret; + + ret = uverbs_copy_from(&uverbs_encap, attrs, + UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP); + if (ret) + return ret; + + /* We currently support only one encap */ + if (uverbs_encap.next_ptr) + return -EOPNOTSUPP; + + if (uverbs_encap.type != IB_FLOW_SPEC_IPV4 && + uverbs_encap.type != IB_FLOW_SPEC_IPV6) + return -EOPNOTSUPP; + + return parse_esp_ip(uverbs_encap.type, + u64_to_user_ptr(uverbs_encap.val_ptr), + uverbs_encap.len, + &out->spec); +} + +struct ib_flow_action_esp_attr { + struct ib_flow_action_attrs_esp hdr; + struct ib_flow_action_attrs_esp_keymats keymat; + struct ib_flow_action_attrs_esp_replays replay; + /* We currently support only one spec */ + struct ib_flow_spec_list encap; +}; + +#define ESP_LAST_SUPPORTED_FLAG IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW +static int parse_flow_action_esp(struct ib_device *ib_dev, + struct ib_uverbs_file *file, + struct uverbs_attr_bundle *attrs, + struct ib_flow_action_esp_attr *esp_attr) +{ + struct ib_uverbs_flow_action_esp uverbs_esp = {}; + int ret; + + /* Optional param, if it doesn't exist, we get -ENOENT and skip it */ + ret = uverbs_copy_from(&esp_attr->hdr.esn, attrs, + UVERBS_ATTR_FLOW_ACTION_ESP_ESN); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + /* This can be called from FLOW_ACTION_ESP_MODIFY where + * UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS is optional + */ + if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS)) { + ret = uverbs_copy_from_or_zero(&uverbs_esp, attrs, + UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS); + if (ret) + return ret; + + if (uverbs_esp.flags & ~((ESP_LAST_SUPPORTED_FLAG << 1) - 1)) + return -EOPNOTSUPP; + + esp_attr->hdr.spi = uverbs_esp.spi; + esp_attr->hdr.seq = uverbs_esp.seq; + esp_attr->hdr.tfc_pad = uverbs_esp.tfc_pad; + esp_attr->hdr.hard_limit_pkts = uverbs_esp.hard_limit_pkts; + } + esp_attr->hdr.flags = esp_flags_uverbs_to_verbs(attrs, uverbs_esp.flags); + + if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT)) { + esp_attr->keymat.protocol = + uverbs_attr_get_enum_id(attrs, + UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT); + ret = uverbs_copy_from_or_zero(&esp_attr->keymat.keymat, + attrs, + UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT); + if (ret) + return ret; + + ret = flow_action_esp_keymat_validate[esp_attr->keymat.protocol](&esp_attr->keymat); + if 
(ret) + return ret; + + esp_attr->hdr.keymat = &esp_attr->keymat; + } + + if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY)) { + esp_attr->replay.protocol = + uverbs_attr_get_enum_id(attrs, + UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY); + + ret = uverbs_copy_from_or_zero(&esp_attr->replay.replay, + attrs, + UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY); + if (ret) + return ret; + + esp_attr->hdr.replay = &esp_attr->replay; + } + + if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP)) { + ret = flow_action_esp_get_encap(&esp_attr->encap, attrs); + if (ret) + return ret; + + esp_attr->hdr.encap = &esp_attr->encap; + } + + return 0; +} + +static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(struct ib_device *ib_dev, + struct ib_uverbs_file *file, + struct uverbs_attr_bundle *attrs) +{ + int ret; + struct ib_uobject *uobj; + struct ib_flow_action *action; + struct ib_flow_action_esp_attr esp_attr = {}; + + if (!ib_dev->create_flow_action_esp) + return -EOPNOTSUPP; + + ret = parse_flow_action_esp(ib_dev, file, attrs, &esp_attr); + if (ret) + return ret; + + /* No need to check as this attribute is marked as MANDATORY */ + uobj = uverbs_attr_get(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE)->obj_attr.uobject; + action = ib_dev->create_flow_action_esp(ib_dev, &esp_attr.hdr, attrs); + if (IS_ERR(action)) + return PTR_ERR(action); + + atomic_set(&action->usecnt, 0); + action->device = ib_dev; + action->type = IB_FLOW_ACTION_ESP; + action->uobject = uobj; + uobj->object = action; + + return 0; +} + +static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = { + [IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = { + .ptr = { + .type = UVERBS_ATTR_TYPE_PTR_IN, + UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_keymat_aes_gcm), + .flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO, + }, + }, +}; + +static const struct uverbs_attr_spec uverbs_flow_action_esp_replay[] = { + [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = { + .ptr = { + .type = UVERBS_ATTR_TYPE_PTR_IN, + UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp, size), + .flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO, + } + }, +}; + +static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE, + &UVERBS_ATTR_IDR(UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE, UVERBS_OBJECT_FLOW_ACTION, + UVERBS_ACCESS_NEW, + UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), + &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS, + UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp, hard_limit_pkts), + UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY | + UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO)), + &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN, UVERBS_ATTR_TYPE(__u32)), + &UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT, + uverbs_flow_action_esp_keymat, + UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), + &UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY, + uverbs_flow_action_esp_replay), + &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP, + UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_encap, type))); + +static DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(UVERBS_METHOD_FLOW_ACTION_DESTROY, + uverbs_destroy_def_handler, + &UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE, + UVERBS_OBJECT_FLOW_ACTION, + UVERBS_ACCESS_DESTROY, + UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY))); + +DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_FLOW_ACTION, + &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_flow_action), + &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE), + &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY)); + diff --git a/include/rdma/ib_verbs.h 
b/include/rdma/ib_verbs.h index 1e3059ce73b6..49da92143341 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -65,6 +65,7 @@ #include #include #include +#include #define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN @@ -2001,6 +2002,63 @@ struct ib_flow { struct ib_uobject *uobject; }; +enum ib_flow_action_type { + IB_FLOW_ACTION_UNSPECIFIED, + IB_FLOW_ACTION_ESP = 1, +}; + +struct ib_flow_action_attrs_esp_keymats { + enum ib_uverbs_flow_action_esp_keymat protocol; + union { + struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm; + } keymat; +}; + +struct ib_flow_action_attrs_esp_replays { + enum ib_uverbs_flow_action_esp_replay protocol; + union { + struct ib_uverbs_flow_action_esp_replay_bmp bmp; + } replay; +}; + +enum ib_flow_action_attrs_esp_flags { + /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags + * This is done in order to share the same flags between user-space and + * kernel and spare an unnecessary translation. + */ + + /* Kernel flags */ + IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32, +}; + +struct ib_flow_spec_list { + struct ib_flow_spec_list *next; + union ib_flow_spec spec; +}; + +struct ib_flow_action_attrs_esp { + struct ib_flow_action_attrs_esp_keymats *keymat; + struct ib_flow_action_attrs_esp_replays *replay; + struct ib_flow_spec_list *encap; + /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled. + * Value of 0 is a valid value. + */ + u32 esn; + u32 spi; + u32 seq; + u32 tfc_pad; + /* Use enum ib_flow_action_attrs_esp_flags */ + u64 flags; + u64 hard_limit_pkts; +}; + +struct ib_flow_action { + struct ib_device *device; + struct ib_uobject *uobject; + enum ib_flow_action_type type; + atomic_t usecnt; +}; + struct ib_mad_hdr; struct ib_grh; @@ -2077,6 +2135,8 @@ struct ib_port_pkey_list { struct list_head pkey_list; }; +struct uverbs_attr_bundle; + struct ib_device { /* Do not access @dma_device directly from ULP nor from HW drivers. 
*/ struct device *dma_device; @@ -2331,6 +2391,11 @@ struct ib_device { struct ib_rwq_ind_table_init_attr *init_attr, struct ib_udata *udata); int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); + struct ib_flow_action * (*create_flow_action_esp)(struct ib_device *device, + const struct ib_flow_action_attrs_esp *attr, + struct uverbs_attr_bundle *attrs); + int (*destroy_flow_action)(struct ib_flow_action *action); + /** * rdma netdev operation * diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h index 77bbbed17ed5..d3a2c03ea4a8 100644 --- a/include/uapi/rdma/ib_user_ioctl_cmds.h +++ b/include/uapi/rdma/ib_user_ioctl_cmds.h @@ -53,6 +53,7 @@ enum uverbs_default_objects { UVERBS_OBJECT_XRCD, UVERBS_OBJECT_RWQ_IND_TBL, UVERBS_OBJECT_WQ, + UVERBS_OBJECT_FLOW_ACTION, }; enum { @@ -75,9 +76,27 @@ enum uverbs_attrs_destroy_cq_cmd_attr_ids { UVERBS_ATTR_DESTROY_CQ_RESP, }; +enum uverbs_attrs_create_flow_action_esp { + UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE, + UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS, + UVERBS_ATTR_FLOW_ACTION_ESP_ESN, + UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT, + UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY, + UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP, +}; + +enum uverbs_attrs_destroy_flow_action_esp { + UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE, +}; + enum uverbs_methods_cq { UVERBS_METHOD_CQ_CREATE, UVERBS_METHOD_CQ_DESTROY, }; +enum uverbs_methods_actions_flow_action_ops { + UVERBS_METHOD_FLOW_ACTION_ESP_CREATE, + UVERBS_METHOD_FLOW_ACTION_DESTROY, +}; + #endif diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h index 173629ecc09b..04e46ea517d3 100644 --- a/include/uapi/rdma/ib_user_ioctl_verbs.h +++ b/include/uapi/rdma/ib_user_ioctl_verbs.h @@ -40,4 +40,63 @@ #define RDMA_UAPI_PTR(_type, _name) __aligned_u64 _name #endif +enum ib_uverbs_flow_action_esp_keymat { + IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM, +}; + +enum ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo { + IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ, +}; + +struct ib_uverbs_flow_action_esp_keymat_aes_gcm { + __aligned_u64 iv; + __u32 iv_algo; /* Use enum ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo */ + + __u32 salt; + __u32 icv_len; + + __u32 key_len; + __u32 aes_key[256 / 32]; +}; + +enum ib_uverbs_flow_action_esp_replay { + IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE, + IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP, +}; + +struct ib_uverbs_flow_action_esp_replay_bmp { + __u32 size; +}; + +enum ib_uverbs_flow_action_esp_flags { + IB_UVERBS_FLOW_ACTION_ESP_FLAGS_INLINE_CRYPTO = 0UL << 0, /* Default */ + IB_UVERBS_FLOW_ACTION_ESP_FLAGS_FULL_OFFLOAD = 1UL << 0, + + IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TUNNEL = 0UL << 1, /* Default */ + IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TRANSPORT = 1UL << 1, + + IB_UVERBS_FLOW_ACTION_ESP_FLAGS_DECRYPT = 0UL << 2, /* Default */ + IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT = 1UL << 2, + + IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW = 1UL << 3, +}; + +struct ib_uverbs_flow_action_esp_encap { + /* This struct represents a list of pointers to flow_xxxx_filter that + * encapsulates the payload in ESP tunnel mode. 
+ */ + RDMA_UAPI_PTR(void *, val_ptr); /* pointer to a flow_xxxx_filter */ + RDMA_UAPI_PTR(struct ib_uverbs_flow_action_esp_encap *, next_ptr); + __u16 len; /* Len of the filter struct val_ptr points to */ + __u16 type; /* Use flow_spec_type enum */ +}; + +struct ib_uverbs_flow_action_esp { + __u32 spi; + __u32 seq; + __u32 tfc_pad; + __u32 flags; + __aligned_u64 hard_limit_pkts; +}; + #endif -- cgit v1.2.3-59-g8ed1b From 9b828441976ef719f1008a9855fff95a45e474b8 Mon Sep 17 00:00:00 2001 From: Matan Barak Date: Wed, 28 Mar 2018 09:27:46 +0300 Subject: IB/uverbs: Add action_handle flow steering specification Binding a flow_action to flow steering rule requires using a new specification. Therefore, adding such an IB_FLOW_SPEC_ACTION_HANDLE flow specification. Flow steering rules could use flow_action(s) and as of that we need to avoid deleting flow_action(s) as long as they're being used. Moreover, when the attached rules are deleted, action_handle reference count should be decremented. Introducing a new mechanism of flow resources to keep track on the attached action_handle(s). Later on, this mechanism should be extended to other attached flow steering resources like flow counters. Reviewed-by: Yishai Hadas Signed-off-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/uverbs.h | 8 +++ drivers/infiniband/core/uverbs_cmd.c | 86 +++++++++++++++++++++++++++--- drivers/infiniband/core/uverbs_std_types.c | 14 ++++- include/rdma/ib_verbs.h | 8 +++ include/uapi/rdma/ib_user_verbs.h | 13 +++++ 5 files changed, 121 insertions(+), 8 deletions(-) (limited to 'include/rdma/ib_verbs.h') diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index a94b5e7ee02a..1bac0b51686a 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -203,11 +203,18 @@ struct ib_ucq_object { u32 async_events_reported; }; +struct ib_uflow_resources; +struct ib_uflow_object { + struct ib_uobject uobject; + struct ib_uflow_resources *resources; +}; + extern const struct file_operations uverbs_event_fops; void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue); struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file, struct ib_device *ib_dev); void ib_uverbs_free_async_event_file(struct ib_uverbs_file *uverbs_file); +void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res); void ib_uverbs_release_ucq(struct ib_uverbs_file *file, struct ib_uverbs_completion_event_file *ev_file, @@ -254,6 +261,7 @@ struct ib_uverbs_flow_spec { struct ib_uverbs_flow_spec_ipv6 ipv6; struct ib_uverbs_flow_spec_action_tag flow_tag; struct ib_uverbs_flow_spec_action_drop drop; + struct ib_uverbs_flow_spec_action_handle action; }; }; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index f6ffe18df679..69050dd77421 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2739,8 +2739,52 @@ out_put: return ret ? 
ret : in_len; } -static int kern_spec_to_ib_spec_action(struct ib_uverbs_flow_spec *kern_spec, - union ib_flow_spec *ib_spec) +struct ib_uflow_resources { + size_t max; + size_t num; + struct ib_flow_action *collection[0]; +}; + +static struct ib_uflow_resources *flow_resources_alloc(size_t num_specs) +{ + struct ib_uflow_resources *resources; + + resources = + kmalloc(sizeof(*resources) + + num_specs * sizeof(*resources->collection), GFP_KERNEL); + + if (!resources) + return NULL; + + resources->num = 0; + resources->max = num_specs; + + return resources; +} + +void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res) +{ + unsigned int i; + + for (i = 0; i < uflow_res->num; i++) + atomic_dec(&uflow_res->collection[i]->usecnt); + + kfree(uflow_res); +} + +static void flow_resources_add(struct ib_uflow_resources *uflow_res, + struct ib_flow_action *action) +{ + WARN_ON(uflow_res->num >= uflow_res->max); + + atomic_inc(&action->usecnt); + uflow_res->collection[uflow_res->num++] = action; +} + +static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext, + struct ib_uverbs_flow_spec *kern_spec, + union ib_flow_spec *ib_spec, + struct ib_uflow_resources *uflow_res) { ib_spec->type = kern_spec->type; switch (ib_spec->type) { @@ -2759,6 +2803,21 @@ static int kern_spec_to_ib_spec_action(struct ib_uverbs_flow_spec *kern_spec, ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop); break; + case IB_FLOW_SPEC_ACTION_HANDLE: + if (kern_spec->action.size != + sizeof(struct ib_uverbs_flow_spec_action_handle)) + return -EOPNOTSUPP; + ib_spec->action.act = uobj_get_obj_read(flow_action, + UVERBS_OBJECT_FLOW_ACTION, + kern_spec->action.handle, + ucontext); + if (!ib_spec->action.act) + return -EINVAL; + ib_spec->action.size = + sizeof(struct ib_flow_spec_action_handle); + flow_resources_add(uflow_res, ib_spec->action.act); + uobj_put_obj_read(ib_spec->action.act); + break; default: return -EINVAL; } @@ -2900,14 +2959,17 @@ static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec, kern_filter_sz, ib_spec); } -static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, - union ib_flow_spec *ib_spec) +static int kern_spec_to_ib_spec(struct ib_ucontext *ucontext, + struct ib_uverbs_flow_spec *kern_spec, + union ib_flow_spec *ib_spec, + struct ib_uflow_resources *uflow_res) { if (kern_spec->reserved) return -EINVAL; if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG) - return kern_spec_to_ib_spec_action(kern_spec, ib_spec); + return kern_spec_to_ib_spec_action(ucontext, kern_spec, ib_spec, + uflow_res); else return kern_spec_to_ib_spec_filter(kern_spec, ib_spec); } @@ -3322,10 +3384,12 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, struct ib_uverbs_create_flow cmd; struct ib_uverbs_create_flow_resp resp; struct ib_uobject *uobj; + struct ib_uflow_object *uflow; struct ib_flow *flow_id; struct ib_uverbs_flow_attr *kern_flow_attr; struct ib_flow_attr *flow_attr; struct ib_qp *qp; + struct ib_uflow_resources *uflow_res; int err = 0; void *kern_spec; void *ib_spec; @@ -3403,6 +3467,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, err = -ENOMEM; goto err_put; } + uflow_res = flow_resources_alloc(cmd.flow_attr.num_of_specs); + if (!uflow_res) { + err = -ENOMEM; + goto err_free_flow_attr; + } flow_attr->type = kern_flow_attr->type; flow_attr->priority = kern_flow_attr->priority; @@ -3417,7 +3486,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) && 
cmd.flow_attr.size >= ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) { - err = kern_spec_to_ib_spec(kern_spec, ib_spec); + err = kern_spec_to_ib_spec(file->ucontext, kern_spec, ib_spec, + uflow_res); if (err) goto err_free; flow_attr->size += @@ -3439,6 +3509,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, } flow_id->uobject = uobj; uobj->object = flow_id; + uflow = container_of(uobj, typeof(*uflow), uobject); + uflow->resources = uflow_res; memset(&resp, 0, sizeof(resp)); resp.flow_handle = uobj->id; @@ -3457,6 +3529,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, err_copy: ib_destroy_flow(flow_id); err_free: + ib_uverbs_flow_resources_free(uflow_res); +err_free_flow_attr: kfree(flow_attr); err_put: uobj_put_obj_read(qp); diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c index 47b9a85f3854..173eab8d3482 100644 --- a/drivers/infiniband/core/uverbs_std_types.c +++ b/drivers/infiniband/core/uverbs_std_types.c @@ -48,7 +48,16 @@ static int uverbs_free_ah(struct ib_uobject *uobject, static int uverbs_free_flow(struct ib_uobject *uobject, enum rdma_remove_reason why) { - return ib_destroy_flow((struct ib_flow *)uobject->object); + int ret; + struct ib_flow *flow = (struct ib_flow *)uobject->object; + struct ib_uflow_object *uflow = + container_of(uobject, struct ib_uflow_object, uobject); + + ret = ib_destroy_flow(flow); + if (!ret) + ib_uverbs_flow_resources_free(uflow->resources); + + return ret; } static int uverbs_free_mw(struct ib_uobject *uobject, @@ -268,7 +277,8 @@ DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_AH, &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_ah)); DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_FLOW, - &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_flow)); + &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uflow_object), + 0, uverbs_free_flow)); DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_WQ, &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), 0, diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 49da92143341..c1b9cba79710 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1836,6 +1836,7 @@ enum ib_flow_spec_type { /* Actions */ IB_FLOW_SPEC_ACTION_TAG = 0x1000, IB_FLOW_SPEC_ACTION_DROP = 0x1001, + IB_FLOW_SPEC_ACTION_HANDLE = 0x1002, }; #define IB_FLOW_SPEC_LAYER_MASK 0xF0 #define IB_FLOW_SPEC_SUPPORT_LAYERS 8 @@ -1969,6 +1970,12 @@ struct ib_flow_spec_action_drop { u16 size; }; +struct ib_flow_spec_action_handle { + enum ib_flow_spec_type type; + u16 size; + struct ib_flow_action *act; +}; + union ib_flow_spec { struct { u32 type; @@ -1982,6 +1989,7 @@ union ib_flow_spec { struct ib_flow_spec_tunnel tunnel; struct ib_flow_spec_action_tag flow_tag; struct ib_flow_spec_action_drop drop; + struct ib_flow_spec_action_handle action; }; struct ib_flow_attr { diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index aa0615105563..ac41ce234186 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -984,6 +984,19 @@ struct ib_uverbs_flow_spec_action_drop { }; }; +struct ib_uverbs_flow_spec_action_handle { + union { + struct ib_uverbs_flow_spec_hdr hdr; + struct { + __u32 type; + __u16 size; + __u16 reserved; + }; + }; + __u32 handle; + __u32 reserved1; +}; + struct ib_uverbs_flow_tunnel_filter { __be32 tunnel_id; }; -- cgit v1.2.3-59-g8ed1b From 21e82d3e1dcf9ce61ae387ca1a507cf53665336a Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Wed, 28 Mar 2018 09:27:47 +0300 Subject: IB/uverbs: Introduce egress flow steering 
The egress flag indicates that this flow steering rule is for egress traffic. The scope of an egress rule is port-wide, meaning all packets originated from that port, which match the steering rule specification will be effected by this steering rule's action. Reviewed-by: Yishai Hadas Signed-off-by: Boris Pismenny Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- include/rdma/ib_verbs.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/rdma/ib_verbs.h') diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index c1b9cba79710..c674fc1e596b 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1854,7 +1854,8 @@ enum ib_flow_domain { enum ib_flow_flags { IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */ - IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2 /* Must be last */ + IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */ + IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3 /* Must be last */ }; struct ib_flow_eth_filter { -- cgit v1.2.3-59-g8ed1b From 7d12f8d5a1645275dd452138bf1fe478be736704 Mon Sep 17 00:00:00 2001 From: Matan Barak Date: Wed, 28 Mar 2018 09:27:48 +0300 Subject: IB/uverbs: Add modify ESP flow_action flow_actions of ESP type could be modified during runtime. This could be common for example when ESN should be changed. Adding a new UVERBS_FLOW_ACTION_ESP_MODIFY method for changing ESP parameters of an existing ESP flow_action. The new method uses the UVERBS_FLOW_ACTION_ESP_CREATE attributes, but adds a new IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS which means ESP_ATTRS should be changed. In addition, we add a new FLOW_ACTION_ESP_REPLAY_NONE replay type that could be used when one wants to disable a replay protection over a specific flow_action. Reviewed-by: Yishai Hadas Signed-off-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- .../infiniband/core/uverbs_std_types_flow_action.c | 97 ++++++++++++++++++++-- include/rdma/ib_verbs.h | 4 + include/uapi/rdma/ib_user_ioctl_cmds.h | 1 + 3 files changed, 97 insertions(+), 5 deletions(-) (limited to 'include/rdma/ib_verbs.h') diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c index 3d4c7b8dff48..cbcec3da12f6 100644 --- a/drivers/infiniband/core/uverbs_std_types_flow_action.c +++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c @@ -46,13 +46,17 @@ static int uverbs_free_flow_action(struct ib_uobject *uobject, } static u64 esp_flags_uverbs_to_verbs(struct uverbs_attr_bundle *attrs, - u32 flags) + u32 flags, bool is_modify) { u64 verbs_flags = flags; if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ESN)) verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED; + if (is_modify && uverbs_attr_is_valid(attrs, + UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS)) + verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS; + return verbs_flags; }; @@ -81,6 +85,32 @@ static int (* const flow_action_esp_keymat_validate[])(struct ib_flow_action_att [IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = validate_flow_action_esp_keymat_aes_gcm, }; +static int flow_action_esp_replay_none(struct ib_flow_action_attrs_esp_replays *replay, + bool is_modify) +{ + /* This is used in order to modify an esp flow action with an enabled + * replay protection to a disabled one. This is only supported via + * modify, as in create verb we can simply drop the REPLAY attribute and + * achieve the same thing. + */ + return is_modify ? 
0 : -EINVAL; +} + +static int flow_action_esp_replay_def_ok(struct ib_flow_action_attrs_esp_replays *replay, + bool is_modify) +{ + /* Some replay protections could always be enabled without validating + * anything. + */ + return 0; +} + +static int (* const flow_action_esp_replay_validate[])(struct ib_flow_action_attrs_esp_replays *replay, + bool is_modify) = { + [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = flow_action_esp_replay_none, + [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = flow_action_esp_replay_def_ok, +}; + static int parse_esp_ip(enum ib_flow_spec_type proto, const void __user *val_ptr, size_t len, union ib_flow_spec *out) @@ -194,7 +224,8 @@ struct ib_flow_action_esp_attr { static int parse_flow_action_esp(struct ib_device *ib_dev, struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs, - struct ib_flow_action_esp_attr *esp_attr) + struct ib_flow_action_esp_attr *esp_attr, + bool is_modify) { struct ib_uverbs_flow_action_esp uverbs_esp = {}; int ret; @@ -222,7 +253,8 @@ static int parse_flow_action_esp(struct ib_device *ib_dev, esp_attr->hdr.tfc_pad = uverbs_esp.tfc_pad; esp_attr->hdr.hard_limit_pkts = uverbs_esp.hard_limit_pkts; } - esp_attr->hdr.flags = esp_flags_uverbs_to_verbs(attrs, uverbs_esp.flags); + esp_attr->hdr.flags = esp_flags_uverbs_to_verbs(attrs, uverbs_esp.flags, + is_modify); if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT)) { esp_attr->keymat.protocol = @@ -252,6 +284,11 @@ static int parse_flow_action_esp(struct ib_device *ib_dev, if (ret) return ret; + ret = flow_action_esp_replay_validate[esp_attr->replay.protocol](&esp_attr->replay, + is_modify); + if (ret) + return ret; + esp_attr->hdr.replay = &esp_attr->replay; } @@ -278,7 +315,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(struct ib_device if (!ib_dev->create_flow_action_esp) return -EOPNOTSUPP; - ret = parse_flow_action_esp(ib_dev, file, attrs, &esp_attr); + ret = parse_flow_action_esp(ib_dev, file, attrs, &esp_attr, false); if (ret) return ret; @@ -297,6 +334,33 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(struct ib_device return 0; } +static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(struct ib_device *ib_dev, + struct ib_uverbs_file *file, + struct uverbs_attr_bundle *attrs) +{ + int ret; + struct ib_uobject *uobj; + struct ib_flow_action *action; + struct ib_flow_action_esp_attr esp_attr = {}; + + if (!ib_dev->modify_flow_action_esp) + return -EOPNOTSUPP; + + ret = parse_flow_action_esp(ib_dev, file, attrs, &esp_attr, true); + if (ret) + return ret; + + uobj = uverbs_attr_get(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE)->obj_attr.uobject; + action = uobj->object; + + if (action->type != IB_FLOW_ACTION_ESP) + return -EINVAL; + + return ib_dev->modify_flow_action_esp(action, + &esp_attr.hdr, + attrs); +} + static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = { [IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = { .ptr = { @@ -308,6 +372,13 @@ static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = { }; static const struct uverbs_attr_spec uverbs_flow_action_esp_replay[] = { + [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = { + .ptr = { + .type = UVERBS_ATTR_TYPE_PTR_IN, + /* No need to specify any data */ + .len = 0, + } + }, [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = { .ptr = { .type = UVERBS_ATTR_TYPE_PTR_IN, @@ -334,6 +405,21 @@ static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE, &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP, UVERBS_ATTR_STRUCT(struct 
ib_uverbs_flow_action_esp_encap, type))); +static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY, + &UVERBS_ATTR_IDR(UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE, UVERBS_OBJECT_FLOW_ACTION, + UVERBS_ACCESS_WRITE, + UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), + &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS, + UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp, hard_limit_pkts), + UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO)), + &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN, UVERBS_ATTR_TYPE(__u32)), + &UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT, + uverbs_flow_action_esp_keymat), + &UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY, + uverbs_flow_action_esp_replay), + &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP, + UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_encap, type))); + static DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(UVERBS_METHOD_FLOW_ACTION_DESTROY, uverbs_destroy_def_handler, &UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE, @@ -344,5 +430,6 @@ static DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(UVERBS_METHOD_FLOW_ACTION_DESTRO DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_FLOW_ACTION, &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_flow_action), &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE), - &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY)); + &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY), + &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index c674fc1e596b..8c3ca073016d 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2038,6 +2038,7 @@ enum ib_flow_action_attrs_esp_flags { /* Kernel flags */ IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32, + IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33, }; struct ib_flow_spec_list { @@ -2404,6 +2405,9 @@ struct ib_device { const struct ib_flow_action_attrs_esp *attr, struct uverbs_attr_bundle *attrs); int (*destroy_flow_action)(struct ib_flow_action *action); + int (*modify_flow_action_esp)(struct ib_flow_action *action, + const struct ib_flow_action_attrs_esp *attr, + struct uverbs_attr_bundle *attrs); /** * rdma netdev operation diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h index d3a2c03ea4a8..500b64a444ad 100644 --- a/include/uapi/rdma/ib_user_ioctl_cmds.h +++ b/include/uapi/rdma/ib_user_ioctl_cmds.h @@ -97,6 +97,7 @@ enum uverbs_methods_cq { enum uverbs_methods_actions_flow_action_ops { UVERBS_METHOD_FLOW_ACTION_ESP_CREATE, UVERBS_METHOD_FLOW_ACTION_DESTROY, + UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY, }; #endif -- cgit v1.2.3-59-g8ed1b From 56ab0b38b80e5771920e163cc9bd52504b03f539 Mon Sep 17 00:00:00 2001 From: Matan Barak Date: Wed, 28 Mar 2018 09:27:49 +0300 Subject: IB/uverbs: Introduce ESP steering match filter Adding a new ESP steering match filter that could match against spi and seq used in IPSec protocol. 
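As a small illustration (a sketch, not part of the patch), a kernel caller could fill the new specification to match a single SPI as follows, relying on the ib_flow_spec_esp layout added below; the SPI value itself is arbitrary here.

union ib_flow_spec spec = {
        .esp = {
                .type = IB_FLOW_SPEC_ESP,
                .size = sizeof(struct ib_flow_spec_esp),
                .val  = { .spi = cpu_to_be32(0x1234) },         /* SPI to match */
                .mask = { .spi = cpu_to_be32(0xffffffff) },     /* exact match */
        },
};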
Reviewed-by: Yishai Hadas Signed-off-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/uverbs.h | 1 + drivers/infiniband/core/uverbs_cmd.c | 11 +++++++++++ include/rdma/ib_verbs.h | 16 ++++++++++++++++ include/uapi/rdma/ib_user_verbs.h | 18 ++++++++++++++++++ 4 files changed, 46 insertions(+) (limited to 'include/rdma/ib_verbs.h') diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index 1bac0b51686a..3229e87d03cb 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -257,6 +257,7 @@ struct ib_uverbs_flow_spec { }; struct ib_uverbs_flow_spec_eth eth; struct ib_uverbs_flow_spec_ipv4 ipv4; + struct ib_uverbs_flow_spec_esp esp; struct ib_uverbs_flow_spec_tcp_udp tcp_udp; struct ib_uverbs_flow_spec_ipv6 ipv6; struct ib_uverbs_flow_spec_action_tag flow_tag; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 69050dd77421..f38600490fd1 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2931,6 +2931,17 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type, (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24)) return -EINVAL; break; + case IB_FLOW_SPEC_ESP: + ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz); + actual_filter_sz = spec_filter_size(kern_spec_mask, + kern_filter_sz, + ib_filter_sz); + if (actual_filter_sz <= 0) + return -EINVAL; + ib_spec->esp.size = sizeof(struct ib_flow_spec_esp); + memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz); + memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz); + break; default: return -EINVAL; } diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 8c3ca073016d..a6dba77c1b28 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1828,6 +1828,7 @@ enum ib_flow_spec_type { /* L3 header*/ IB_FLOW_SPEC_IPV4 = 0x30, IB_FLOW_SPEC_IPV6 = 0x31, + IB_FLOW_SPEC_ESP = 0x34, /* L4 headers*/ IB_FLOW_SPEC_TCP = 0x40, IB_FLOW_SPEC_UDP = 0x41, @@ -1960,6 +1961,20 @@ struct ib_flow_spec_tunnel { struct ib_flow_tunnel_filter mask; }; +struct ib_flow_esp_filter { + __be32 spi; + __be32 seq; + /* Must be last */ + u8 real_sz[0]; +}; + +struct ib_flow_spec_esp { + u32 type; + u16 size; + struct ib_flow_esp_filter val; + struct ib_flow_esp_filter mask; +}; + struct ib_flow_spec_action_tag { enum ib_flow_spec_type type; u16 size; @@ -1988,6 +2003,7 @@ union ib_flow_spec { struct ib_flow_spec_tcp_udp tcp_udp; struct ib_flow_spec_ipv6 ipv6; struct ib_flow_spec_tunnel tunnel; + struct ib_flow_spec_esp esp; struct ib_flow_spec_action_tag flow_tag; struct ib_flow_spec_action_drop drop; struct ib_flow_spec_action_handle action; diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index ac41ce234186..df5d339952fe 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -1014,6 +1014,24 @@ struct ib_uverbs_flow_spec_tunnel { struct ib_uverbs_flow_tunnel_filter mask; }; +struct ib_uverbs_flow_spec_esp_filter { + __u32 spi; + __u32 seq; +}; + +struct ib_uverbs_flow_spec_esp { + union { + struct ib_uverbs_flow_spec_hdr hdr; + struct { + __u32 type; + __u16 size; + __u16 reserved; + }; + }; + struct ib_uverbs_flow_spec_esp_filter val; + struct ib_uverbs_flow_spec_esp_filter mask; +}; + struct ib_uverbs_flow_attr { __u32 type; __u16 size; -- cgit v1.2.3-59-g8ed1b From 1d8eeb9f6a6e0d8ac43a54fd95126044bf8d6695 Mon Sep 17 00:00:00 2001 From: Ariel 
Levkovich Date: Thu, 5 Apr 2018 18:53:23 +0300 Subject: IB/uverbs: Add device memory capabilities reporting This change allows vendors to report device memory capability max_dm_size - to user via uverbs command. Signed-off-by: Ariel Levkovich Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/uverbs_cmd.c | 6 ++++++ include/rdma/ib_verbs.h | 1 + 2 files changed, 7 insertions(+) (limited to 'include/rdma/ib_verbs.h') diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index f38600490fd1..13cb5e4deb86 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -4006,6 +4006,12 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, resp.cq_moderation_caps.max_cq_moderation_period = attr.cq_caps.max_cq_moderation_period; resp.response_length += sizeof(resp.cq_moderation_caps); + + if (ucore->outlen < resp.response_length + sizeof(resp.max_dm_size)) + goto end; + + resp.max_dm_size = attr.max_dm_size; + resp.response_length += sizeof(resp.max_dm_size); end: err = ib_copy_to_udata(ucore, &resp, resp.response_length); return err; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index a6dba77c1b28..ed425627efd8 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -372,6 +372,7 @@ struct ib_device_attr { u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */ struct ib_tm_caps tm_caps; struct ib_cq_caps cq_caps; + u64 max_dm_size; }; enum ib_mtu { -- cgit v1.2.3-59-g8ed1b From bee76d7ab5d270919e80e4764df7cd7e4f06ed24 Mon Sep 17 00:00:00 2001 From: Ariel Levkovich Date: Thu, 5 Apr 2018 18:53:24 +0300 Subject: IB/uverbs: Add alloc/free dm uverbs ioctl support This change adds uverbs support for allocation/freeing of device memory commands. A new uverbs object is defined of type idr to represent and track the new resource type allocation per context. The API requires provider driver to implement 2 new ib_device callbacks - one for allocation and one for deallocation which return and accept (respectively) the ib_dm object which represents the allocated memory on the device. The support is added via the ioctl command infrastructure only. 
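As an aside, a minimal provider-side sketch (not from this patch) of the shape the two new callbacks are expected to take; struct my_dm, my_dm_region_alloc() and my_dm_region_free() are hypothetical driver internals:

struct my_dm {
	struct ib_dm	ibdm;
	void		*region;	/* driver-private device-memory handle */
};

static struct ib_dm *my_alloc_dm(struct ib_device *ibdev,
				 struct ib_ucontext *context,
				 struct ib_dm_alloc_attr *attr,
				 struct uverbs_attr_bundle *attrs)
{
	struct my_dm *dm;
	void *region;

	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return ERR_PTR(-ENOMEM);

	region = my_dm_region_alloc(ibdev, attr->length, attr->alignment);
	if (IS_ERR(region)) {
		kfree(dm);
		return ERR_CAST(region);
	}
	dm->region = region;

	/* The uverbs handler fills device, length, uobject and usecnt. */
	return &dm->ibdm;
}

static int my_dealloc_dm(struct ib_dm *ibdm)
{
	struct my_dm *dm = container_of(ibdm, struct my_dm, ibdm);

	my_dm_region_free(dm->region);
	kfree(dm);
	return 0;
}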
Signed-off-by: Ariel Levkovich Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/Makefile | 2 +- drivers/infiniband/core/uverbs.h | 1 + drivers/infiniband/core/uverbs_std_types.c | 3 +- drivers/infiniband/core/uverbs_std_types_dm.c | 108 ++++++++++++++++++++++++++ include/rdma/ib_verbs.h | 20 ++++- include/uapi/rdma/ib_user_ioctl_cmds.h | 15 ++++ 6 files changed, 146 insertions(+), 3 deletions(-) create mode 100644 drivers/infiniband/core/uverbs_std_types_dm.c (limited to 'include/rdma/ib_verbs.h') diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index 445c5504f605..636da34f8308 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile @@ -35,4 +35,4 @@ ib_ucm-y := ucm.o ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \ rdma_core.o uverbs_std_types.o uverbs_ioctl.o \ uverbs_ioctl_merge.o uverbs_std_types_cq.o \ - uverbs_std_types_flow_action.o + uverbs_std_types_flow_action.o uverbs_std_types_dm.o diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index 3229e87d03cb..cfb51618ab7a 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -286,6 +286,7 @@ extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_WQ); extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL); extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_XRCD); extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION); +extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_DM); #define IB_UVERBS_DECLARE_CMD(name) \ ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \ diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c index 173eab8d3482..4fedf59ec396 100644 --- a/drivers/infiniband/core/uverbs_std_types.c +++ b/drivers/infiniband/core/uverbs_std_types.c @@ -311,7 +311,8 @@ static DECLARE_UVERBS_OBJECT_TREE(uverbs_default_objects, &UVERBS_OBJECT(UVERBS_OBJECT_WQ), &UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL), &UVERBS_OBJECT(UVERBS_OBJECT_XRCD), - &UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION)); + &UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION), + &UVERBS_OBJECT(UVERBS_OBJECT_DM)); const struct uverbs_object_tree_def *uverbs_default_get_objects(void) { diff --git a/drivers/infiniband/core/uverbs_std_types_dm.c b/drivers/infiniband/core/uverbs_std_types_dm.c new file mode 100644 index 000000000000..8b681575b615 --- /dev/null +++ b/drivers/infiniband/core/uverbs_std_types_dm.c @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "uverbs.h" +#include + +static int uverbs_free_dm(struct ib_uobject *uobject, + enum rdma_remove_reason why) +{ + struct ib_dm *dm = uobject->object; + + if (why == RDMA_REMOVE_DESTROY && atomic_read(&dm->usecnt)) + return -EBUSY; + + return dm->device->dealloc_dm(dm); +} + +static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(struct ib_device *ib_dev, + struct ib_uverbs_file *file, + struct uverbs_attr_bundle *attrs) +{ + struct ib_ucontext *ucontext = file->ucontext; + struct ib_dm_alloc_attr attr = {}; + struct ib_uobject *uobj; + struct ib_dm *dm; + int ret; + + if (!ib_dev->alloc_dm) + return -EOPNOTSUPP; + + ret = uverbs_copy_from(&attr.length, attrs, + UVERBS_ATTR_ALLOC_DM_LENGTH); + if (ret) + return ret; + + ret = uverbs_copy_from(&attr.alignment, attrs, + UVERBS_ATTR_ALLOC_DM_ALIGNMENT); + if (ret) + return ret; + + uobj = uverbs_attr_get(attrs, UVERBS_ATTR_ALLOC_DM_HANDLE)->obj_attr.uobject; + + dm = ib_dev->alloc_dm(ib_dev, ucontext, &attr, attrs); + if (IS_ERR(dm)) + return PTR_ERR(dm); + + dm->device = ib_dev; + dm->length = attr.length; + dm->uobject = uobj; + atomic_set(&dm->usecnt, 0); + + uobj->object = dm; + + return 0; +} + +static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_DM_ALLOC, + &UVERBS_ATTR_IDR(UVERBS_ATTR_ALLOC_DM_HANDLE, UVERBS_OBJECT_DM, + UVERBS_ACCESS_NEW, + UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), + &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DM_LENGTH, + UVERBS_ATTR_TYPE(u64), + UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), + &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DM_ALIGNMENT, + UVERBS_ATTR_TYPE(u32), + UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY))); + +static DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(UVERBS_METHOD_DM_FREE, + uverbs_destroy_def_handler, + &UVERBS_ATTR_IDR(UVERBS_ATTR_FREE_DM_HANDLE, + UVERBS_OBJECT_DM, + UVERBS_ACCESS_DESTROY, + UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY))); + +DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_DM, + /* 1 is used in order to free the DM after MRs */ + &UVERBS_TYPE_ALLOC_IDR(1, uverbs_free_dm), + &UVERBS_METHOD(UVERBS_METHOD_DM_ALLOC), + &UVERBS_METHOD(UVERBS_METHOD_DM_FREE)); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index ed425627efd8..6806c4f5657a 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -321,6 +321,12 @@ struct ib_cq_caps { u16 max_cq_moderation_period; }; +struct ib_dm_alloc_attr { + u64 length; + u32 alignment; + u32 flags; +}; + struct ib_device_attr { u64 fw_ver; __be64 sys_image_guid; @@ -1769,6 +1775,14 @@ struct ib_qp { struct rdma_restrack_entry res; }; +struct ib_dm { + struct ib_device *device; + u32 length; + u32 flags; + struct ib_uobject *uobject; + atomic_t usecnt; +}; + struct ib_mr { struct ib_device *device; struct ib_pd *pd; @@ -2425,7 +2439,11 @@ struct ib_device { int (*modify_flow_action_esp)(struct ib_flow_action *action, const struct ib_flow_action_attrs_esp *attr, struct uverbs_attr_bundle *attrs); - + struct ib_dm * (*alloc_dm)(struct ib_device *device, + struct ib_ucontext *context, + struct ib_dm_alloc_attr *attr, + struct uverbs_attr_bundle *attrs); + int 
(*dealloc_dm)(struct ib_dm *dm); /** * rdma netdev operation * diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h index 500b64a444ad..6034df2625c6 100644 --- a/include/uapi/rdma/ib_user_ioctl_cmds.h +++ b/include/uapi/rdma/ib_user_ioctl_cmds.h @@ -54,6 +54,7 @@ enum uverbs_default_objects { UVERBS_OBJECT_RWQ_IND_TBL, UVERBS_OBJECT_WQ, UVERBS_OBJECT_FLOW_ACTION, + UVERBS_OBJECT_DM, }; enum { @@ -100,4 +101,18 @@ enum uverbs_methods_actions_flow_action_ops { UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY, }; +enum uverbs_attrs_alloc_dm_cmd_attr_ids { + UVERBS_ATTR_ALLOC_DM_HANDLE, + UVERBS_ATTR_ALLOC_DM_LENGTH, + UVERBS_ATTR_ALLOC_DM_ALIGNMENT, +}; + +enum uverbs_attrs_free_dm_cmd_attr_ids { + UVERBS_ATTR_FREE_DM_HANDLE, +}; + +enum uverbs_methods_dm { + UVERBS_METHOD_DM_ALLOC, + UVERBS_METHOD_DM_FREE, +}; #endif -- cgit v1.2.3-59-g8ed1b From be934cca9e987e73eb20e3c80731a9580d5acc79 Mon Sep 17 00:00:00 2001 From: Ariel Levkovich Date: Thu, 5 Apr 2018 18:53:25 +0300 Subject: IB/uverbs: Add device memory registration ioctl support Adding new ioctl method for the MR object - REG_DM_MR. This command can be used by users to register an allocated device memory buffer as an MR and receive lkey and rkey to be used within work requests. It is added as a new method under the MR object and using a new ib_device callback - reg_dm_mr. The command creates a standard ib_mr object which represents the registered memory. Signed-off-by: Ariel Levkovich Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/Makefile | 3 +- drivers/infiniband/core/uverbs_std_types.c | 10 -- drivers/infiniband/core/uverbs_std_types_mr.c | 147 ++++++++++++++++++++++++++ drivers/infiniband/core/verbs.c | 6 +- include/rdma/ib_verbs.h | 11 ++ include/rdma/uverbs_ioctl.h | 12 +++ include/uapi/rdma/ib_user_ioctl_cmds.h | 16 +++ 7 files changed, 193 insertions(+), 12 deletions(-) create mode 100644 drivers/infiniband/core/uverbs_std_types_mr.c (limited to 'include/rdma/ib_verbs.h') diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index 636da34f8308..dda9e856e3fa 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile @@ -35,4 +35,5 @@ ib_ucm-y := ucm.o ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \ rdma_core.o uverbs_std_types.o uverbs_ioctl.o \ uverbs_ioctl_merge.o uverbs_std_types_cq.o \ - uverbs_std_types_flow_action.o uverbs_std_types_dm.o + uverbs_std_types_flow_action.o uverbs_std_types_dm.o \ + uverbs_std_types_mr.o diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c index 4fedf59ec396..569f48bd821e 100644 --- a/drivers/infiniband/core/uverbs_std_types.c +++ b/drivers/infiniband/core/uverbs_std_types.c @@ -144,12 +144,6 @@ static int uverbs_free_srq(struct ib_uobject *uobject, return ret; } -static int uverbs_free_mr(struct ib_uobject *uobject, - enum rdma_remove_reason why) -{ - return ib_dereg_mr((struct ib_mr *)uobject->object); -} - static int uverbs_free_xrcd(struct ib_uobject *uobject, enum rdma_remove_reason why) { @@ -265,10 +259,6 @@ DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_QP, DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MW, &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_mw)); -DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MR, - /* 1 is used in order to free the MR after all the MWs */ - &UVERBS_TYPE_ALLOC_IDR(1, uverbs_free_mr)); - DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_SRQ, &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object), 0, 
uverbs_free_srq)); diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c new file mode 100644 index 000000000000..68f7cadf088f --- /dev/null +++ b/drivers/infiniband/core/uverbs_std_types_mr.c @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "uverbs.h" +#include + +static int uverbs_free_mr(struct ib_uobject *uobject, + enum rdma_remove_reason why) +{ + return ib_dereg_mr((struct ib_mr *)uobject->object); +} + +static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(struct ib_device *ib_dev, + struct ib_uverbs_file *file, + struct uverbs_attr_bundle *attrs) +{ + struct ib_dm_mr_attr attr = {}; + struct ib_uobject *uobj; + struct ib_dm *dm; + struct ib_pd *pd; + struct ib_mr *mr; + int ret; + + if (!ib_dev->reg_dm_mr) + return -EOPNOTSUPP; + + ret = uverbs_copy_from(&attr.offset, attrs, UVERBS_ATTR_REG_DM_MR_OFFSET); + if (ret) + return ret; + + ret = uverbs_copy_from(&attr.length, attrs, + UVERBS_ATTR_REG_DM_MR_LENGTH); + if (ret) + return ret; + + ret = uverbs_copy_from(&attr.access_flags, attrs, + UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS); + if (ret) + return ret; + + if (!(attr.access_flags & IB_ZERO_BASED)) + return -EINVAL; + + ret = ib_check_mr_access(attr.access_flags); + if (ret) + return ret; + + pd = uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DM_MR_PD_HANDLE); + + dm = uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DM_MR_DM_HANDLE); + + uobj = uverbs_attr_get(attrs, UVERBS_ATTR_REG_DM_MR_HANDLE)->obj_attr.uobject; + + if (attr.offset > dm->length || attr.length > dm->length || + attr.length > dm->length - attr.offset) + return -EINVAL; + + mr = pd->device->reg_dm_mr(pd, dm, &attr, attrs); + if (IS_ERR(mr)) + return PTR_ERR(mr); + + mr->device = pd->device; + mr->pd = pd; + mr->dm = dm; + mr->uobject = uobj; + atomic_inc(&pd->usecnt); + atomic_inc(&dm->usecnt); + + uobj->object = mr; + + ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DM_MR_RESP_LKEY, &mr->lkey, + sizeof(mr->lkey)); + if (ret) + goto err_dereg; + + ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DM_MR_RESP_RKEY, + &mr->rkey, sizeof(mr->rkey)); + if (ret) + goto err_dereg; + + return 0; + +err_dereg: + ib_dereg_mr(mr); + 
+ return ret; +} + +static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_DM_MR_REG, + &UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_HANDLE, UVERBS_OBJECT_MR, + UVERBS_ACCESS_NEW, + UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), + &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_OFFSET, + UVERBS_ATTR_TYPE(u64), + UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), + &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_LENGTH, + UVERBS_ATTR_TYPE(u64), + UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), + &UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_PD_HANDLE, UVERBS_OBJECT_PD, + UVERBS_ACCESS_READ, + UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), + &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS, + UVERBS_ATTR_TYPE(u32), + UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), + &UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_DM_HANDLE, UVERBS_OBJECT_DM, + UVERBS_ACCESS_READ, + UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), + &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DM_MR_RESP_LKEY, + UVERBS_ATTR_TYPE(u32), + UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)), + &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DM_MR_RESP_RKEY, + UVERBS_ATTR_TYPE(u32), + UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY))); + +DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MR, + /* 1 is used in order to free the MR after all the MWs */ + &UVERBS_TYPE_ALLOC_IDR(1, uverbs_free_mr), + &UVERBS_METHOD(UVERBS_METHOD_DM_MR_REG)); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 95e3b307c93a..7eff3aeffe01 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1616,12 +1616,16 @@ EXPORT_SYMBOL(ib_resize_cq); int ib_dereg_mr(struct ib_mr *mr) { struct ib_pd *pd = mr->pd; + struct ib_dm *dm = mr->dm; int ret; rdma_restrack_del(&mr->res); ret = mr->device->dereg_mr(mr); - if (!ret) + if (!ret) { atomic_dec(&pd->usecnt); + if (dm) + atomic_dec(&dm->usecnt); + } return ret; } diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 6806c4f5657a..4bd24c48b1ad 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -321,6 +321,12 @@ struct ib_cq_caps { u16 max_cq_moderation_period; }; +struct ib_dm_mr_attr { + u64 length; + u64 offset; + u32 access_flags; +}; + struct ib_dm_alloc_attr { u64 length; u32 alignment; @@ -1797,6 +1803,8 @@ struct ib_mr { struct list_head qp_entry; /* FR */ }; + struct ib_dm *dm; + /* * Implementation details of the RDMA core, don't use in drivers: */ @@ -2444,6 +2452,9 @@ struct ib_device { struct ib_dm_alloc_attr *attr, struct uverbs_attr_bundle *attrs); int (*dealloc_dm)(struct ib_dm *dm); + struct ib_mr * (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm, + struct ib_dm_mr_attr *attr, + struct uverbs_attr_bundle *attrs); /** * rdma netdev operation * diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h index 3d6ac684b8f0..4a4201d997a7 100644 --- a/include/rdma/uverbs_ioctl.h +++ b/include/rdma/uverbs_ioctl.h @@ -408,6 +408,18 @@ static inline int uverbs_attr_get_enum_id(const struct uverbs_attr_bundle *attrs return attr->ptr_attr.enum_id; } +static inline void *uverbs_attr_get_obj(const struct uverbs_attr_bundle *attrs_bundle, + u16 idx) +{ + struct ib_uobject *uobj = + uverbs_attr_get(attrs_bundle, idx)->obj_attr.uobject; + + if (IS_ERR(uobj)) + return uobj; + + return uobj->object; +} + static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle, size_t idx, const void *from, size_t size) { diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h index 6034df2625c6..83e3890eef20 100644 --- a/include/uapi/rdma/ib_user_ioctl_cmds.h +++ 
b/include/uapi/rdma/ib_user_ioctl_cmds.h @@ -115,4 +115,20 @@ enum uverbs_methods_dm { UVERBS_METHOD_DM_ALLOC, UVERBS_METHOD_DM_FREE, }; + +enum uverbs_attrs_reg_dm_mr_cmd_attr_ids { + UVERBS_ATTR_REG_DM_MR_HANDLE, + UVERBS_ATTR_REG_DM_MR_OFFSET, + UVERBS_ATTR_REG_DM_MR_LENGTH, + UVERBS_ATTR_REG_DM_MR_PD_HANDLE, + UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS, + UVERBS_ATTR_REG_DM_MR_DM_HANDLE, + UVERBS_ATTR_REG_DM_MR_RESP_LKEY, + UVERBS_ATTR_REG_DM_MR_RESP_RKEY, +}; + +enum uverbs_methods_mr { + UVERBS_METHOD_DM_MR_REG, +}; + #endif -- cgit v1.2.3-59-g8ed1b
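Closing note on the last patch: a hedged provider-side sketch of the reg_dm_mr callback it introduces. struct my_mr and my_create_dm_mkey() are hypothetical driver internals; the uverbs handler shown above already performs the bounds and access-flag checks, sets device/pd/dm/uobject plus the usecnt bumps, and copies lkey/rkey back to user space, so the driver only has to program the key.

struct my_mr {
	struct ib_mr	ibmr;
	/* driver-private mkey state would live here */
};

static struct ib_mr *my_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				  struct ib_dm_mr_attr *attr,
				  struct uverbs_attr_bundle *attrs)
{
	struct my_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	/* Zero-based key over [offset, offset + length) of the DM buffer. */
	err = my_create_dm_mkey(pd, dm, attr->offset, attr->length,
				attr->access_flags,
				&mr->ibmr.lkey, &mr->ibmr.rkey);
	if (err) {
		kfree(mr);
		return ERR_PTR(err);
	}

	return &mr->ibmr;
}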