From c4367a26357be501338e41ceae7ebb7ce57064e5 Mon Sep 17 00:00:00 2001 From: Shamir Rabinovitch Date: Sun, 31 Mar 2019 19:10:05 +0300 Subject: IB: Pass uverbs_attr_bundle down ib_x destroy path The uverbs_attr_bundle with the ucontext is sent down to the driver's ib_x destroy path as ib_udata. The next patch will use the ib_udata to free the driver's destroy path from the dependency on 'uobject->context', as we already did for the create path. Signed-off-by: Shamir Rabinovitch Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/infiniband/hw/hns/hns_roce_hw_v1.c') diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 97515c340134..1863516f6be9 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -855,17 +855,17 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) create_lp_qp_failed: for (i -= 1; i >= 0; i--) { hr_qp = free_mr->mr_free_qp[i]; - if (hns_roce_v1_destroy_qp(&hr_qp->ibqp)) + if (hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL)) dev_err(dev, "Destroy qp %d for mr free failed!\n", i); } - hns_roce_dealloc_pd(pd); + hns_roce_dealloc_pd(pd, NULL); alloc_pd_failed: kfree(pd); alloc_mem_failed: - if (hns_roce_ib_destroy_cq(cq)) + if (hns_roce_ib_destroy_cq(cq, NULL)) dev_err(dev, "Destroy cq for create_lp_qp failed!\n"); return ret; @@ -888,17 +888,17 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev) if (!hr_qp) continue; - ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp); + ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL); if (ret) dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n", i, ret); } - ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq); + ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL); if (ret) dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret); - hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd); + hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL); } static int hns_roce_db_init(struct hns_roce_dev *hr_dev) @@ -1096,7 +1096,7 @@ free_work: } static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, - struct hns_roce_mr *mr) + struct hns_roce_mr *mr, struct ib_udata *udata) { struct device *dev = &hr_dev->pdev->dev; struct hns_roce_mr_free_work *mr_work; @@ -3921,7 +3921,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work) dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn); } -int hns_roce_v1_destroy_qp(struct ib_qp *ibqp) +int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); @@ -3998,7 +3998,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp) return 0; } -static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq) +static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); -- cgit v1.2.3-59-g8ed1b From ff23dfa134576e071ace69e91761d229a0f73139 Mon Sep 17 00:00:00 2001 From: Shamir Rabinovitch Date: Sun, 31 Mar 2019 19:10:07 +0300 Subject: IB: Pass only ib_udata in function prototypes Now that ib_udata is passed to all of the drivers' object create/destroy APIs, the ib_udata will carry the ib_ucontext for every user command. There is no need to also pass the ib_ucontext in the function prototypes. Make ib_udata the only argument passed.
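As a minimal sketch of the resulting convention (the "foo_*" names and fields below are hypothetical and not taken from any driver in this series; rdma_udata_to_drv_context() and ib_copy_to_udata() are the existing in-kernel helpers the converted drivers already use), a converted alloc_pd handler looks roughly like this:

  #include <rdma/ib_verbs.h>
  #include <rdma/uverbs_ioctl.h>

  struct foo_pd;

  /* Hypothetical driver context and PD, shown only to illustrate the
   * new calling convention.
   */
  struct foo_ucontext {
          struct ib_ucontext ibucontext;
          struct foo_pd *pd;
  };

  struct foo_pd {
          struct ib_pd ibpd;
          u32 pdn;
  };

  static int foo_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
  {
          /* For a user call the driver ucontext is recovered from the
           * udata; for a kernel call udata is NULL and so is uctx.
           */
          struct foo_ucontext *uctx = rdma_udata_to_drv_context(
                  udata, struct foo_ucontext, ibucontext);
          struct foo_pd *pd = container_of(ibpd, struct foo_pd, ibpd);

          pd->pdn = 0;    /* a real driver would allocate a HW PD here */

          if (udata) {
                  u32 resp = pd->pdn;

                  uctx->pd = pd;
                  if (ib_copy_to_udata(udata, &resp, sizeof(resp)))
                          return -EFAULT;
          }
          return 0;
  }

Kernel-internal callers simply pass udata == NULL, which is why the various "if (context)" checks in the hunks below become "if (udata)" checks.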
Signed-off-by: Shamir Rabinovitch Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cq.c | 2 +- drivers/infiniband/core/uverbs_cmd.c | 8 ++-- drivers/infiniband/core/uverbs_std_types_cq.c | 3 +- drivers/infiniband/core/verbs.c | 6 +-- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 21 ++++------- drivers/infiniband/hw/bnxt_re/ib_verbs.h | 4 +- drivers/infiniband/hw/cxgb3/iwch_provider.c | 16 ++++---- drivers/infiniband/hw/cxgb4/cq.c | 9 ++--- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1 - drivers/infiniband/hw/cxgb4/provider.c | 5 +-- drivers/infiniband/hw/hns/hns_roce_cq.c | 23 ++++++------ drivers/infiniband/hw/hns/hns_roce_device.h | 4 +- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 4 +- drivers/infiniband/hw/hns/hns_roce_pd.c | 5 +-- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 20 ++++------ drivers/infiniband/hw/mlx4/cq.c | 28 +++++++------- drivers/infiniband/hw/mlx4/doorbell.c | 6 ++- drivers/infiniband/hw/mlx4/main.c | 6 +-- drivers/infiniband/hw/mlx4/mlx4_ib.h | 4 +- drivers/infiniband/hw/mlx4/qp.c | 8 ++-- drivers/infiniband/hw/mlx4/srq.c | 3 +- drivers/infiniband/hw/mlx5/cq.c | 23 ++++++------ drivers/infiniband/hw/mlx5/main.c | 17 +++++---- drivers/infiniband/hw/mlx5/mlx5_ib.h | 4 +- drivers/infiniband/hw/mlx5/qp.c | 3 +- drivers/infiniband/hw/mthca/mthca_provider.c | 45 +++++++++++----------- drivers/infiniband/hw/nes/nes_verbs.c | 35 ++++++++--------- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 50 +++++++++++++++---------- drivers/infiniband/hw/ocrdma/ocrdma_verbs.h | 4 +- drivers/infiniband/hw/qedr/verbs.c | 19 ++++++---- drivers/infiniband/hw/qedr/verbs.h | 4 +- drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 4 +- drivers/infiniband/hw/usnic/usnic_ib_verbs.h | 4 +- drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | 12 +++--- drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c | 13 ++++--- drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h | 4 +- drivers/infiniband/sw/rdmavt/cq.c | 4 +- drivers/infiniband/sw/rdmavt/cq.h | 1 - drivers/infiniband/sw/rdmavt/mmap.c | 16 +++++--- drivers/infiniband/sw/rdmavt/mmap.h | 6 +-- drivers/infiniband/sw/rdmavt/pd.c | 4 +- drivers/infiniband/sw/rdmavt/pd.h | 3 +- drivers/infiniband/sw/rdmavt/qp.c | 5 +-- drivers/infiniband/sw/rdmavt/srq.c | 6 +-- drivers/infiniband/sw/rxe/rxe_cq.c | 10 ++--- drivers/infiniband/sw/rxe/rxe_loc.h | 16 ++++---- drivers/infiniband/sw/rxe/rxe_mmap.c | 14 ++++--- drivers/infiniband/sw/rxe/rxe_qp.c | 15 +++----- drivers/infiniband/sw/rxe/rxe_queue.c | 22 ++++------- drivers/infiniband/sw/rxe/rxe_queue.h | 15 +++----- drivers/infiniband/sw/rxe/rxe_srq.c | 14 +++---- drivers/infiniband/sw/rxe/rxe_verbs.c | 16 +++----- include/rdma/ib_verbs.h | 5 +-- 53 files changed, 271 insertions(+), 328 deletions(-) (limited to 'drivers/infiniband/hw/hns/hns_roce_hw_v1.c') diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index 4797eef549c3..a4c81992267c 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c @@ -147,7 +147,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private, struct ib_cq *cq; int ret = -ENOMEM; - cq = dev->ops.create_cq(dev, &cq_attr, NULL, NULL); + cq = dev->ops.create_cq(dev, &cq_attr, NULL); if (IS_ERR(cq)) return cq; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index da31dba33fc5..89b0f5420dfe 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -423,7 +423,7 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs) atomic_set(&pd->usecnt, 0); pd->res.type 
= RDMA_RESTRACK_PD; - ret = ib_dev->ops.alloc_pd(pd, uobj->context, &attrs->driver_udata); + ret = ib_dev->ops.alloc_pd(pd, &attrs->driver_udata); if (ret) goto err_alloc; @@ -594,8 +594,7 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs) } if (!xrcd) { - xrcd = ib_dev->ops.alloc_xrcd(ib_dev, obj->uobject.context, - &attrs->driver_udata); + xrcd = ib_dev->ops.alloc_xrcd(ib_dev, &attrs->driver_udata); if (IS_ERR(xrcd)) { ret = PTR_ERR(xrcd); goto err; @@ -1009,8 +1008,7 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs, attr.comp_vector = cmd->comp_vector; attr.flags = cmd->flags; - cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context, - &attrs->driver_udata); + cq = ib_dev->ops.create_cq(ib_dev, &attr, &attrs->driver_udata); if (IS_ERR(cq)) { ret = PTR_ERR(cq); goto err_file; diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c index cde608c268ff..977e386009fc 100644 --- a/drivers/infiniband/core/uverbs_std_types_cq.c +++ b/drivers/infiniband/core/uverbs_std_types_cq.c @@ -111,8 +111,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)( INIT_LIST_HEAD(&obj->comp_list); INIT_LIST_HEAD(&obj->async_list); - cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context, - &attrs->driver_udata); + cq = ib_dev->ops.create_cq(ib_dev, &attr, &attrs->driver_udata); if (IS_ERR(cq)) { ret = PTR_ERR(cq); goto err_event_file; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index ba9a89df815d..a479f4c12541 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -269,7 +269,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, pd->res.type = RDMA_RESTRACK_PD; rdma_restrack_set_task(&pd->res, caller); - ret = device->ops.alloc_pd(pd, NULL, NULL); + ret = device->ops.alloc_pd(pd, NULL); if (ret) { kfree(pd); return ERR_PTR(ret); @@ -1911,7 +1911,7 @@ struct ib_cq *__ib_create_cq(struct ib_device *device, { struct ib_cq *cq; - cq = device->ops.create_cq(device, cq_attr, NULL, NULL); + cq = device->ops.create_cq(device, cq_attr, NULL); if (!IS_ERR(cq)) { cq->device = device; @@ -2142,7 +2142,7 @@ struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller) if (!device->ops.alloc_xrcd) return ERR_PTR(-EOPNOTSUPP); - xrcd = device->ops.alloc_xrcd(device, NULL, NULL); + xrcd = device->ops.alloc_xrcd(device, NULL); if (!IS_ERR(xrcd)) { xrcd->device = device; xrcd->inode = NULL; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index a586ac28630b..04e3529ffe06 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -576,14 +576,12 @@ void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata) &pd->qplib_pd); } -int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *ucontext, - struct ib_udata *udata) +int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ibdev = ibpd->device; struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); - struct bnxt_re_ucontext *ucntx = container_of(ucontext, - struct bnxt_re_ucontext, - ib_uctx); + struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context( + udata, struct bnxt_re_ucontext, ib_uctx); struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd); int rc; @@ -2589,7 +2587,6 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev, const struct 
ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); @@ -2616,12 +2613,10 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev, if (entries > dev_attr->max_cq_wqes + 1) entries = dev_attr->max_cq_wqes + 1; - if (context) { + if (udata) { struct bnxt_re_cq_req req; - struct bnxt_re_ucontext *uctx = container_of - (context, - struct bnxt_re_ucontext, - ib_uctx); + struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context( + udata, struct bnxt_re_ucontext, ib_uctx); if (ib_copy_from_udata(&req, udata, sizeof(req))) { rc = -EFAULT; goto fail; @@ -2672,7 +2667,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev, atomic_inc(&rdev->cq_count); spin_lock_init(&cq->cq_lock); - if (context) { + if (udata) { struct bnxt_re_cq_resp resp; resp.cqid = cq->qplib_cq.id; @@ -2690,7 +2685,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev, return &cq->ib_cq; c2fail: - if (context) + if (udata) ib_umem_release(cq->umem); fail: kfree(cq->cql); diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index 44e49988600e..488dc735a260 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -163,8 +163,7 @@ int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num, int index, union ib_gid *gid); enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, u8 port_num); -int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata); +int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); void bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, @@ -197,7 +196,6 @@ int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, const struct ib_recv_wr **bad_recv_wr); struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata); int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc); diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index bf07e93aeb94..62b99d26f0d3 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -106,7 +106,6 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *ib_context, struct ib_udata *udata) { int entries = attr->cqe; @@ -114,7 +113,6 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, struct iwch_cq *chp; struct iwch_create_cq_resp uresp; struct iwch_create_cq_req ureq; - struct iwch_ucontext *ucontext = NULL; static int warned; size_t resplen; @@ -127,8 +125,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, if (!chp) return ERR_PTR(-ENOMEM); - if (ib_context) { - ucontext = to_iwch_ucontext(ib_context); + if (udata) { if (!t3a_device(rhp)) { if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) { kfree(chp); @@ -154,7 +151,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, entries = roundup_pow_of_two(entries); chp->cq.size_log2 = ilog2(entries); - if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) { + if (cxio_create_cq(&rhp->rdev, &chp->cq, !udata)) { kfree(chp); return ERR_PTR(-ENOMEM); } 
@@ -170,8 +167,10 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, return ERR_PTR(-ENOMEM); } - if (ucontext) { + if (udata) { struct iwch_mm_entry *mm; + struct iwch_ucontext *ucontext = rdma_udata_to_drv_context( + udata, struct iwch_ucontext, ibucontext); mm = kmalloc(sizeof *mm, GFP_KERNEL); if (!mm) { @@ -378,8 +377,7 @@ static void iwch_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata) cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid); } -static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata) +static int iwch_allocate_pd(struct ib_pd *pd, struct ib_udata *udata) { struct iwch_pd *php = to_iwch_pd(pd); struct ib_device *ibdev = pd->device; @@ -394,7 +392,7 @@ static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context, php->pdid = pdid; php->rhp = rhp; - if (context) { + if (udata) { struct iwch_alloc_pd_resp resp = {.pdid = php->pdid}; if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 571281888de0..52ce586621c6 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -994,7 +994,6 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *ib_context, struct ib_udata *udata) { int entries = attr->cqe; @@ -1003,10 +1002,11 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, struct c4iw_cq *chp; struct c4iw_create_cq ucmd; struct c4iw_create_cq_resp uresp; - struct c4iw_ucontext *ucontext = NULL; int ret, wr_len; size_t memsize, hwentries; struct c4iw_mm_entry *mm, *mm2; + struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context( + udata, struct c4iw_ucontext, ibucontext); pr_debug("ib_dev %p entries %d\n", ibdev, entries); if (attr->flags) @@ -1017,8 +1017,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, if (vector >= rhp->rdev.lldi.nciq) return ERR_PTR(-EINVAL); - if (ib_context) { - ucontext = to_c4iw_ucontext(ib_context); + if (udata) { if (udata->inlen < sizeof(ucmd)) ucontext->is_32b_cqe = 1; } @@ -1070,7 +1069,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, /* * memsize must be a multiple of the page size if its a user cq. 
*/ - if (ucontext) + if (udata) memsize = roundup(memsize, PAGE_SIZE); chp->cq.size = hwentries; diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 586fd1a00d33..4b721a261053 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -995,7 +995,6 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata); int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *ib_context, struct ib_udata *udata); int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr, diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 12f7d3ae6a53..0fbad47661cc 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -204,8 +204,7 @@ static void c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata) mutex_unlock(&rhp->rdev.stats.lock); } -static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata) +static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata) { struct c4iw_pd *php = to_c4iw_pd(pd); struct ib_device *ibdev = pd->device; @@ -220,7 +219,7 @@ static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context, php->pdid = pdid; php->rhp = rhp; - if (context) { + if (udata) { struct c4iw_alloc_pd_resp uresp = {.pdid = php->pdid}; if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 305c362ef5c6..9caf35061721 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -302,7 +302,6 @@ static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev, struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); @@ -314,6 +313,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, int vector = attr->comp_vector; int cq_entries = attr->cqe; int ret; + struct hns_roce_ucontext *context = rdma_udata_to_drv_context( + udata, struct hns_roce_ucontext, ibucontext); if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) { dev_err(dev, "Creat CQ failed. 
entries=%d, max=%d\n", @@ -332,7 +333,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, hr_cq->ib_cq.cqe = cq_entries - 1; spin_lock_init(&hr_cq->lock); - if (context) { + if (udata) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { dev_err(dev, "Failed to copy_from_udata.\n"); ret = -EFAULT; @@ -350,8 +351,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && (udata->outlen >= sizeof(resp))) { - ret = hns_roce_db_map_user(to_hr_ucontext(context), - udata, ucmd.db_addr, + ret = hns_roce_db_map_user(context, udata, ucmd.db_addr, &hr_cq->db); if (ret) { dev_err(dev, "cq record doorbell map failed!\n"); @@ -362,7 +362,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, } /* Get user space parameters */ - uar = &to_hr_ucontext(context)->uar; + uar = &context->uar; } else { if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) { ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1); @@ -401,7 +401,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, * problems if tptr is set to zero here, so we initialze it in user * space. */ - if (!context && hr_cq->tptr_addr) + if (!udata && hr_cq->tptr_addr) *hr_cq->tptr_addr = 0; /* Get created cq handler and carry out event */ @@ -409,7 +409,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, hr_cq->event = hns_roce_ib_cq_event; hr_cq->cq_depth = cq_entries; - if (context) { + if (udata) { resp.cqn = hr_cq->cqn; ret = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (ret) @@ -422,21 +422,20 @@ err_cqc: hns_roce_free_cq(hr_dev, hr_cq); err_dbmap: - if (context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && + if (udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && (udata->outlen >= sizeof(resp))) - hns_roce_db_unmap_user(to_hr_ucontext(context), - &hr_cq->db); + hns_roce_db_unmap_user(context, &hr_cq->db); err_mtt: hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); - if (context) + if (udata) ib_umem_release(hr_cq->umem); else hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe); err_db: - if (!context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) + if (!udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) hns_roce_free_db(hr_dev, &hr_cq->db); err_cq: diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 780a7ba204db..b23b13f06d58 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -1112,8 +1112,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata); -int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata); +int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); void hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc); @@ -1177,7 +1176,6 @@ int to_hr_qp_type(int qp_type); struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata); int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 1863516f6be9..98c6a41edefd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ 
b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -730,7 +730,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) /* Reserved cq for loop qp */ cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2; cq_init_attr.comp_vector = 0; - cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL); + cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL); if (IS_ERR(cq)) { dev_err(dev, "Create cq for reserved loop qp failed!"); return -ENOMEM; @@ -749,7 +749,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) goto alloc_mem_failed; pd->device = ibdev; - ret = hns_roce_alloc_pd(pd, NULL, NULL); + ret = hns_roce_alloc_pd(pd, NULL); if (ret) goto alloc_pd_failed; diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c index 504e6e466d72..813401384d78 100644 --- a/drivers/infiniband/hw/hns/hns_roce_pd.c +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c @@ -57,8 +57,7 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev) hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap); } -int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ib_dev = ibpd->device; struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); @@ -72,7 +71,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, return ret; } - if (context) { + if (udata) { struct hns_roce_ib_alloc_pd_resp uresp = {.pdn = pd->pdn}; if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index fcb9e2448a49..7bf7fe854464 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -291,18 +291,15 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_ /** * i40iw_alloc_pd - allocate protection domain * @pd: PD pointer - * @context: user context created during alloc * @udata: user data */ -static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata) +static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) { struct i40iw_pd *iwpd = to_iwpd(pd); struct i40iw_device *iwdev = to_iwdev(pd->device); struct i40iw_sc_dev *dev = &iwdev->sc_dev; struct i40iw_alloc_pd_resp uresp; struct i40iw_sc_pd *sc_pd; - struct i40iw_ucontext *ucontext; u32 pd_id = 0; int err; @@ -318,8 +315,9 @@ static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, sc_pd = &iwpd->sc_pd; - if (context) { - ucontext = to_ucontext(context); + if (udata) { + struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context( + udata, struct i40iw_ucontext, ibucontext); dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver); memset(&uresp, 0, sizeof(uresp)); uresp.pd_id = pd_id; @@ -1091,12 +1089,10 @@ static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) * i40iw_create_cq - create cq * @ibdev: device pointer from stack * @attr: attributes for cq - * @context: user context created during alloc * @udata: user data */ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { struct i40iw_device *iwdev = to_iwdev(ibdev); @@ -1146,14 +1142,14 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev, info.ceq_id_valid = true; info.ceqe_mask = 1; info.type = I40IW_CQ_TYPE_IWARP; - if (context) { - struct i40iw_ucontext *ucontext; + if 
(udata) { + struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context( + udata, struct i40iw_ucontext, ibucontext); struct i40iw_create_cq_req req; struct i40iw_cq_mr *cqmr; memset(&req, 0, sizeof(req)); iwcq->user_mode = true; - ucontext = to_ucontext(context); if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) { err_code = -EFAULT; goto cq_free_resources; @@ -1223,7 +1219,7 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev, goto cq_free_resources; } - if (context) { + if (udata) { struct i40iw_create_cq_resp resp; memset(&resp, 0, sizeof(resp)); diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 5403a1ff7cc2..022a0b4ea452 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -174,7 +174,6 @@ err_buf: #define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { int entries = attr->cqe; @@ -184,6 +183,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, struct mlx4_uar *uar; void *buf_addr; int err; + struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context( + udata, struct mlx4_ib_ucontext, ibucontext); if (entries < 1 || entries > dev->dev->caps.max_cqes) return ERR_PTR(-EINVAL); @@ -205,7 +206,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, INIT_LIST_HEAD(&cq->send_qp_list); INIT_LIST_HEAD(&cq->recv_qp_list); - if (context) { + if (udata) { struct mlx4_ib_create_cq ucmd; if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { @@ -219,12 +220,11 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, if (err) goto err_cq; - err = mlx4_ib_db_map_user(to_mucontext(context), udata, - ucmd.db_addr, &cq->db); + err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db); if (err) goto err_mtt; - uar = &to_mucontext(context)->uar; + uar = &context->uar; cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS; } else { err = mlx4_db_alloc(dev->dev, &cq->db, 1); @@ -249,21 +249,21 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, if (dev->eq_table) vector = dev->eq_table[vector % ibdev->num_comp_vectors]; - err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, - cq->db.dma, &cq->mcq, vector, 0, + err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma, + &cq->mcq, vector, 0, !!(cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION), - buf_addr, !!context); + buf_addr, !!udata); if (err) goto err_dbmap; - if (context) + if (udata) cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp; else cq->mcq.comp = mlx4_ib_cq_comp; cq->mcq.event = mlx4_ib_cq_event; - if (context) + if (udata) if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { err = -EFAULT; goto err_cq_free; @@ -275,19 +275,19 @@ err_cq_free: mlx4_cq_free(dev->dev, &cq->mcq); err_dbmap: - if (context) - mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db); + if (udata) + mlx4_ib_db_unmap_user(context, &cq->db); err_mtt: mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt); - if (context) + if (udata) ib_umem_release(cq->umem); else mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); err_db: - if (!context) + if (!udata) mlx4_db_free(dev->dev, &cq->db); err_cq: diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c index 3aab71b29ce8..0f390351cef0 100644 --- a/drivers/infiniband/hw/mlx4/doorbell.c +++ b/drivers/infiniband/hw/mlx4/doorbell.c @@ -31,6 +31,7 @@ */ #include +#include #include "mlx4_ib.h" @@ -41,12 +42,13 
@@ struct mlx4_ib_user_db_page { int refcnt; }; -int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, - struct ib_udata *udata, unsigned long virt, +int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt, struct mlx4_db *db) { struct mlx4_ib_user_db_page *page; int err = 0; + struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context( + udata, struct mlx4_ib_ucontext, ibucontext); mutex_lock(&context->db_page_mutex); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index e50f9de71119..952b1bac46db 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1177,8 +1177,7 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) } } -static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct mlx4_ib_pd *pd = to_mpd(ibpd); struct ib_device *ibdev = ibpd->device; @@ -1188,7 +1187,7 @@ static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, if (err) return err; - if (context && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) { + if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) { mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn); return -EFAULT; } @@ -1201,7 +1200,6 @@ static void mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) } static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev, - struct ib_ucontext *context, struct ib_udata *udata) { struct mlx4_ib_xrcd *xrcd; diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 58112b59cc7c..79143848b560 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -723,8 +723,7 @@ static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev) int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev); void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev); -int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, - struct ib_udata *udata, unsigned long virt, +int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt, struct mlx4_db *db); void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db); @@ -746,7 +745,6 @@ int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata); struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata); int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 99ceffe5cfec..364e16b5f8e1 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1041,11 +1041,11 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, goto err_mtt; if (qp_has_rq(init_attr)) { - err = mlx4_ib_db_map_user( - context, udata, - (src == MLX4_IB_QP_SRC) ? ucmd.qp.db_addr : + err = mlx4_ib_db_map_user(udata, + (src == MLX4_IB_QP_SRC) ? 
+ ucmd.qp.db_addr : ucmd.wq.db_addr, - &qp->db); + &qp->db); if (err) goto err_mtt; } diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index 2a20205d1662..94c3c334a672 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c @@ -131,8 +131,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, if (err) goto err_mtt; - err = mlx4_ib_db_map_user(ucontext, udata, ucmd.db_addr, - &srq->db); + err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db); if (err) goto err_mtt; } else { diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 5bed098ccdef..2e2e65f00257 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -679,8 +679,7 @@ static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format) } static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, - struct ib_ucontext *context, struct mlx5_ib_cq *cq, - int entries, u32 **cqb, + struct mlx5_ib_cq *cq, int entries, u32 **cqb, int *cqe_size, int *index, int *inlen) { struct mlx5_ib_create_cq ucmd = {}; @@ -691,6 +690,8 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, int ncont; void *cqc; int err; + struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context( + udata, struct mlx5_ib_ucontext, ibucontext); ucmdlen = udata->inlen < sizeof(ucmd) ? (sizeof(ucmd) - sizeof(ucmd.flags)) : sizeof(ucmd); @@ -715,8 +716,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, return err; } - err = mlx5_ib_db_map_user(to_mucontext(context), udata, ucmd.db_addr, - &cq->db); + err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db); if (err) goto err_umem; @@ -740,7 +740,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, MLX5_SET(cqc, cqc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT); - *index = to_mucontext(context)->bfregi.sys_pages[0]; + *index = context->bfregi.sys_pages[0]; if (ucmd.cqe_comp_en == 1) { int mini_cqe_format; @@ -782,14 +782,14 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD; } - MLX5_SET(create_cq_in, *cqb, uid, to_mucontext(context)->devx_uid); + MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid); return 0; err_cqb: kvfree(*cqb); err_db: - mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); + mlx5_ib_db_unmap_user(context, &cq->db); err_umem: ib_umem_release(cq->buf.umem); @@ -886,7 +886,6 @@ static void notify_soft_wc_handler(struct work_struct *work) struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { int entries = attr->cqe; @@ -927,8 +926,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, INIT_LIST_HEAD(&cq->list_recv_qp); if (udata) { - err = create_cq_user(dev, udata, context, cq, entries, - &cqb, &cqe_size, &index, &inlen); + err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size, + &index, &inlen); if (err) goto err_create; } else { @@ -965,7 +964,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn); cq->mcq.irqn = irqn; - if (context) + if (udata) cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp; else cq->mcq.comp = mlx5_ib_cq_comp; @@ -973,7 +972,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, INIT_LIST_HEAD(&cq->wc_list); - if (context) + if (udata) if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) { err = 
-EFAULT; goto err_cmd; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 769a5952a0f6..f706e1bd40ad 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2341,8 +2341,7 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs) return 0; } -static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct mlx5_ib_pd *pd = to_mpd(ibpd); struct ib_device *ibdev = ibpd->device; @@ -2351,8 +2350,10 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {}; u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {}; u16 uid = 0; + struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context( + udata, struct mlx5_ib_ucontext, ibucontext); - uid = context ? to_mucontext(context)->devx_uid : 0; + uid = context ? context->devx_uid : 0; MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD); MLX5_SET(alloc_pd_in, in, uid, uid); err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in), @@ -2362,7 +2363,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, pd->pdn = MLX5_GET(alloc_pd_out, out, pd); pd->uid = uid; - if (context) { + if (udata) { resp.pdn = pd->pdn; if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid); @@ -4749,11 +4750,11 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) devr->p0->uobject = NULL; atomic_set(&devr->p0->usecnt, 0); - ret = mlx5_ib_alloc_pd(devr->p0, NULL, NULL); + ret = mlx5_ib_alloc_pd(devr->p0, NULL); if (ret) goto error0; - devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL); + devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL); if (IS_ERR(devr->c0)) { ret = PTR_ERR(devr->c0); goto error1; @@ -4765,7 +4766,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) devr->c0->cq_context = NULL; atomic_set(&devr->c0->usecnt, 0); - devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); + devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL); if (IS_ERR(devr->x0)) { ret = PTR_ERR(devr->x0); goto error2; @@ -4776,7 +4777,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) mutex_init(&devr->x0->tgt_qp_mutex); INIT_LIST_HEAD(&devr->x0->tgt_qp_list); - devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); + devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL); if (IS_ERR(devr->x1)) { ret = PTR_ERR(devr->x1); goto error3; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index e45f59b0cc52..f7314d78aafd 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1083,7 +1083,6 @@ int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer, int buflen, size_t *bc); struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata); int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); @@ -1123,8 +1122,7 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_mad_hdr *out, size_t *out_mad_size, u16 *out_mad_pkey_index); struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata); + struct ib_udata *udata); int 
mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata); int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset); int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 940ac1caa590..3470a9c496b1 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -5632,8 +5632,7 @@ out: } struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata) + struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_ib_xrcd *xrcd; diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 9e4efd58c119..9a77374a327b 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -363,18 +363,17 @@ static int mthca_mmap_uar(struct ib_ucontext *context, return 0; } -static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ibdev = ibpd->device; struct mthca_pd *pd = to_mpd(ibpd); int err; - err = mthca_pd_alloc(to_mdev(ibdev), !context, pd); + err = mthca_pd_alloc(to_mdev(ibdev), !udata, pd); if (err) return err; - if (context) { + if (udata) { if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) { mthca_pd_free(to_mdev(ibdev), pd); return -EFAULT; @@ -634,7 +633,6 @@ static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { int entries = attr->cqe; @@ -642,6 +640,8 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, struct mthca_cq *cq; int nent; int err; + struct mthca_ucontext *context = rdma_udata_to_drv_context( + udata, struct mthca_ucontext, ibucontext); if (attr->flags) return ERR_PTR(-EINVAL); @@ -649,19 +649,19 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes) return ERR_PTR(-EINVAL); - if (context) { + if (udata) { if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) return ERR_PTR(-EFAULT); - err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, - to_mucontext(context)->db_tab, - ucmd.set_db_index, ucmd.set_db_page); + err = mthca_map_user_db(to_mdev(ibdev), &context->uar, + context->db_tab, ucmd.set_db_index, + ucmd.set_db_page); if (err) return ERR_PTR(err); - err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, - to_mucontext(context)->db_tab, - ucmd.arm_db_index, ucmd.arm_db_page); + err = mthca_map_user_db(to_mdev(ibdev), &context->uar, + context->db_tab, ucmd.arm_db_index, + ucmd.arm_db_page); if (err) goto err_unmap_set; } @@ -672,7 +672,7 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, goto err_unmap_arm; } - if (context) { + if (udata) { cq->buf.mr.ibmr.lkey = ucmd.lkey; cq->set_ci_db_index = ucmd.set_db_index; cq->arm_db_index = ucmd.arm_db_index; @@ -681,14 +681,13 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, for (nent = 1; nent <= entries; nent <<= 1) ; /* nothing */ - err = mthca_init_cq(to_mdev(ibdev), nent, - context ? to_mucontext(context) : NULL, - context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num, + err = mthca_init_cq(to_mdev(ibdev), nent, context, + udata ? 
ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num, cq); if (err) goto err_free; - if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) { + if (udata && ib_copy_to_udata(udata, &cq->cqn, sizeof(__u32))) { mthca_free_cq(to_mdev(ibdev), cq); err = -EFAULT; goto err_free; @@ -702,14 +701,14 @@ err_free: kfree(cq); err_unmap_arm: - if (context) - mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, - to_mucontext(context)->db_tab, ucmd.arm_db_index); + if (udata) + mthca_unmap_user_db(to_mdev(ibdev), &context->uar, + context->db_tab, ucmd.arm_db_index); err_unmap_set: - if (context) - mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, - to_mucontext(context)->db_tab, ucmd.set_db_index); + if (udata) + mthca_unmap_user_db(to_mdev(ibdev), &context->uar, + context->db_tab, ucmd.set_db_index); return ERR_PTR(err); } diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 244255b1e940..a3b5e8eecb98 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -640,22 +640,24 @@ static int nes_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) /** * nes_alloc_pd */ -static int nes_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata) +static int nes_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) { struct ib_device *ibdev = pd->device; struct nes_pd *nespd = to_nespd(pd); struct nes_vnic *nesvnic = to_nesvnic(ibdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter; - struct nes_ucontext *nesucontext; struct nes_alloc_pd_resp uresp; u32 pd_num = 0; int err; + struct nes_ucontext *nesucontext = rdma_udata_to_drv_context( + udata, struct nes_ucontext, ibucontext); - nes_debug(NES_DBG_PD, "nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n", - nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev, context, - netdev_refcnt_read(nesvnic->netdev)); + nes_debug( + NES_DBG_PD, + "nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n", + nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev, + &nesucontext->ibucontext, netdev_refcnt_read(nesvnic->netdev)); err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds, nesadapter->max_pd, &pd_num, &nesadapter->next_pd, NES_RESOURCE_PD); @@ -667,8 +669,7 @@ static int nes_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, nespd->pd_id = (pd_num << (PAGE_SHIFT-12)) + nesadapter->base_pd; - if (context) { - nesucontext = to_nesucontext(context); + if (udata) { nespd->mmap_db_index = find_next_zero_bit(nesucontext->allocated_doorbells, NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db); nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n", @@ -1375,7 +1376,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) */ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { int entries = attr->cqe; @@ -1420,9 +1420,10 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, nescq->hw_cq.cq_number = cq_num; nescq->ibcq.cqe = nescq->hw_cq.cq_size - 1; + if (udata) { + struct nes_ucontext *nes_ucontext = rdma_udata_to_drv_context( + udata, struct nes_ucontext, ibucontext); - if (context) { - nes_ucontext = to_nesucontext(context); if (ib_copy_from_udata(&req, udata, sizeof (struct nes_create_cq_req))) { nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); 
kfree(nescq); @@ -1489,7 +1490,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, cqp_request = nes_get_cqp_request(nesdev); if (cqp_request == NULL) { nes_debug(NES_DBG_CQ, "Failed to get a cqp_request.\n"); - if (!context) + if (!udata) pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, nescq->hw_cq.cq_pbase); else { @@ -1518,7 +1519,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, if (nesadapter->free_4kpbl == 0) { spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); nes_free_cqp_request(nesdev, cqp_request); - if (!context) + if (!udata) pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, nescq->hw_cq.cq_pbase); else { @@ -1540,7 +1541,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, if (nesadapter->free_256pbl == 0) { spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); nes_free_cqp_request(nesdev, cqp_request); - if (!context) + if (!udata) pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, nescq->hw_cq.cq_pbase); else { @@ -1566,7 +1567,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, (nescq->hw_cq.cq_number | ((u32)nesdev->ceq_index << 16))); - if (context) { + if (udata) { if (pbl_entries != 1) u64temp = (u64)nespbl->pbl_pbase; else @@ -1597,7 +1598,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, nescq->hw_cq.cq_number, ret); if ((!ret) || (cqp_request->major_code)) { nes_put_cqp_request(nesdev, cqp_request); - if (!context) + if (!udata) pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, nescq->hw_cq.cq_pbase); else { @@ -1611,7 +1612,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, } nes_put_cqp_request(nesdev, cqp_request); - if (context) { + if (udata) { /* free the nespbl */ pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase, nespbl->pbl_pbase); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index b8f891660516..cf7aeb963dce 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -47,6 +47,7 @@ #include #include #include +#include #include "ocrdma.h" #include "ocrdma_hw.h" @@ -367,6 +368,16 @@ static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd) return status; } +/* + * NOTE: + * + * ocrdma_ucontext must be used here because this function is also + * called from ocrdma_alloc_ucontext where ib_udata does not have + * valid ib_ucontext pointer. ib_uverbs_get_context does not call + * uobj_{alloc|get_xxx} helpers which are used to store the + * ib_ucontext in uverbs_attr_bundle wrapping the ib_udata. so + * ib_udata does NOT imply valid ib_ucontext here! 
+ */ static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd, struct ocrdma_ucontext *uctx, struct ib_udata *udata) @@ -593,7 +604,6 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) } static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd, - struct ib_ucontext *ib_ctx, struct ib_udata *udata) { int status; @@ -601,7 +611,8 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd, u64 dpp_page_addr = 0; u32 db_page_size; struct ocrdma_alloc_pd_uresp rsp; - struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx); + struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context( + udata, struct ocrdma_ucontext, ibucontext); memset(&rsp, 0, sizeof(rsp)); rsp.id = pd->id; @@ -639,18 +650,17 @@ dpp_map_err: return status; } -int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ibdev = ibpd->device; struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); struct ocrdma_pd *pd; - struct ocrdma_ucontext *uctx = NULL; int status; u8 is_uctx_pd = false; + struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context( + udata, struct ocrdma_ucontext, ibucontext); - if (udata && context) { - uctx = get_ocrdma_ucontext(context); + if (udata) { pd = ocrdma_get_ucontext_pd(uctx); if (pd) { is_uctx_pd = true; @@ -664,8 +674,8 @@ int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, goto exit; pd_mapping: - if (udata && context) { - status = ocrdma_copy_pd_uresp(dev, pd, context, udata); + if (udata) { + status = ocrdma_copy_pd_uresp(dev, pd, udata); if (status) goto err; } @@ -946,13 +956,17 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) } static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq, - struct ib_udata *udata, - struct ib_ucontext *ib_ctx) + struct ib_udata *udata) { int status; - struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx); + struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context( + udata, struct ocrdma_ucontext, ibucontext); struct ocrdma_create_cq_uresp uresp; + /* this must be user flow! 
*/ + if (!udata) + return -EINVAL; + memset(&uresp, 0, sizeof(uresp)); uresp.cq_id = cq->id; uresp.page_size = PAGE_ALIGN(cq->len); @@ -983,13 +997,13 @@ err: struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *ib_ctx, struct ib_udata *udata) { int entries = attr->cqe; struct ocrdma_cq *cq; struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); - struct ocrdma_ucontext *uctx = NULL; + struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context( + udata, struct ocrdma_ucontext, ibucontext); u16 pd_id = 0; int status; struct ocrdma_create_cq_ureq ureq; @@ -1011,18 +1025,16 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, INIT_LIST_HEAD(&cq->sq_head); INIT_LIST_HEAD(&cq->rq_head); - if (ib_ctx) { - uctx = get_ocrdma_ucontext(ib_ctx); + if (udata) pd_id = uctx->cntxt_pd->id; - } status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id); if (status) { kfree(cq); return ERR_PTR(status); } - if (ib_ctx) { - status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx); + if (udata) { + status = ocrdma_copy_cq_uresp(dev, cq, udata); if (status) goto ctx_err; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index 3636cbcbcaa4..dfdebe4e48e6 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -69,13 +69,11 @@ void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx); int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma); -int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx, - struct ib_udata *udata); +int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); void ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *ib_ctx, struct ib_udata *udata); int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 4cd16ad16430..44ab86718c2f 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include "qedr_hsi_rdma.h" @@ -436,8 +437,7 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) vma->vm_page_prot); } -int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ibdev = ibpd->device; struct qedr_dev *dev = get_qedr_dev(ibdev); @@ -446,7 +446,7 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, int rc; DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n", - (udata && context) ? "User Lib" : "Kernel"); + udata ? 
"User Lib" : "Kernel"); if (!dev->rdma_ctx) { DP_ERR(dev, "invalid RDMA context\n"); @@ -459,10 +459,12 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, pd->pd_id = pd_id; - if (udata && context) { + if (udata) { struct qedr_alloc_pd_uresp uresp = { .pd_id = pd_id, }; + struct qedr_ucontext *context = rdma_udata_to_drv_context( + udata, struct qedr_ucontext, ibucontext); rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (rc) { @@ -471,7 +473,7 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, return rc; } - pd->uctx = get_qedr_ucontext(context); + pd->uctx = context; pd->uctx->pd = pd; } @@ -816,9 +818,10 @@ int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) struct ib_cq *qedr_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *ib_ctx, struct ib_udata *udata) + struct ib_udata *udata) { - struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx); + struct qedr_ucontext *ctx = rdma_udata_to_drv_context( + udata, struct qedr_ucontext, ibucontext); struct qed_rdma_destroy_cq_out_params destroy_oparams; struct qed_rdma_destroy_cq_in_params destroy_iparams; struct qedr_dev *dev = get_qedr_dev(ibdev); @@ -906,7 +909,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev, cq->sig = QEDR_CQ_MAGIC_NUMBER; spin_lock_init(&cq->cq_lock); - if (ib_ctx) { + if (udata) { rc = qedr_copy_cq_uresp(dev, cq, udata); if (rc) goto err3; diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index cd9659ac2aad..46a9828b9777 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -47,13 +47,11 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); void qedr_dealloc_ucontext(struct ib_ucontext *uctx); int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma); -int qedr_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx, - struct ib_udata *udata); +int qedr_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); struct ib_cq *qedr_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *ib_ctx, struct ib_udata *udata); int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index e282eea8ecce..e9352750e029 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -447,8 +447,7 @@ int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, return 0; } -int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct usnic_ib_pd *pd = to_upd(ibpd); void *umem_pd; @@ -590,7 +589,6 @@ out_unlock: struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { struct ib_cq *cq; diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h index 349c8dc13a12..028f322f8e9b 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h @@ -50,8 +50,7 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid); int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 
*pkey); -int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata); +int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); void usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, @@ -61,7 +60,6 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata); struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata); int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length, diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c index 5ba278324134..d7deb19a2800 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c @@ -49,6 +49,7 @@ #include #include #include +#include #include "pvrdma.h" @@ -93,7 +94,6 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq, * pvrdma_create_cq - create completion queue * @ibdev: the device * @attr: completion queue attributes - * @context: user context * @udata: user data * * @return: ib_cq completion queue pointer on success, @@ -101,7 +101,6 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq, */ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { int entries = attr->cqe; @@ -116,6 +115,8 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp; struct pvrdma_create_cq_resp cq_resp = {0}; struct pvrdma_create_cq ucmd; + struct pvrdma_ucontext *context = rdma_udata_to_drv_context( + udata, struct pvrdma_ucontext, ibucontext); BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64); @@ -133,7 +134,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, } cq->ibcq.cqe = entries; - cq->is_kernel = !context; + cq->is_kernel = !udata; if (!cq->is_kernel) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { @@ -185,8 +186,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, memset(cmd, 0, sizeof(*cmd)); cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ; cmd->nchunks = npages; - cmd->ctx_handle = (context) ? - (u64)to_vucontext(context)->ctx_handle : 0; + cmd->ctx_handle = context ? context->ctx_handle : 0; cmd->cqe = entries; cmd->pdir_dma = cq->pdir.dir_dma; ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP); @@ -204,7 +204,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, spin_unlock_irqrestore(&dev->cq_tbl_lock, flags); if (!cq->is_kernel) { - cq->uar = &(to_vucontext(context)->uar); + cq->uar = &context->uar; /* Copy udata back. */ if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) { diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c index 19ff6004b477..0302fa3b6c85 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c @@ -50,6 +50,7 @@ #include #include #include +#include #include "pvrdma.h" @@ -419,13 +420,11 @@ int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) /** * pvrdma_alloc_pd - allocate protection domain * @ibpd: PD pointer - * @context: user context * @udata: user data * * @return: the ib_pd protection domain pointer on success, otherwise errno. 
*/ -int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ibdev = ibpd->device; struct pvrdma_pd *pd = to_vpd(ibpd); @@ -436,13 +435,15 @@ int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp; struct pvrdma_alloc_pd_resp pd_resp = {0}; int ret; + struct pvrdma_ucontext *context = rdma_udata_to_drv_context( + udata, struct pvrdma_ucontext, ibucontext); /* Check allowed max pds */ if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd)) return -ENOMEM; cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD; - cmd->ctx_handle = (context) ? to_vucontext(context)->ctx_handle : 0; + cmd->ctx_handle = context ? context->ctx_handle : 0; ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP); if (ret < 0) { dev_warn(&dev->pdev->dev, @@ -451,12 +452,12 @@ int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, goto err; } - pd->privileged = !context; + pd->privileged = !udata; pd->pd_handle = resp->pd_handle; pd->pdn = resp->pd_handle; pd_resp.pdn = resp->pd_handle; - if (context) { + if (udata) { if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) { dev_warn(&dev->pdev->dev, "failed to copy back protection domain\n"); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h index 2c8ba5bf8d0f..562b70e70e79 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h @@ -398,8 +398,7 @@ int pvrdma_modify_port(struct ib_device *ibdev, u8 port, int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); void pvrdma_dealloc_ucontext(struct ib_ucontext *context); -int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata); +int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); void pvrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc); struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, @@ -412,7 +411,6 @@ int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata); int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c index 6f7ff2384506..a06e6da7a026 100644 --- a/drivers/infiniband/sw/rdmavt/cq.c +++ b/drivers/infiniband/sw/rdmavt/cq.c @@ -168,7 +168,6 @@ static void send_complete(struct work_struct *work) * rvt_create_cq - create a completion queue * @ibdev: the device this completion queue is attached to * @attr: creation attributes - * @context: unused by the QLogic_IB driver * @udata: user data for libibverbs.so * * Called by ib_create_cq() in the generic verbs code. 
@@ -178,7 +177,6 @@ static void send_complete(struct work_struct *work) */ struct ib_cq *rvt_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { struct rvt_dev_info *rdi = ib_to_rvt(ibdev); @@ -232,7 +230,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev, if (udata && udata->outlen >= sizeof(__u64)) { int err; - cq->ip = rvt_create_mmap_info(rdi, sz, context, wc); + cq->ip = rvt_create_mmap_info(rdi, sz, udata, wc); if (!cq->ip) { ret = ERR_PTR(-ENOMEM); goto bail_wc; diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h index e42661ecdef8..3ad6faf18ecb 100644 --- a/drivers/infiniband/sw/rdmavt/cq.h +++ b/drivers/infiniband/sw/rdmavt/cq.h @@ -53,7 +53,6 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata); int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags); diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c index 6b712eecbd37..652f4a7efc1b 100644 --- a/drivers/infiniband/sw/rdmavt/mmap.c +++ b/drivers/infiniband/sw/rdmavt/mmap.c @@ -49,6 +49,7 @@ #include #include #include +#include #include "mmap.h" /** @@ -150,18 +151,19 @@ done: * rvt_create_mmap_info - allocate information for hfi1_mmap * @rdi: rvt dev struct * @size: size in bytes to map - * @context: user context + * @udata: user data (must be valid!) * @obj: opaque pointer to a cq, wq etc * * Return: rvt_mmap struct on success */ -struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, - u32 size, - struct ib_ucontext *context, - void *obj) +struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size, + struct ib_udata *udata, void *obj) { struct rvt_mmap_info *ip; + if (!udata) + return ERR_PTR(-EINVAL); + ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node); if (!ip) return ip; @@ -177,7 +179,9 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, INIT_LIST_HEAD(&ip->pending_mmaps); ip->size = size; - ip->context = context; + ip->context = + container_of(udata, struct uverbs_attr_bundle, driver_udata) + ->context; ip->obj = obj; kref_init(&ip->ref); diff --git a/drivers/infiniband/sw/rdmavt/mmap.h b/drivers/infiniband/sw/rdmavt/mmap.h index fab0e7b1daf9..02466c40bc1e 100644 --- a/drivers/infiniband/sw/rdmavt/mmap.h +++ b/drivers/infiniband/sw/rdmavt/mmap.h @@ -53,10 +53,8 @@ void rvt_mmap_init(struct rvt_dev_info *rdi); void rvt_release_mmap_info(struct kref *ref); int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); -struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, - u32 size, - struct ib_ucontext *context, - void *obj); +struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size, + struct ib_udata *udata, void *obj); void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip, u32 size, void *obj); diff --git a/drivers/infiniband/sw/rdmavt/pd.c b/drivers/infiniband/sw/rdmavt/pd.c index e84341282374..a403718f0b5e 100644 --- a/drivers/infiniband/sw/rdmavt/pd.c +++ b/drivers/infiniband/sw/rdmavt/pd.c @@ -51,15 +51,13 @@ /** * rvt_alloc_pd - allocate a protection domain * @ibpd: PD - * @context: optional user context * @udata: optional user data * * Allocate and keep track of a PD. 
* * Return: 0 on success */ -int rvt_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +int rvt_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ibdev = ibpd->device; struct rvt_dev_info *dev = ib_to_rvt(ibdev); diff --git a/drivers/infiniband/sw/rdmavt/pd.h b/drivers/infiniband/sw/rdmavt/pd.h index d0368a625e03..71ba76d72b1d 100644 --- a/drivers/infiniband/sw/rdmavt/pd.h +++ b/drivers/infiniband/sw/rdmavt/pd.h @@ -50,8 +50,7 @@ #include -int rvt_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata); +int rvt_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); void rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); #endif /* DEF_RDMAVTPD_H */ diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index e8bba7e56c29..90ed99f4b026 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -957,8 +957,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, size_t sg_list_sz; struct ib_qp *ret = ERR_PTR(-ENOMEM); struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device); - struct rvt_ucontext *ucontext = rdma_udata_to_drv_context( - udata, struct rvt_ucontext, ibucontext); void *priv = NULL; size_t sqsize; @@ -1131,8 +1129,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, } else { u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz; - qp->ip = rvt_create_mmap_info(rdi, s, - &ucontext->ibucontext, + qp->ip = rvt_create_mmap_info(rdi, s, udata, qp->r_rq.wq); if (!qp->ip) { ret = ERR_PTR(-ENOMEM); diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c index 3090b0935714..21d276eaf15a 100644 --- a/drivers/infiniband/sw/rdmavt/srq.c +++ b/drivers/infiniband/sw/rdmavt/srq.c @@ -78,8 +78,6 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd, struct ib_udata *udata) { struct rvt_dev_info *dev = ib_to_rvt(ibpd->device); - struct rvt_ucontext *ucontext = rdma_udata_to_drv_context( - udata, struct rvt_ucontext, ibucontext); struct rvt_srq *srq; u32 sz; struct ib_srq *ret; @@ -121,9 +119,7 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd, int err; u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz; - srq->ip = - rvt_create_mmap_info(dev, s, &ucontext->ibucontext, - srq->rq.wq); + srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq); if (!srq->ip) { ret = ERR_PTR(-ENOMEM); goto bail_wq; diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c index a57276f2cb84..ad3090131126 100644 --- a/drivers/infiniband/sw/rxe/rxe_cq.c +++ b/drivers/infiniband/sw/rxe/rxe_cq.c @@ -82,7 +82,7 @@ static void rxe_send_complete(unsigned long data) } int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, - int comp_vector, struct ib_ucontext *context, + int comp_vector, struct ib_udata *udata, struct rxe_create_cq_resp __user *uresp) { int err; @@ -94,7 +94,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, return -ENOMEM; } - err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context, + err = do_mmap_info(rxe, uresp ? 
&uresp->mi : NULL, udata, cq->queue->buf, cq->queue->buf_size, &cq->queue->ip); if (err) { vfree(cq->queue->buf); @@ -115,13 +115,13 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, } int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe, - struct rxe_resize_cq_resp __user *uresp) + struct rxe_resize_cq_resp __user *uresp, + struct ib_udata *udata) { int err; err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe, - sizeof(struct rxe_cqe), - cq->queue->ip ? cq->queue->ip->context : NULL, + sizeof(struct rxe_cqe), udata, uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock); if (!err) cq->ibcq.cqe = cqe; diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h index 3d8cef836f0d..775c23becaec 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -53,11 +53,12 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, int comp_vector); int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, - int comp_vector, struct ib_ucontext *context, + int comp_vector, struct ib_udata *udata, struct rxe_create_cq_resp __user *uresp); int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe, - struct rxe_resize_cq_resp __user *uresp); + struct rxe_resize_cq_resp __user *uresp, + struct ib_udata *udata); int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited); @@ -91,10 +92,8 @@ struct rxe_mmap_info { void rxe_mmap_release(struct kref *ref); -struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev, - u32 size, - struct ib_ucontext *context, - void *obj); +struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev, u32 size, + struct ib_udata *udata, void *obj); int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); @@ -224,13 +223,12 @@ int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask); int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, - struct ib_srq_init_attr *init, - struct ib_ucontext *context, + struct ib_srq_init_attr *init, struct ib_udata *udata, struct rxe_create_srq_resp __user *uresp); int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask, - struct rxe_modify_srq_cmd *ucmd); + struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata); void rxe_dealloc(struct ib_device *ib_dev); diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c index d22431e3a908..48f48122ddcb 100644 --- a/drivers/infiniband/sw/rxe/rxe_mmap.c +++ b/drivers/infiniband/sw/rxe/rxe_mmap.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "rxe.h" #include "rxe_loc.h" @@ -140,13 +141,14 @@ done: /* * Allocate information for rxe_mmap */ -struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, - u32 size, - struct ib_ucontext *context, - void *obj) +struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size, + struct ib_udata *udata, void *obj) { struct rxe_mmap_info *ip; + if (!udata) + return ERR_PTR(-EINVAL); + ip = kmalloc(sizeof(*ip), GFP_KERNEL); if (!ip) return NULL; @@ -165,7 +167,9 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, INIT_LIST_HEAD(&ip->pending_mmaps); ip->info.size = size; - ip->context = context; + ip->context = + container_of(udata, struct uverbs_attr_bundle, driver_udata) + ->context; ip->obj = obj; kref_init(&ip->ref); diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 
09ede70dc1e8..e2c6d1cedf41 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -217,8 +217,7 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, } static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, - struct ib_qp_init_attr *init, - struct ib_ucontext *context, + struct ib_qp_init_attr *init, struct ib_udata *udata, struct rxe_create_qp_resp __user *uresp) { int err; @@ -254,7 +253,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, if (!qp->sq.queue) return -ENOMEM; - err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, context, + err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata, qp->sq.queue->buf, qp->sq.queue->buf_size, &qp->sq.queue->ip); @@ -287,7 +286,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_init_attr *init, - struct ib_ucontext *context, + struct ib_udata *udata, struct rxe_create_qp_resp __user *uresp) { int err; @@ -308,7 +307,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, if (!qp->rq.queue) return -ENOMEM; - err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, context, + err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata, qp->rq.queue->buf, qp->rq.queue->buf_size, &qp->rq.queue->ip); if (err) { @@ -344,8 +343,6 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, struct rxe_cq *rcq = to_rcq(init->recv_cq); struct rxe_cq *scq = to_rcq(init->send_cq); struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL; - struct rxe_ucontext *ucontext = - rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc); rxe_add_ref(pd); rxe_add_ref(rcq); @@ -360,11 +357,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, rxe_qp_init_misc(rxe, qp, init); - err = rxe_qp_init_req(rxe, qp, init, &ucontext->ibuc, uresp); + err = rxe_qp_init_req(rxe, qp, init, udata, uresp); if (err) goto err1; - err = rxe_qp_init_resp(rxe, qp, init, &ucontext->ibuc, uresp); + err = rxe_qp_init_resp(rxe, qp, init, udata, uresp); if (err) goto err2; diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c index f84ab4469261..ff92704de32f 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.c +++ b/drivers/infiniband/sw/rxe/rxe_queue.c @@ -36,18 +36,15 @@ #include "rxe_loc.h" #include "rxe_queue.h" -int do_mmap_info(struct rxe_dev *rxe, - struct mminfo __user *outbuf, - struct ib_ucontext *context, - struct rxe_queue_buf *buf, - size_t buf_size, - struct rxe_mmap_info **ip_p) +int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf, + struct ib_udata *udata, struct rxe_queue_buf *buf, + size_t buf_size, struct rxe_mmap_info **ip_p) { int err; struct rxe_mmap_info *ip = NULL; if (outbuf) { - ip = rxe_create_mmap_info(rxe, buf_size, context, buf); + ip = rxe_create_mmap_info(rxe, buf_size, udata, buf); if (!ip) goto err1; @@ -153,12 +150,9 @@ static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q, return 0; } -int rxe_queue_resize(struct rxe_queue *q, - unsigned int *num_elem_p, - unsigned int elem_size, - struct ib_ucontext *context, - struct mminfo __user *outbuf, - spinlock_t *producer_lock, +int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p, + unsigned int elem_size, struct ib_udata *udata, + struct mminfo __user *outbuf, spinlock_t *producer_lock, spinlock_t *consumer_lock) { struct rxe_queue *new_q; @@ -170,7 +164,7 @@ int 
rxe_queue_resize(struct rxe_queue *q, if (!new_q) return -ENOMEM; - err = do_mmap_info(new_q->rxe, outbuf, context, new_q->buf, + err = do_mmap_info(new_q->rxe, outbuf, udata, new_q->buf, new_q->buf_size, &new_q->ip); if (err) { vfree(new_q->buf); diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h index 79ba4b320054..acd0a925481c 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.h +++ b/drivers/infiniband/sw/rxe/rxe_queue.h @@ -76,12 +76,9 @@ struct rxe_queue { unsigned int index_mask; }; -int do_mmap_info(struct rxe_dev *rxe, - struct mminfo __user *outbuf, - struct ib_ucontext *context, - struct rxe_queue_buf *buf, - size_t buf_size, - struct rxe_mmap_info **ip_p); +int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf, + struct ib_udata *udata, struct rxe_queue_buf *buf, + size_t buf_size, struct rxe_mmap_info **ip_p); void rxe_queue_reset(struct rxe_queue *q); @@ -89,10 +86,8 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem, unsigned int elem_size); -int rxe_queue_resize(struct rxe_queue *q, - unsigned int *num_elem_p, - unsigned int elem_size, - struct ib_ucontext *context, +int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p, + unsigned int elem_size, struct ib_udata *udata, struct mminfo __user *outbuf, /* Protect producers while resizing queue */ spinlock_t *producer_lock, diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c index c41a5fee81f7..d8459431534e 100644 --- a/drivers/infiniband/sw/rxe/rxe_srq.c +++ b/drivers/infiniband/sw/rxe/rxe_srq.c @@ -99,8 +99,7 @@ err1: } int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, - struct ib_srq_init_attr *init, - struct ib_ucontext *context, + struct ib_srq_init_attr *init, struct ib_udata *udata, struct rxe_create_srq_resp __user *uresp) { int err; @@ -128,7 +127,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, srq->rq.queue = q; - err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context, q->buf, + err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, q->buf, q->buf_size, &q->ip); if (err) { vfree(q->buf); @@ -149,7 +148,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask, - struct rxe_modify_srq_cmd *ucmd) + struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata) { int err; struct rxe_queue *q = srq->rq.queue; @@ -163,11 +162,8 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, mi = u64_to_user_ptr(ucmd->mmap_info_addr); err = rxe_queue_resize(q, &attr->max_wr, - rcv_wqe_size(srq->rq.max_sge), - srq->rq.queue->ip ? 
- srq->rq.queue->ip->context : - NULL, - mi, &srq->rq.producer_lock, + rcv_wqe_size(srq->rq.max_sge), udata, mi, + &srq->rq.producer_lock, &srq->rq.consumer_lock); if (err) goto err2; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index e625731ae42d..4f581af2ad54 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -176,8 +176,7 @@ static int rxe_port_immutable(struct ib_device *dev, u8 port_num, return 0; } -static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct rxe_dev *rxe = to_rdev(ibpd->device); struct rxe_pd *pd = to_rpd(ibpd); @@ -305,8 +304,6 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd, int err; struct rxe_dev *rxe = to_rdev(ibpd->device); struct rxe_pd *pd = to_rpd(ibpd); - struct rxe_ucontext *ucontext = - rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc); struct rxe_srq *srq; struct rxe_create_srq_resp __user *uresp = NULL; @@ -330,7 +327,7 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd, rxe_add_ref(pd); srq->pd = pd; - err = rxe_srq_from_init(rxe, srq, init, &ucontext->ibuc, uresp); + err = rxe_srq_from_init(rxe, srq, init, udata, uresp); if (err) goto err2; @@ -366,7 +363,7 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, if (err) goto err1; - err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd); + err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata); if (err) goto err1; @@ -799,7 +796,6 @@ err1: static struct ib_cq *rxe_create_cq(struct ib_device *dev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { int err; @@ -826,8 +822,8 @@ static struct ib_cq *rxe_create_cq(struct ib_device *dev, goto err1; } - err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, - context, uresp); + err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata, + uresp); if (err) goto err2; @@ -866,7 +862,7 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) if (err) goto err1; - err = rxe_cq_resize_queue(cq, cqe, uresp); + err = rxe_cq_resize_queue(cq, cqe, uresp, udata); if (err) goto err1; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 54e48dd36644..0e24f6b6c61d 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2394,8 +2394,7 @@ struct ib_device_ops { void (*dealloc_ucontext)(struct ib_ucontext *context); int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma); void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); - int (*alloc_pd)(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata); + int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata); void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata); struct ib_ah *(*create_ah)(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, u32 flags, @@ -2421,7 +2420,6 @@ struct ib_device_ops { int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata); struct ib_cq *(*create_cq)(struct ib_device *device, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata); int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); @@ -2456,7 +2454,6 @@ struct ib_device_ops { int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); struct ib_xrcd 
*(*alloc_xrcd)(struct ib_device *device, - struct ib_ucontext *ucontext, struct ib_udata *udata); int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); struct ib_flow *(*create_flow)(struct ib_qp *qp, -- cgit v1.2.3-59-g8ed1b

From fb24ea52f78e0d595852e09e3a55697c8f442189 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 22 Feb 2019 17:14:59 +0000
Subject: drivers: Remove explicit invocations of mmiowb()

mmiowb() is now implied by spin_unlock() on architectures that require it, so there is no reason to call it from driver code. This patch was generated using coccinelle:

@mmiowb@
@@
- mmiowb();

and invoked as:

$ for d in drivers include/linux/qed sound; do \
spatch --include-headers --sp-file mmiowb.cocci --dir $d --in-place; done

NOTE: mmiowb() has only ever guaranteed ordering in conjunction with spin_unlock(). However, pairing each mmiowb() removal in this patch with the corresponding call to spin_unlock() is not at all trivial, so there is a small chance that this change may regress any drivers incorrectly relying on mmiowb() to order MMIO writes between CPUs using lock-free synchronisation. If you've ended up bisecting to this commit, you can reintroduce the mmiowb() calls using wmb() instead, which should restore the old behaviour on all architectures other than some esoteric ia64 systems.

Acked-by: Linus Torvalds
Signed-off-by: Will Deacon
---
 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | 4 --- drivers/dma/txx9dmac.c | 3 --- drivers/firewire/ohci.c | 1 - drivers/gpu/drm/i915/intel_hdmi.c | 10 -------- drivers/ide/tx4939ide.c | 2 -- drivers/infiniband/hw/hfi1/chip.c | 3 --- drivers/infiniband/hw/hfi1/pio.c | 1 - drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 2 -- drivers/infiniband/hw/mlx4/qp.c | 6 ----- drivers/infiniband/hw/mlx5/qp.c | 1 - drivers/infiniband/hw/mthca/mthca_cmd.c | 6 ----- drivers/infiniband/hw/mthca/mthca_cq.c | 5 ---- drivers/infiniband/hw/mthca/mthca_qp.c | 17 ------------- drivers/infiniband/hw/mthca/mthca_srq.c | 6 ----- drivers/infiniband/hw/qedr/verbs.c | 12 --------- drivers/infiniband/hw/qib/qib_iba6120.c | 4 --- drivers/infiniband/hw/qib/qib_iba7220.c | 3 --- drivers/infiniband/hw/qib/qib_iba7322.c | 3 --- drivers/infiniband/hw/qib/qib_sd7220.c | 4 --- drivers/media/pci/dt3155/dt3155.c | 8 ------ drivers/memstick/host/jmb38x_ms.c | 4 --- drivers/misc/ioc4.c | 2 -- drivers/misc/mei/hw-me.c | 3 --- drivers/misc/tifm_7xx1.c | 1 - drivers/mmc/host/alcor.c | 1 - drivers/mmc/host/sdhci.c | 13 ---------- drivers/mmc/host/tifm_sd.c | 3 --- drivers/mmc/host/via-sdmmc.c | 10 -------- drivers/mtd/nand/raw/r852.c | 2 -- drivers/mtd/nand/raw/txx9ndfmc.c | 1 - drivers/net/ethernet/aeroflex/greth.c | 1 - drivers/net/ethernet/alacritech/slicoss.c | 4 --- drivers/net/ethernet/amazon/ena/ena_com.c | 1 - drivers/net/ethernet/atheros/atlx/atl1.c | 1 - drivers/net/ethernet/atheros/atlx/atl2.c | 1 - drivers/net/ethernet/broadcom/bnx2.c | 4 --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2 -- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 4 --- .../net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 1 - drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 29 ---------------------- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 1 - drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 2 -- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 4 --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 --- drivers/net/ethernet/broadcom/tg3.c | 6 ----- .../net/ethernet/cavium/liquidio/cn66xx_device.c | 10 -------- .../net/ethernet/cavium/liquidio/octeon_device.c | 1 -
drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 4 --- .../net/ethernet/cavium/liquidio/request_manager.c | 1 - drivers/net/ethernet/intel/e1000/e1000_main.c | 5 ---- drivers/net/ethernet/intel/e1000e/netdev.c | 7 ------ drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 2 -- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 5 ---- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 5 ---- drivers/net/ethernet/intel/iavf/iavf_txrx.c | 5 ---- drivers/net/ethernet/intel/ice/ice_txrx.c | 5 ---- drivers/net/ethernet/intel/igb/igb_main.c | 5 ---- drivers/net/ethernet/intel/igbvf/netdev.c | 4 --- drivers/net/ethernet/intel/igc/igc_main.c | 5 ---- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 5 ---- drivers/net/ethernet/marvell/sky2.c | 4 --- drivers/net/ethernet/mellanox/mlx4/catas.c | 4 --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 13 ---------- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 1 - drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2 -- drivers/net/ethernet/neterion/s2io.c | 2 -- drivers/net/ethernet/neterion/vxge/vxge-main.c | 5 ---- drivers/net/ethernet/neterion/vxge/vxge-traffic.c | 4 --- drivers/net/ethernet/qlogic/qed/qed_int.c | 13 ---------- drivers/net/ethernet/qlogic/qed/qed_spq.c | 3 --- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 8 ------ drivers/net/ethernet/qlogic/qede/qede_fp.c | 8 ------ drivers/net/ethernet/qlogic/qla3xxx.c | 1 - drivers/net/ethernet/qlogic/qlge/qlge.h | 1 - drivers/net/ethernet/qlogic/qlge/qlge_main.c | 1 - drivers/net/ethernet/renesas/ravb_main.c | 9 ------- drivers/net/ethernet/renesas/ravb_ptp.c | 3 --- drivers/net/ethernet/renesas/sh_eth.c | 1 - drivers/net/ethernet/sfc/falcon/io.h | 2 -- drivers/net/ethernet/sfc/io.h | 2 -- drivers/net/ethernet/silan/sc92031.c | 14 ----------- drivers/net/ethernet/via/via-rhine.c | 3 --- drivers/net/ethernet/wiznet/w5100.c | 6 ----- drivers/net/ethernet/wiznet/w5300.c | 15 ----------- drivers/net/wireless/ath/ath5k/base.c | 4 --- drivers/net/wireless/ath/ath5k/mac80211-ops.c | 2 -- drivers/net/wireless/broadcom/b43/main.c | 7 ------ drivers/net/wireless/broadcom/b43/sysfs.c | 1 - drivers/net/wireless/broadcom/b43legacy/ilt.c | 2 -- drivers/net/wireless/broadcom/b43legacy/main.c | 20 --------------- drivers/net/wireless/broadcom/b43legacy/phy.c | 1 - drivers/net/wireless/broadcom/b43legacy/pio.h | 1 - drivers/net/wireless/broadcom/b43legacy/radio.c | 4 --- drivers/net/wireless/broadcom/b43legacy/sysfs.c | 1 - drivers/net/wireless/intel/iwlegacy/common.h | 7 ------ drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 1 - drivers/ntb/hw/idt/ntb_hw_idt.c | 7 ------ drivers/ntb/test/ntb_perf.c | 3 --- drivers/scsi/bfa/bfa.h | 3 +-- drivers/scsi/bfa/bfa_hw_cb.c | 2 -- drivers/scsi/bfa/bfa_hw_ct.c | 2 -- drivers/scsi/bnx2fc/bnx2fc_hwi.c | 2 -- drivers/scsi/bnx2i/bnx2i_hwi.c | 3 --- drivers/scsi/megaraid/megaraid_sas_base.c | 1 - drivers/scsi/megaraid/megaraid_sas_fusion.c | 1 - drivers/scsi/mpt3sas/mpt3sas_base.c | 1 - drivers/scsi/qedf/qedf_io.c | 1 - drivers/scsi/qedi/qedi_fw.c | 1 - drivers/scsi/qla1280.c | 5 ---- drivers/ssb/pci.c | 1 - drivers/ssb/pcmcia.c | 4 --- drivers/staging/comedi/drivers/mite.c | 3 --- drivers/staging/comedi/drivers/ni_660x.c | 2 -- drivers/staging/comedi/drivers/ni_mio_common.c | 1 - drivers/staging/comedi/drivers/ni_pcidio.c | 2 -- drivers/staging/comedi/drivers/ni_tio.c | 1 - drivers/staging/comedi/drivers/s626.c | 2 -- drivers/tty/serial/men_z135_uart.c | 1 - drivers/tty/serial/serial_txx9.c | 1 - drivers/usb/early/xhci-dbc.c | 4 --- drivers/usb/host/xhci-dbgcap.c | 2 -- 
include/linux/qed/qed_if.h | 2 -- sound/soc/txx9/txx9aclc-ac97.c | 1 - 123 files changed, 1 insertion(+), 508 deletions(-) (limited to 'drivers/infiniband/hw/hns/hns_roce_hw_v1.c') diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index 4c97478d44bd..5826c2c98a50 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c @@ -303,8 +303,6 @@ static void post_se_instr(struct nitrox_softreq *sr, /* Ring doorbell with count 1 */ writeq(1, cmdq->dbell_csr_addr); - /* orders the doorbell rings */ - mmiowb(); cmdq->write_idx = incr_index(idx, 1, ndev->qlen); @@ -599,8 +597,6 @@ void pkt_slc_resp_tasklet(unsigned long data) * MSI-X interrupt generates if Completion count > Threshold */ writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr); - /* order the writes */ - mmiowb(); if (atomic_read(&cmdq->backlog_count)) schedule_work(&cmdq->backlog_qflush); diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index eb45af71d3a3..e8d0881b64d8 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -327,7 +327,6 @@ static void txx9dmac_reset_chan(struct txx9dmac_chan *dc) channel_writel(dc, SAIR, 0); channel_writel(dc, DAIR, 0); channel_writel(dc, CCR, 0); - mmiowb(); } /* Called with dc->lock held and bh disabled */ @@ -954,7 +953,6 @@ static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc, dma_sync_single_for_device(chan2parent(&dc->chan), prev->txd.phys, ddev->descsize, DMA_TO_DEVICE); - mmiowb(); if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) && channel_read_CHAR(dc) == prev->txd.phys) /* Restart chain DMA */ @@ -1080,7 +1078,6 @@ static void txx9dmac_free_chan_resources(struct dma_chan *chan) static void txx9dmac_off(struct txx9dmac_dev *ddev) { dma_writel(ddev, MCR, 0); - mmiowb(); } static int __init txx9dmac_chan_probe(struct platform_device *pdev) diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 45c048751f3b..7183ab34269e 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -2939,7 +2939,6 @@ static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels) reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo); reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi); reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo); - mmiowb(); ohci->mc_channels = channels; } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index f125a62eba8c..a46bffe2b288 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -182,7 +182,6 @@ static void g4x_write_infoframe(struct intel_encoder *encoder, I915_WRITE(VIDEO_DIP_CTL, val); - mmiowb(); for (i = 0; i < len; i += 4) { I915_WRITE(VIDEO_DIP_DATA, *data); data++; @@ -190,7 +189,6 @@ static void g4x_write_infoframe(struct intel_encoder *encoder, /* Write every possible data byte to force correct ECC calculation. */ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) I915_WRITE(VIDEO_DIP_DATA, 0); - mmiowb(); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; @@ -237,7 +235,6 @@ static void ibx_write_infoframe(struct intel_encoder *encoder, I915_WRITE(reg, val); - mmiowb(); for (i = 0; i < len; i += 4) { I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); data++; @@ -245,7 +242,6 @@ static void ibx_write_infoframe(struct intel_encoder *encoder, /* Write every possible data byte to force correct ECC calculation. 
*/ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); - mmiowb(); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; @@ -298,7 +294,6 @@ static void cpt_write_infoframe(struct intel_encoder *encoder, I915_WRITE(reg, val); - mmiowb(); for (i = 0; i < len; i += 4) { I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); data++; @@ -306,7 +301,6 @@ static void cpt_write_infoframe(struct intel_encoder *encoder, /* Write every possible data byte to force correct ECC calculation. */ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); - mmiowb(); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; @@ -352,7 +346,6 @@ static void vlv_write_infoframe(struct intel_encoder *encoder, I915_WRITE(reg, val); - mmiowb(); for (i = 0; i < len; i += 4) { I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data); data++; @@ -360,7 +353,6 @@ static void vlv_write_infoframe(struct intel_encoder *encoder, /* Write every possible data byte to force correct ECC calculation. */ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0); - mmiowb(); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; @@ -406,7 +398,6 @@ static void hsw_write_infoframe(struct intel_encoder *encoder, val &= ~hsw_infoframe_enable(type); I915_WRITE(ctl_reg, val); - mmiowb(); for (i = 0; i < len; i += 4) { I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2), *data); @@ -416,7 +407,6 @@ static void hsw_write_infoframe(struct intel_encoder *encoder, for (; i < data_size; i += 4) I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2), 0); - mmiowb(); val |= hsw_infoframe_enable(type); I915_WRITE(ctl_reg, val); diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c index 67d4a7d4acc8..88d132edc4e3 100644 --- a/drivers/ide/tx4939ide.c +++ b/drivers/ide/tx4939ide.c @@ -156,7 +156,6 @@ static u16 tx4939ide_check_error_ints(ide_hwif_t *hwif) u16 sysctl = tx4939ide_readw(base, TX4939IDE_Sys_Ctl); tx4939ide_writew(sysctl | 0x4000, base, TX4939IDE_Sys_Ctl); - mmiowb(); /* wait 12GBUSCLK (typ. 60ns @ GBUS200MHz, max 270ns) */ ndelay(270); tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl); @@ -396,7 +395,6 @@ static void tx4939ide_init_hwif(ide_hwif_t *hwif) /* Soft Reset */ tx4939ide_writew(0x8000, base, TX4939IDE_Sys_Ctl); - mmiowb(); /* at least 20 GBUSCLK (typ. 
100ns @ GBUS200MHz, max 450ns) */ ndelay(450); tx4939ide_writew(0x0000, base, TX4939IDE_Sys_Ctl); diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 12e67a91e578..8f270459b63e 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -8365,7 +8365,6 @@ static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd) struct hfi1_devdata *dd = rcd->dd; u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg); - mmiowb(); write_csr(dd, addr, rcd->imask); /* force the above write on the chip and get a value back */ (void)read_csr(dd, addr); @@ -11803,12 +11802,10 @@ void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd, << RCV_EGR_INDEX_HEAD_HEAD_SHIFT; write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg); } - mmiowb(); reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) | (((u64)hd & RCV_HDR_HEAD_HEAD_MASK) << RCV_HDR_HEAD_HEAD_SHIFT); write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg); - mmiowb(); } u32 hdrqempty(struct hfi1_ctxtdata *rcd) diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index a1de566fe95e..16ba9d52e1b9 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -1578,7 +1578,6 @@ void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint) sc_del_credit_return_intr(sc); trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl); if (needint) { - mmiowb(); sc_return_credits(sc); } } diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 97515c340134..c8555f7704d8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -1750,8 +1750,6 @@ static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, writel(val, hcr + 5); - mmiowb(); - return 0; } diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 429a59c5801c..9426936460f8 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -3744,12 +3744,6 @@ out: writel_relaxed(qp->doorbell_qpn, to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL); - /* - * Make sure doorbells don't leak out of SQ spinlock - * and reach the HCA out of order. - */ - mmiowb(); - stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1); qp->sq_next_wqe = ind; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 7cd006da1dae..b680be1f3f47 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -5123,7 +5123,6 @@ out: /* Make sure doorbells don't leak out of SQ spinlock * and reach the HCA out of order. */ - mmiowb(); bf->offset ^= bf->buf_size; } diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index 83aa47eb81a9..bdf5ed38de22 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -292,12 +292,6 @@ static int mthca_cmd_post(struct mthca_dev *dev, err = mthca_cmd_post_hcr(dev, in_param, out_param, in_modifier, op_modifier, op, token, event); - /* - * Make sure that our HCR writes don't get mixed in with - * writes from another CPU starting a FW command. 
- */ - mmiowb(); - mutex_unlock(&dev->cmd.hcr_mutex); return err; } diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index a6531ffe29a6..877a6daffa98 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c @@ -211,11 +211,6 @@ static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq, mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1, dev->kar + MTHCA_CQ_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); - /* - * Make sure doorbells don't leak out of CQ spinlock - * and reach the HCA out of order: - */ - mmiowb(); } } diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 7a5b25d13faa..d65b189f20ea 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -1809,11 +1809,6 @@ out: (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); - /* - * Make sure doorbells don't leak out of SQ spinlock - * and reach the HCA out of order: - */ - mmiowb(); } qp->sq.next_ind = ind; @@ -1924,12 +1919,6 @@ out: qp->rq.next_ind = ind; qp->rq.head += nreq; - /* - * Make sure doorbells don't leak out of RQ spinlock and reach - * the HCA out of order: - */ - mmiowb(); - spin_unlock_irqrestore(&qp->rq.lock, flags); return err; } @@ -2164,12 +2153,6 @@ out: MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } - /* - * Make sure doorbells don't leak out of SQ spinlock and reach - * the HCA out of order: - */ - mmiowb(); - spin_unlock_irqrestore(&qp->sq.lock, flags); return err; } diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c index 06b920385512..a85935ccce88 100644 --- a/drivers/infiniband/hw/mthca/mthca_srq.c +++ b/drivers/infiniband/hw/mthca/mthca_srq.c @@ -570,12 +570,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } - /* - * Make sure doorbells don't leak out of SRQ spinlock and - * reach the HCA out of order: - */ - mmiowb(); - spin_unlock_irqrestore(&srq->lock, flags); return err; } diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 4dab2b5ffb0e..8686a98e113d 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -773,9 +773,6 @@ static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags) cq->db.data.agg_flags = flags; cq->db.data.value = cpu_to_le32(cons); writeq(cq->db.raw, cq->db_addr); - - /* Make sure write would stick */ - mmiowb(); } int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) @@ -2084,8 +2081,6 @@ static int qedr_update_qp_state(struct qedr_dev *dev, if (rdma_protocol_roce(&dev->ibdev, 1)) { writel(qp->rq.db_data.raw, qp->rq.db); - /* Make sure write takes effect */ - mmiowb(); } break; case QED_ROCE_QP_STATE_ERR: @@ -3502,9 +3497,6 @@ int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, smp_wmb(); writel(qp->sq.db_data.raw, qp->sq.db); - /* Make sure write sticks */ - mmiowb(); - spin_unlock_irqrestore(&qp->q_lock, flags); return rc; @@ -3695,12 +3687,8 @@ int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, writel(qp->rq.db_data.raw, qp->rq.db); - /* Make sure write sticks */ - mmiowb(); - if (rdma_protocol_iwarp(&dev->ibdev, 1)) { writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2); - mmiowb(); } wr = wr->next; diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c 
b/drivers/infiniband/hw/qib/qib_iba6120.c index cdbf707fa267..531d8a1db2c3 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -1884,7 +1884,6 @@ static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, qib_write_kreg(dd, kr_scratch, 0xfeeddeaf); writel(pa, tidp32); qib_write_kreg(dd, kr_scratch, 0xdeadbeef); - mmiowb(); spin_unlock_irqrestore(tidlockp, flags); } @@ -1928,7 +1927,6 @@ static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr, pa |= 2 << 29; } writel(pa, tidp32); - mmiowb(); } @@ -2053,9 +2051,7 @@ static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd, { if (updegr) qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); - mmiowb(); qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); - mmiowb(); } static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd) diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index 9fde45538f6e..ea3ddb05cbad 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -2175,7 +2175,6 @@ static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, pa = chippa; } writeq(pa, tidptr); - mmiowb(); } /** @@ -2704,9 +2703,7 @@ static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd, { if (updegr) qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); - mmiowb(); qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); - mmiowb(); } static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd) diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 17d6b24b3473..ac6a84f11ad0 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -3793,7 +3793,6 @@ static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, pa = chippa; } writeq(pa, tidptr); - mmiowb(); } /** @@ -4440,10 +4439,8 @@ static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd, adjust_rcv_timeout(rcd, npkts); if (updegr) qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); - mmiowb(); qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); - mmiowb(); } static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd) diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c index 12caf3db8c34..4f4a09c2dbcd 100644 --- a/drivers/infiniband/hw/qib/qib_sd7220.c +++ b/drivers/infiniband/hw/qib/qib_sd7220.c @@ -1068,7 +1068,6 @@ static int qib_sd_setvals(struct qib_devdata *dd) for (idx = 0; idx < NUM_DDS_REGS; ++idx) { data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT; writeq(data, iaddr + idx); - mmiowb(); qib_read_kreg32(dd, kr_scratch); dds_reg_map >>= 4; for (midx = 0; midx < DDS_ROWS; ++midx) { @@ -1076,7 +1075,6 @@ static int qib_sd_setvals(struct qib_devdata *dd) data = dds_init_vals[midx].reg_vals[idx]; writeq(data, daddr); - mmiowb(); qib_read_kreg32(dd, kr_scratch); } /* End inner for (vals for this reg, each row) */ } /* end outer for (regs to be stored) */ @@ -1098,13 +1096,11 @@ static int qib_sd_setvals(struct qib_devdata *dd) didx = idx + min_idx; /* Store the next RXEQ register address */ writeq(rxeq_init_vals[idx].rdesc, iaddr + didx); - mmiowb(); qib_read_kreg32(dd, kr_scratch); /* Iterate through RXEQ values */ for (vidx = 0; vidx < 4; vidx++) { data = rxeq_init_vals[idx].rdata[vidx]; writeq(data, taddr + (vidx << 6) + idx); - mmiowb(); qib_read_kreg32(dd, 
kr_scratch); } } /* end outer for (Reg-writes for RXEQ) */ diff --git a/drivers/media/pci/dt3155/dt3155.c b/drivers/media/pci/dt3155/dt3155.c index 17d69bd5d7f1..49677ee889e3 100644 --- a/drivers/media/pci/dt3155/dt3155.c +++ b/drivers/media/pci/dt3155/dt3155.c @@ -46,7 +46,6 @@ static int read_i2c_reg(void __iomem *addr, u8 index, u8 *data) u32 tmp = index; iowrite32((tmp << 17) | IIC_READ, addr + IIC_CSR2); - mmiowb(); udelay(45); /* wait at least 43 usec for NEW_CYCLE to clear */ if (ioread32(addr + IIC_CSR2) & NEW_CYCLE) return -EIO; /* error: NEW_CYCLE not cleared */ @@ -77,7 +76,6 @@ static int write_i2c_reg(void __iomem *addr, u8 index, u8 data) u32 tmp = index; iowrite32((tmp << 17) | IIC_WRITE | data, addr + IIC_CSR2); - mmiowb(); udelay(65); /* wait at least 63 usec for NEW_CYCLE to clear */ if (ioread32(addr + IIC_CSR2) & NEW_CYCLE) return -EIO; /* error: NEW_CYCLE not cleared */ @@ -104,7 +102,6 @@ static void write_i2c_reg_nowait(void __iomem *addr, u8 index, u8 data) u32 tmp = index; iowrite32((tmp << 17) | IIC_WRITE | data, addr + IIC_CSR2); - mmiowb(); } /** @@ -264,7 +261,6 @@ static irqreturn_t dt3155_irq_handler_even(int irq, void *dev_id) FLD_DN_ODD | FLD_DN_EVEN | CAP_CONT_EVEN | CAP_CONT_ODD, ipd->regs + CSR1); - mmiowb(); } spin_lock(&ipd->lock); @@ -282,7 +278,6 @@ static irqreturn_t dt3155_irq_handler_even(int irq, void *dev_id) iowrite32(dma_addr + ipd->width, ipd->regs + ODD_DMA_START); iowrite32(ipd->width, ipd->regs + EVEN_DMA_STRIDE); iowrite32(ipd->width, ipd->regs + ODD_DMA_STRIDE); - mmiowb(); } /* enable interrupts, clear all irq flags */ @@ -437,12 +432,10 @@ static int dt3155_init_board(struct dt3155_priv *pd) /* resetting the adapter */ iowrite32(ADDR_ERR_ODD | ADDR_ERR_EVEN | FLD_CRPT_ODD | FLD_CRPT_EVEN | FLD_DN_ODD | FLD_DN_EVEN, pd->regs + CSR1); - mmiowb(); msleep(20); /* initializing adapter registers */ iowrite32(FIFO_EN | SRST, pd->regs + CSR1); - mmiowb(); iowrite32(0xEEEEEE01, pd->regs + EVEN_PIXEL_FMT); iowrite32(0xEEEEEE01, pd->regs + ODD_PIXEL_FMT); iowrite32(0x00000020, pd->regs + FIFO_TRIGER); @@ -454,7 +447,6 @@ static int dt3155_init_board(struct dt3155_priv *pd) iowrite32(0, pd->regs + MASK_LENGTH); iowrite32(0x0005007C, pd->regs + FIFO_FLAG_CNT); iowrite32(0x01010101, pd->regs + IIC_CLK_DUR); - mmiowb(); /* verifying that we have a DT3155 board (not just a SAA7116 chip) */ read_i2c_reg(pd->regs, DT_ID, &tmp); diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c index bcdca9fbef51..e3a5af65dbce 100644 --- a/drivers/memstick/host/jmb38x_ms.c +++ b/drivers/memstick/host/jmb38x_ms.c @@ -644,7 +644,6 @@ static int jmb38x_ms_reset(struct jmb38x_ms_host *host) writel(HOST_CONTROL_RESET_REQ | HOST_CONTROL_CLOCK_EN | readl(host->addr + HOST_CONTROL), host->addr + HOST_CONTROL); - mmiowb(); for (cnt = 0; cnt < 20; ++cnt) { if (!(HOST_CONTROL_RESET_REQ @@ -659,7 +658,6 @@ reset_next: writel(HOST_CONTROL_RESET | HOST_CONTROL_CLOCK_EN | readl(host->addr + HOST_CONTROL), host->addr + HOST_CONTROL); - mmiowb(); for (cnt = 0; cnt < 20; ++cnt) { if (!(HOST_CONTROL_RESET @@ -672,7 +670,6 @@ reset_next: return -EIO; reset_ok: - mmiowb(); writel(INT_STATUS_ALL, host->addr + INT_SIGNAL_ENABLE); writel(INT_STATUS_ALL, host->addr + INT_STATUS_ENABLE); return 0; @@ -1009,7 +1006,6 @@ static void jmb38x_ms_remove(struct pci_dev *dev) tasklet_kill(&host->notify); writel(0, host->addr + INT_SIGNAL_ENABLE); writel(0, host->addr + INT_STATUS_ENABLE); - mmiowb(); dev_dbg(&jm->pdev->dev, "interrupts off\n"); 
spin_lock_irqsave(&host->lock, flags); if (host->req) { diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c index ec0832278170..9d0445a567db 100644 --- a/drivers/misc/ioc4.c +++ b/drivers/misc/ioc4.c @@ -156,7 +156,6 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd) /* Reset to power-on state */ writel(0, &idd->idd_misc_regs->int_out.raw); - mmiowb(); /* Set up square wave */ int_out.raw = 0; @@ -164,7 +163,6 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd) int_out.fields.mode = IOC4_INT_OUT_MODE_TOGGLE; int_out.fields.diag = 0; writel(int_out.raw, &idd->idd_misc_regs->int_out.raw); - mmiowb(); /* Check square wave period averaged over some number of cycles */ start = ktime_get_ns(); diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 3fbbadfa2ae1..8a47a6fc3fc7 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -350,9 +350,6 @@ static void mei_me_hw_reset_release(struct mei_device *dev) hcsr |= H_IG; hcsr &= ~H_RST; mei_hcsr_set(dev, hcsr); - - /* complete this write before we set host ready on another CPU */ - mmiowb(); } /** diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index 9ac95b48ef92..cc729f7ab32e 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c @@ -403,7 +403,6 @@ static void tifm_7xx1_remove(struct pci_dev *dev) fm->eject = tifm_7xx1_dummy_eject; fm->has_ms_pif = tifm_7xx1_dummy_has_ms_pif; writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); - mmiowb(); free_irq(dev->irq, fm); tifm_remove_adapter(fm); diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c index 82a97866e0cf..546b1fc30e7d 100644 --- a/drivers/mmc/host/alcor.c +++ b/drivers/mmc/host/alcor.c @@ -967,7 +967,6 @@ static void alcor_timeout_timer(struct work_struct *work) alcor_request_complete(host, 0); } - mmiowb(); mutex_unlock(&host->cmd_mutex); } diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index a8141ff9be03..42e1bad024f4 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1807,7 +1807,6 @@ void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) sdhci_send_command(host, mrq->cmd); } - mmiowb(); spin_unlock_irqrestore(&host->lock, flags); } EXPORT_SYMBOL_GPL(sdhci_request); @@ -2010,8 +2009,6 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) */ if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); - - mmiowb(); } EXPORT_SYMBOL_GPL(sdhci_set_ios); @@ -2105,7 +2102,6 @@ static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); - mmiowb(); } } @@ -2353,7 +2349,6 @@ void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) host->tuning_done = 0; - mmiowb(); spin_unlock_irqrestore(&host->lock, flags); /* Wait for Buffer Read Ready interrupt */ @@ -2705,7 +2700,6 @@ static bool sdhci_request_done(struct sdhci_host *host) host->mrqs_done[i] = NULL; - mmiowb(); spin_unlock_irqrestore(&host->lock, flags); mmc_request_done(host->mmc, mrq); @@ -2739,7 +2733,6 @@ static void sdhci_timeout_timer(struct timer_list *t) sdhci_finish_mrq(host, host->cmd->mrq); } - mmiowb(); spin_unlock_irqrestore(&host->lock, flags); } @@ -2770,7 +2763,6 @@ static void sdhci_timeout_data_timer(struct timer_list *t) } } - mmiowb(); spin_unlock_irqrestore(&host->lock, flags); } @@ -3251,7 +3243,6 @@ int sdhci_resume_host(struct sdhci_host *host) mmc->ops->set_ios(mmc, &mmc->ios); } else { 
sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); - mmiowb(); } if (host->irq_wake_enabled) { @@ -3391,7 +3382,6 @@ void sdhci_cqe_enable(struct mmc_host *mmc) mmc_hostname(mmc), host->ier, sdhci_readl(host, SDHCI_INT_STATUS)); - mmiowb(); spin_unlock_irqrestore(&host->lock, flags); } EXPORT_SYMBOL_GPL(sdhci_cqe_enable); @@ -3416,7 +3406,6 @@ void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery) mmc_hostname(mmc), host->ier, sdhci_readl(host, SDHCI_INT_STATUS)); - mmiowb(); spin_unlock_irqrestore(&host->lock, flags); } EXPORT_SYMBOL_GPL(sdhci_cqe_disable); @@ -4255,8 +4244,6 @@ int __sdhci_add_host(struct sdhci_host *host) goto unirq; } - mmiowb(); - ret = mmc_add_host(mmc); if (ret) goto unled; diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index b6644ce296b2..35dd34b82a4d 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c @@ -889,7 +889,6 @@ static int tifm_sd_initialize_host(struct tifm_sd *host) struct tifm_dev *sock = host->dev; writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); - mmiowb(); host->clk_div = 61; host->clk_freq = 20000000; writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL); @@ -940,7 +939,6 @@ static int tifm_sd_initialize_host(struct tifm_sd *host) writel(TIFM_MMCSD_CERR | TIFM_MMCSD_BRS | TIFM_MMCSD_EOC | TIFM_MMCSD_ERRMASK, sock->addr + SOCK_MMCSD_INT_ENABLE); - mmiowb(); return 0; } @@ -1005,7 +1003,6 @@ static void tifm_sd_remove(struct tifm_dev *sock) spin_lock_irqsave(&sock->lock, flags); host->eject = 1; writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); - mmiowb(); spin_unlock_irqrestore(&sock->lock, flags); tasklet_kill(&host->finish_tasklet); diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index 32c4211506fc..412395ac2935 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -686,7 +686,6 @@ static void via_sdc_request(struct mmc_host *mmc, struct mmc_request *mrq) via_sdc_send_command(host, mrq->cmd); } - mmiowb(); spin_unlock_irqrestore(&host->lock, flags); } @@ -711,7 +710,6 @@ static void via_sdc_set_power(struct via_crdr_mmc_host *host, gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON; writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT); - mmiowb(); spin_unlock_irqrestore(&host->lock, flags); via_pwron_sleep(host); @@ -770,7 +768,6 @@ static void via_sdc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (readb(addrbase + VIA_CRDR_PCISDCCLK) != clock) writeb(clock, addrbase + VIA_CRDR_PCISDCCLK); - mmiowb(); spin_unlock_irqrestore(&host->lock, flags); if (ios->power_mode != MMC_POWER_OFF) @@ -830,7 +827,6 @@ static void via_reset_pcictrl(struct via_crdr_mmc_host *host) via_restore_pcictrlreg(host); via_restore_sdcreg(host); - mmiowb(); spin_unlock_irqrestore(&host->lock, flags); } @@ -925,7 +921,6 @@ static irqreturn_t via_sdc_isr(int irq, void *dev_id) result = IRQ_HANDLED; - mmiowb(); out: spin_unlock(&sdhost->lock); @@ -960,7 +955,6 @@ static void via_sdc_timeout(struct timer_list *t) } } - mmiowb(); spin_unlock_irqrestore(&sdhost->lock, flags); } @@ -1012,7 +1006,6 @@ static void via_sdc_card_detect(struct work_struct *work) tasklet_schedule(&host->finish_tasklet); } - mmiowb(); spin_unlock_irqrestore(&host->lock, flags); via_reset_pcictrl(host); @@ -1020,7 +1013,6 @@ static void via_sdc_card_detect(struct work_struct *work) spin_lock_irqsave(&host->lock, flags); } - mmiowb(); spin_unlock_irqrestore(&host->lock, flags); via_print_pcictrl(host); @@ -1188,7 +1180,6 @@ static void via_sd_remove(struct pci_dev *pcidev) /* Disable 
generating further interrupts */ writeb(0x0, sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL); - mmiowb(); if (sdhost->mrq) { pr_err("%s: Controller removed during " @@ -1197,7 +1188,6 @@ static void via_sd_remove(struct pci_dev *pcidev) /* make sure all DMA is stopped */ writel(VIA_CRDR_DMACTRL_SFTRST, sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL); - mmiowb(); sdhost->mrq->cmd->error = -ENOMEDIUM; if (sdhost->mrq->stop) sdhost->mrq->stop->error = -ENOMEDIUM; diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c index 86456216fb93..7b99831aa046 100644 --- a/drivers/mtd/nand/raw/r852.c +++ b/drivers/mtd/nand/raw/r852.c @@ -45,7 +45,6 @@ static inline void r852_write_reg(struct r852_device *dev, int address, uint8_t value) { writeb(value, dev->mmio + address); - mmiowb(); } @@ -61,7 +60,6 @@ static inline void r852_write_reg_dword(struct r852_device *dev, int address, uint32_t value) { writel(cpu_to_le32(value), dev->mmio + address); - mmiowb(); } /* returns pointer to our private structure */ diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c index ddf0420c0997..97978227aa55 100644 --- a/drivers/mtd/nand/raw/txx9ndfmc.c +++ b/drivers/mtd/nand/raw/txx9ndfmc.c @@ -159,7 +159,6 @@ static void txx9ndfmc_cmd_ctrl(struct nand_chip *chip, int cmd, if ((ctrl & NAND_CTRL_CHANGE) && cmd == NAND_CMD_NONE) txx9ndfmc_write(dev, 0, TXX9_NDFDTR); } - mmiowb(); } static int txx9ndfmc_dev_ready(struct nand_chip *chip) diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 47e5984f16fb..3155f7fa83eb 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -613,7 +613,6 @@ static irqreturn_t greth_interrupt(int irq, void *dev_id) napi_schedule(&greth->napi); } - mmiowb(); spin_unlock(&greth->devlock); return retval; diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index 16477aa6d61f..4f7e792e50e9 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c @@ -345,8 +345,6 @@ static void slic_set_rx_mode(struct net_device *dev) if (sdev->promisc != set_promisc) { sdev->promisc = set_promisc; slic_configure_rcv(sdev); - /* make sure writes to receiver cant leak out of the lock */ - mmiowb(); } spin_unlock_bh(&sdev->link_lock); } @@ -1461,8 +1459,6 @@ static netdev_tx_t slic_xmit(struct sk_buff *skb, struct net_device *dev) if (slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS) netif_stop_queue(dev); - /* make sure writes to io-memory cant leak out of tx queue lock */ - mmiowb(); return NETDEV_TX_OK; drop_skb: diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index b17d435de09f..05798aa5bb73 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -2016,7 +2016,6 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) mb(); writel_relaxed((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); - mmiowb(); } int ena_com_dev_reset(struct ena_com_dev *ena_dev, diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 9e07b469066a..f7583c5d9509 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2439,7 +2439,6 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, atl1_tx_map(adapter, skb, ptpd); atl1_tx_queue(adapter, count, ptpd); atl1_update_mailbox(adapter); - mmiowb(); return 
NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index d99317b3d891..1474cac7e892 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -908,7 +908,6 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb, ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX, (adapter->txd_write_ptr >> 2)); - mmiowb(); dev_consume_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index d63371d70bce..dfdd14eadd57 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -3305,8 +3305,6 @@ next_rx: BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); - mmiowb(); - return rx_pkt; } @@ -6723,8 +6721,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) BNX2_WR16(bp, txr->tx_bidx_addr, prod); BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); - mmiowb(); - txr->tx_prod = prod; if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ecb1bd7eb508..0c8f5b546c6f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4166,8 +4166,6 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw); - mmiowb(); - txdata->tx_bd_prod += nbd; if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 1ed068509337..2d57af9c061c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -527,8 +527,6 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp, REG_WR_RELAXED(bp, fp->ustorm_rx_prods_offset + i * 4, ((u32 *)&rx_prods)[i]); - mmiowb(); - DP(NETIF_MSG_RX_STATUS, "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", fp->index, bd_prod, rx_comp_prod, rx_sge_prod); @@ -653,7 +651,6 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags); /* Make sure that ACK is written */ - mmiowb(); barrier(); } @@ -674,7 +671,6 @@ static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); /* Make sure that ACK is written */ - mmiowb(); barrier(); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 749d0ef44371..0745cccd416d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -2623,7 +2623,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) wmb(); DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw); - mmiowb(); barrier(); num_pkts++; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e46786a56b0c..3716c828ff5d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -869,9 +869,6 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp) "write %x to HC %d (addr 0x%x)\n", val, port, addr); - /* flush all outstanding writes */ - mmiowb(); - REG_WR(bp, addr, val); if (REG_RD(bp, addr) != val) BNX2X_ERR("BUG! 
Proper val not read from IGU!\n"); @@ -887,9 +884,6 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp) DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val); - /* flush all outstanding writes */ - mmiowb(); - REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) BNX2X_ERR("BUG! Proper val not read from IGU!\n"); @@ -1595,7 +1589,6 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp) /* * Ensure that HC_CONFIG is written before leading/trailing edge config */ - mmiowb(); barrier(); if (!CHIP_IS_E1(bp)) { @@ -1611,9 +1604,6 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp) REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); } - - /* Make sure that interrupts are indeed enabled from here on */ - mmiowb(); } static void bnx2x_igu_int_enable(struct bnx2x *bp) @@ -1674,9 +1664,6 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp) REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); - - /* Make sure that interrupts are indeed enabled from here on */ - mmiowb(); } void bnx2x_int_enable(struct bnx2x *bp) @@ -3833,7 +3820,6 @@ static void bnx2x_sp_prod_update(struct bnx2x *bp) REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), bp->spq_prod_idx); - mmiowb(); } /** @@ -5244,7 +5230,6 @@ static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) { /* No memory barriers */ storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); - mmiowb(); } static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, @@ -6513,7 +6498,6 @@ void bnx2x_nic_init_cnic(struct bnx2x *bp) /* flush all */ mb(); - mmiowb(); } void bnx2x_pre_irq_nic_init(struct bnx2x *bp) @@ -6553,7 +6537,6 @@ void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code) /* flush all before enabling interrupts */ mb(); - mmiowb(); bnx2x_int_enable(bp); @@ -7775,12 +7758,10 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf) DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", data, igu_addr_data); REG_WR(bp, igu_addr_data, data); - mmiowb(); barrier(); DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", ctl, igu_addr_ctl); REG_WR(bp, igu_addr_ctl, ctl); - mmiowb(); barrier(); /* wait for clean up to finish */ @@ -9550,7 +9531,6 @@ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n", close ? "closing" : "opening"); - mmiowb(); } #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */ @@ -9674,7 +9654,6 @@ static void bnx2x_pxp_prep(struct bnx2x *bp) if (!CHIP_IS_E1(bp)) { REG_WR(bp, PXP2_REG_RD_START_INIT, 0); REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); - mmiowb(); } } @@ -9774,16 +9753,13 @@ static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) reset_mask1 & (~not_reset_mask1)); barrier(); - mmiowb(); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2 & (~stay_reset2)); barrier(); - mmiowb(); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); - mmiowb(); } /** @@ -9867,9 +9843,6 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) REG_WR(bp, MISC_REG_UNPREPARED, 0); barrier(); - /* Make sure all is written to the chip before the reset */ - mmiowb(); - /* Wait for 1ms to empty GLUE and PCI-E core queues, * PSWHST, GRC and PSWRD Tetris buffer. 
*/ @@ -14828,7 +14801,6 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) if (rc) break; - mmiowb(); barrier(); /* Start accepting on iSCSI L2 ring */ @@ -14863,7 +14835,6 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) if (!bnx2x_wait_sp_comp(bp, sp_bits)) BNX2X_ERR("rx_mode completion timed out!\n"); - mmiowb(); barrier(); /* Unset iSCSI L2 MAC */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 7b22a6d8514c..80d250a6d048 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -5039,7 +5039,6 @@ static inline int bnx2x_q_init(struct bnx2x *bp, /* As no ramrod is sent, complete the command immediately */ o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT); - mmiowb(); smp_mb(); return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index c97b642e6537..0edbb0a76847 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -100,13 +100,11 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", cmd_data.sb_id_and_flags, igu_addr_data); REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags); - mmiowb(); barrier(); DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", ctl, igu_addr_ctl); REG_WR(bp, igu_addr_ctl, ctl); - mmiowb(); barrier(); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index a9bdc21873d3..672b57f0b84d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -172,8 +172,6 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) /* Trigger the PF FW */ writeb_relaxed(1, &zone_data->trigger.vf_pf_channel.addr_valid); - mmiowb(); - /* Wait for PF to complete */ while ((tout >= 0) && (!*done)) { msleep(interval); @@ -1179,7 +1177,6 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, /* ack the FW */ storm_memset_vf_mbx_ack(bp, vf->abs_vfid); - mmiowb(); /* copy the response header including status-done field, * must be last dmae, must be after FW is acked @@ -2174,7 +2171,6 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, */ storm_memset_vf_mbx_ack(bp, vf->abs_vfid); /* Firmware ack should be written before unlocking channel */ - mmiowb(); bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); } } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 0bb9d7b3a2b6..b8b68d408ad0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -556,8 +556,6 @@ normal_tx: tx_done: - mmiowb(); - if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { if (skb->xmit_more && !tx_buf->is_push) bnxt_db_write(bp, &txr->tx_db, prod); @@ -2123,7 +2121,6 @@ static int bnxt_poll(struct napi_struct *napi, int budget) &dim_sample); net_dim(&cpr->dim, dim_sample); } - mmiowb(); return work_done; } diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 328373e0578f..821bccc0915c 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -1073,7 +1073,6 @@ static void tg3_int_reenable(struct tg3_napi *tnapi) struct tg3 *tp = tnapi->tp; 
tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); - mmiowb(); /* When doing tagged status, this work check is unnecessary. * The last_tag we write above tells the chip which piece of @@ -6999,7 +6998,6 @@ next_pkt_nopost: tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); } - mmiowb(); } else if (work_mask) { /* rx_std_buffers[] and rx_jmb_buffers[] entries must be * updated before the producer indices can be updated. @@ -7210,8 +7208,6 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, dpr->rx_jmb_prod_idx); - mmiowb(); - if (err) tw32_f(HOSTCC_MODE, tp->coal_now); } @@ -7278,7 +7274,6 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget) HOSTCC_MODE_ENABLE | tnapi->coal_now); } - mmiowb(); break; } } @@ -8159,7 +8154,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) if (!skb->xmit_more || netif_xmit_stopped(txq)) { /* Packets are ready, update Tx producer idx on card. */ tw32_tx_mbox(tnapi->prodmbox, entry); - mmiowb(); } return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c index 2df7440f58df..39643be8c30a 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c @@ -38,9 +38,6 @@ int lio_cn6xxx_soft_reset(struct octeon_device *oct) lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST); lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST); - /* make sure that the reset is written before starting timer */ - mmiowb(); - /* Wait for 10ms as Octeon resets. */ mdelay(100); @@ -487,9 +484,6 @@ void lio_cn6xxx_disable_interrupt(struct octeon_device *oct, /* Disable Interrupts */ writeq(0, cn6xxx->intr_enb_reg64); - - /* make sure interrupts are really disabled */ - mmiowb(); } static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct) @@ -555,10 +549,6 @@ static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct) value &= ~(1 << oq_no); octeon_write_csr(oct, reg, value); - /* Ensure that the enable register is written. - */ - mmiowb(); - spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg); } } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index ce8c3f818666..934115d18488 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -1449,7 +1449,6 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) iq->pkt_in_done -= iq->pkts_processed; iq->pkts_processed = 0; /* this write needs to be flushed before we release the lock */ - mmiowb(); spin_unlock_bh(&iq->lock); oct = iq->oct_dev; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index a0c099f71524..017169023cca 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -513,8 +513,6 @@ int octeon_retry_droq_refill(struct octeon_droq *droq) */ wmb(); writel(desc_refilled, droq->pkts_credit_reg); - /* make sure mmio write completes */ - mmiowb(); if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP) reschedule = 0; @@ -712,8 +710,6 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, */ wmb(); writel(desc_refilled, droq->pkts_credit_reg); - /* make sure mmio write completes */ - mmiowb(); } } } /* for (each packet)... 
*/ diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index c6f4cbda040f..fcf20a8f92d9 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -278,7 +278,6 @@ ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq) if (atomic_read(&oct->status) == OCT_DEV_RUNNING) { writel(iq->fill_cnt, iq->doorbell_reg); /* make sure doorbell write goes through */ - mmiowb(); iq->fill_cnt = 0; iq->last_db_time = jiffies; return; diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 8fe9af0e2ab7..466bf1ea186d 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -3270,11 +3270,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (!skb->xmit_more || netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); - /* we need this if more than one processor can write to - * our tail at a time, it synchronizes IO on IA64/Altix - * systems - */ - mmiowb(); } } else { dev_kfree_skb_any(skb); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 7acc61e4f645..022c3ac0e40f 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3816,7 +3816,6 @@ static void e1000_flush_tx_ring(struct e1000_adapter *adapter) if (tx_ring->next_to_use == tx_ring->count) tx_ring->next_to_use = 0; ew32(TDT(0), tx_ring->next_to_use); - mmiowb(); usleep_range(200, 250); } @@ -5904,12 +5903,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, tx_ring->next_to_use); else writel(tx_ring->next_to_use, tx_ring->tail); - - /* we need this if more than one processor can write - * to our tail at a time, it synchronizes IO on - *IA64/Altix systems - */ - mmiowb(); } } else { dev_kfree_skb_any(skb); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 5d4f1761dc0c..8de77155f2e7 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -321,8 +321,6 @@ static void fm10k_mask_aer_comp_abort(struct pci_dev *pdev) pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &err_mask); err_mask |= PCI_ERR_UNC_COMP_ABORT; pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, err_mask); - - mmiowb(); } int fm10k_iov_resume(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 5a0419421511..1f48298f01e6 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1037,11 +1037,6 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring, /* notify HW of packet */ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); } return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 6c97667d20ef..ffb611bbedfa 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3471,11 +3471,6 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, /* notify HW of packet */ if 
(netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); } return 0; diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 9b4d7cec2e18..6bfef82e7607 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -2360,11 +2360,6 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, /* notify HW of packet */ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); } return; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index c289d97f477d..1af21bbe180e 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1356,11 +1356,6 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, /* notify HW of packet */ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); } return; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 69b230c53fed..09ba94496742 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6028,11 +6028,6 @@ static int igb_tx_map(struct igb_ring *tx_ring, if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); } return 0; diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 4eab83faec62..34cd30d7162f 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2279,10 +2279,6 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, tx_ring->buffer_info[first].next_to_watch = tx_desc; tx_ring->next_to_use = i; writel(i, adapter->hw.hw_addr + tx_ring->tail); - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); } static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 87a11879bf2d..f8d692f6aa4f 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -892,11 +892,6 @@ static int igc_tx_map(struct igc_ring *tx_ring, if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); } return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index e100054a3765..99e23cf6a73a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8299,11 +8299,6 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, if 
(netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); } return 0; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 8b3495ee2b6e..49486c10ef81 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -1139,9 +1139,6 @@ static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx) /* Make sure write' to descriptors are complete before we tell hardware */ wmb(); sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); - - /* Synchronize I/O on since next processor may write to tail */ - mmiowb(); } @@ -1354,7 +1351,6 @@ stopped: /* reset the Rx prefetch unit */ sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); - mmiowb(); } /* Clean out receive buffer area, assumes receiver hardware stopped */ diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index c81d15bf259c..87e90b5d4d7d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -129,10 +129,6 @@ static int mlx4_reset_slave(struct mlx4_dev *dev) comm_flags = rst_req << COM_CHAN_RST_REQ_OFFSET; __raw_writel((__force u32)cpu_to_be32(comm_flags), (__iomem char *)priv->mfunc.comm + MLX4_COMM_CHAN_FLAGS); - /* Make sure that our comm channel write doesn't - * get mixed in with writes from another CPU. - */ - mmiowb(); end = msecs_to_jiffies(MLX4_COMM_TIME) + jiffies; while (time_before(jiffies, end)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index a5d5d6fc1da0..c678344d22a2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -281,7 +281,6 @@ static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param) val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31); __raw_writel((__force u32) cpu_to_be32(val), &priv->mfunc.comm->slave_write); - mmiowb(); mutex_unlock(&dev->persist->device_state_mutex); return 0; } @@ -496,12 +495,6 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, (op_modifier << HCR_OPMOD_SHIFT) | op), hcr + 6); - /* - * Make sure that our HCR writes don't get mixed in with - * writes from another CPU starting a FW command. - */ - mmiowb(); - cmd->toggle = cmd->toggle ^ 1; ret = 0; @@ -2206,7 +2199,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, } __raw_writel((__force u32) cpu_to_be32(reply), &priv->mfunc.comm[slave].slave_read); - mmiowb(); return; @@ -2410,7 +2402,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) &priv->mfunc.comm[i].slave_write); __raw_writel((__force u32) 0, &priv->mfunc.comm[i].slave_read); - mmiowb(); for (port = 1; port <= MLX4_MAX_PORTS; port++) { struct mlx4_vport_state *admin_vport; struct mlx4_vport_state *oper_vport; @@ -2576,10 +2567,6 @@ void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev) slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR; __raw_writel((__force u32)cpu_to_be32(slave_read), &priv->mfunc.comm[slave].slave_read); - /* Make sure that our comm channel write doesn't - * get mixed in with writes from another CPU. 
- */ - mmiowb(); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index be48c6440251..c087d1014b09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -917,7 +917,6 @@ static void cmd_work_handler(struct work_struct *work) mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); wmb(); iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); - mmiowb(); /* if not in polling don't use ent after this point */ if (cmd_mode == CMD_MODE_POLLING || poll_cmd) { poll_timeout(ent); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index e0340f778d8f..d8b7fba96d58 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -1439,7 +1439,6 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index) tx->queue_active = 0; put_be32(htonl(1), tx->send_stop); mb(); - mmiowb(); } __netif_tx_unlock(dev_queue); } @@ -2861,7 +2860,6 @@ again: tx->queue_active = 1; put_be32(htonl(1), tx->send_go); mb(); - mmiowb(); } tx->pkt_start++; if ((avail - count) < MXGEFW_MAX_SEND_DESC) { diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index feda9644289d..3b2ae1a21678 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -4153,8 +4153,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) writeq(val64, &tx_fifo->List_Control); - mmiowb(); - put_off++; if (put_off == fifo->tx_curr_put_info.fifo_len + 1) put_off = 0; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index b877acec5cde..1d334f2e0a56 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -1826,7 +1826,6 @@ static int vxge_poll_msix(struct napi_struct *napi, int budget) vxge_hw_channel_msix_unmask( (struct __vxge_hw_channel *)ring->handle, ring->rx_vector_no); - mmiowb(); } /* We are copying and returning the local variable, in case if after @@ -2234,8 +2233,6 @@ static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id) vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle, fifo->tx_vector_no); - mmiowb(); - return IRQ_HANDLED; } @@ -2272,14 +2269,12 @@ vxge_alarm_msix_handle(int irq, void *dev_id) */ vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id); vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id); - mmiowb(); status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, vdev->exec_mode); if (status == VXGE_HW_OK) { vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, msix_id); - mmiowb(); continue; } vxge_debug_intr(VXGE_ERR, diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c index 59e77e3086bb..709d20d9938f 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c @@ -1399,11 +1399,7 @@ static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo, VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop), &fifo->nofl_db->control_0); - mmiowb(); - writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr); - - mmiowb(); } /** diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index e23980e301b6..69e6a90edf2f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ 
b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -774,18 +774,12 @@ static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, { u16 rc = 0, index; - /* Make certain HW write took affect */ - mmiowb(); - index = le16_to_cpu(p_sb_desc->sb_attn->sb_index); if (p_sb_desc->index != index) { p_sb_desc->index = index; rc = QED_SB_ATT_IDX; } - /* Make certain we got a consistent view with HW */ - mmiowb(); - return rc; } @@ -1170,7 +1164,6 @@ static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn, /* Both segments (interrupts & acks) are written to same place address; * Need to guarantee all commands will be received (in-order) by HW. */ - mmiowb(); barrier(); } @@ -1805,9 +1798,6 @@ static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); - /* Flush the writes to IGU */ - mmiowb(); - /* Unmask AEU signals toward IGU */ qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); } @@ -1871,9 +1861,6 @@ static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); - /* Flush the write to IGU */ - mmiowb(); - /* calculate where to read the status bit from */ sb_bit = 1 << (igu_sb_id % 32); sb_bit_addr = igu_sb_id / 32 * sizeof(u32); diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 79b311b86f66..f5f3c03b9dd2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -341,9 +341,6 @@ void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod) USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id); REG_WR16(p_hwfn, addr, prod); - - /* keep prod updates ordered */ - mmiowb(); } int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index b4c8949933f1..4555c0b161ef 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1526,14 +1526,6 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev, barrier(); writel(txq->tx_db.raw, txq->doorbell_addr); - /* mmiowb is needed to synchronize doorbell writes from more than one - * processor. It guarantees that the write arrives to the device before - * the queue lock is released and another start_xmit is called (possibly - * on another CPU). Without this barrier, the next doorbell can bypass - * this doorbell. This is applicable to IA64/Altix systems. - */ - mmiowb(); - for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) { if (qede_txq_has_work(txq)) break; diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 31b046e24565..6f7e3622c6b4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -580,14 +580,6 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq) internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), (u32 *)&rx_prods); - - /* mmiowb is needed to synchronize doorbell writes from more than one - * processor. It guarantees that the write arrives to the device before - * the napi lock is released and another qede_poll is called (possibly - * on another CPU). Without this barrier, the next doorbell can bypass - * this doorbell. This is applicable to IA64/Altix systems. 
- */ - mmiowb(); } static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash) diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index b61b88cbc0c7..457444894d80 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -1858,7 +1858,6 @@ static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) wmb(); writel_relaxed(qdev->small_buf_q_producer_index, &port_regs->CommonRegs.rxSmallQProducerIndex); - mmiowb(); } } diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index 3e71b65a9546..ad7c5eb8a3b6 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h @@ -2181,7 +2181,6 @@ static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val) static inline void ql_write_db_reg(u32 val, void __iomem *addr) { writel(val, addr); - mmiowb(); } /* diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 07e1c623048e..6cae33072496 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2695,7 +2695,6 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) wmb(); ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); - mmiowb(); netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, "tx queued, slot %d, len %d\n", tx_ring->prod_idx, skb->len); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 8154b38c08f7..316b47741d3f 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -728,7 +728,6 @@ static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id) spin_lock(&priv->lock); ravb_emac_interrupt_unlocked(ndev); - mmiowb(); spin_unlock(&priv->lock); return IRQ_HANDLED; } @@ -848,7 +847,6 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) result = IRQ_HANDLED; } - mmiowb(); spin_unlock(&priv->lock); return result; } @@ -881,7 +879,6 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id) result = IRQ_HANDLED; } - mmiowb(); spin_unlock(&priv->lock); return result; } @@ -898,7 +895,6 @@ static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q) if (ravb_queue_interrupt(ndev, q)) result = IRQ_HANDLED; - mmiowb(); spin_unlock(&priv->lock); return result; } @@ -943,7 +939,6 @@ static int ravb_poll(struct napi_struct *napi, int budget) ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); ravb_tx_free(ndev, q, true); netif_wake_subqueue(ndev, q); - mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); } } @@ -959,7 +954,6 @@ static int ravb_poll(struct napi_struct *napi, int budget) ravb_write(ndev, mask, RIE0); ravb_write(ndev, mask, TIE); } - mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); /* Receive error message handling */ @@ -1008,7 +1002,6 @@ static void ravb_adjust_link(struct net_device *ndev) if (priv->no_avb_link && phydev->link) ravb_rcv_snd_enable(ndev); - mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); if (new_state && netif_msg_link(priv)) @@ -1601,7 +1594,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) netif_stop_subqueue(ndev, q); exit: - mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); return NETDEV_TX_OK; @@ -1673,7 +1665,6 @@ static void ravb_set_rx_mode(struct net_device *ndev) spin_lock_irqsave(&priv->lock, flags); ravb_modify(ndev, ECMR, ECMR_PRM, ndev->flags & 
IFF_PROMISC ? ECMR_PRM : 0); - mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); } diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index dce2a40a31e3..9a42580693cb 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -196,7 +196,6 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, ravb_write(ndev, GIE_PTCS, GIE); else ravb_write(ndev, GID_PTCD, GID); - mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); return 0; @@ -259,7 +258,6 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, else ravb_write(ndev, GID_PTMD0, GID); } - mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); return error; @@ -331,7 +329,6 @@ void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) spin_lock_irqsave(&priv->lock, flags); ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ); ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP); - mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index e33af371b169..ed30aebdb941 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2010,7 +2010,6 @@ static void sh_eth_adjust_link(struct net_device *ndev) if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link) sh_eth_rcv_snd_enable(ndev); - mmiowb(); spin_unlock_irqrestore(&mdp->lock, flags); if (new_state && netif_msg_link(mdp)) diff --git a/drivers/net/ethernet/sfc/falcon/io.h b/drivers/net/ethernet/sfc/falcon/io.h index 7085ee1d5e2b..c3577643fbda 100644 --- a/drivers/net/ethernet/sfc/falcon/io.h +++ b/drivers/net/ethernet/sfc/falcon/io.h @@ -108,7 +108,6 @@ static inline void ef4_writeo(struct ef4_nic *efx, const ef4_oword_t *value, _ef4_writed(efx, value->u32[2], reg + 8); _ef4_writed(efx, value->u32[3], reg + 12); #endif - mmiowb(); spin_unlock_irqrestore(&efx->biu_lock, flags); } @@ -130,7 +129,6 @@ static inline void ef4_sram_writeq(struct ef4_nic *efx, void __iomem *membase, __raw_writel((__force u32)value->u32[0], membase + addr); __raw_writel((__force u32)value->u32[1], membase + addr + 4); #endif - mmiowb(); spin_unlock_irqrestore(&efx->biu_lock, flags); } diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h index 89563170af52..2774a10f44e9 100644 --- a/drivers/net/ethernet/sfc/io.h +++ b/drivers/net/ethernet/sfc/io.h @@ -120,7 +120,6 @@ static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value, _efx_writed(efx, value->u32[2], reg + 8); _efx_writed(efx, value->u32[3], reg + 12); #endif - mmiowb(); spin_unlock_irqrestore(&efx->biu_lock, flags); } @@ -142,7 +141,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, __raw_writel((__force u32)value->u32[0], membase + addr); __raw_writel((__force u32)value->u32[1], membase + addr + 4); #endif - mmiowb(); spin_unlock_irqrestore(&efx->biu_lock, flags); } diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c index c07fd594fe71..db5dc8ce0aff 100644 --- a/drivers/net/ethernet/silan/sc92031.c +++ b/drivers/net/ethernet/silan/sc92031.c @@ -361,7 +361,6 @@ static void sc92031_disable_interrupts(struct net_device *dev) /* stop interrupts */ iowrite32(0, port_base + IntrMask); _sc92031_dummy_read(port_base); - mmiowb(); /* wait for any concurrent interrupt/tasklet to finish */ synchronize_irq(priv->pdev->irq); @@ -379,7 +378,6 @@ static void 
sc92031_enable_interrupts(struct net_device *dev) wmb(); iowrite32(IntrBits, port_base + IntrMask); - mmiowb(); } static void _sc92031_disable_tx_rx(struct net_device *dev) @@ -867,7 +865,6 @@ out: rmb(); iowrite32(intr_mask, port_base + IntrMask); - mmiowb(); spin_unlock(&priv->lock); } @@ -901,7 +898,6 @@ out_none: rmb(); iowrite32(intr_mask, port_base + IntrMask); - mmiowb(); return IRQ_NONE; } @@ -978,7 +974,6 @@ static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb, iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE, port_base + TxAddr0 + entry * 4); iowrite32(tx_status, port_base + TxStatus0 + entry * 4); - mmiowb(); if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC) netif_stop_queue(dev); @@ -1024,7 +1019,6 @@ static int sc92031_open(struct net_device *dev) spin_lock_bh(&priv->lock); _sc92031_reset(dev); - mmiowb(); spin_unlock_bh(&priv->lock); sc92031_enable_interrupts(dev); @@ -1060,7 +1054,6 @@ static int sc92031_stop(struct net_device *dev) _sc92031_disable_tx_rx(dev); _sc92031_tx_clear(dev); - mmiowb(); spin_unlock_bh(&priv->lock); @@ -1081,7 +1074,6 @@ static void sc92031_set_multicast_list(struct net_device *dev) _sc92031_set_mar(dev); _sc92031_set_rx_config(dev); - mmiowb(); spin_unlock_bh(&priv->lock); } @@ -1098,7 +1090,6 @@ static void sc92031_tx_timeout(struct net_device *dev) priv->tx_timeouts++; _sc92031_reset(dev); - mmiowb(); spin_unlock(&priv->lock); @@ -1140,7 +1131,6 @@ sc92031_ethtool_get_link_ksettings(struct net_device *dev, output_status = _sc92031_mii_read(port_base, MII_OutputStatus); _sc92031_mii_scan(port_base); - mmiowb(); spin_unlock_bh(&priv->lock); @@ -1311,7 +1301,6 @@ static int sc92031_ethtool_set_wol(struct net_device *dev, priv->pm_config = pm_config; iowrite32(pm_config, port_base + PMConfig); - mmiowb(); spin_unlock_bh(&priv->lock); @@ -1337,7 +1326,6 @@ static int sc92031_ethtool_nway_reset(struct net_device *dev) out: _sc92031_mii_scan(port_base); - mmiowb(); spin_unlock_bh(&priv->lock); @@ -1530,7 +1518,6 @@ static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state) _sc92031_disable_tx_rx(dev); _sc92031_tx_clear(dev); - mmiowb(); spin_unlock_bh(&priv->lock); @@ -1555,7 +1542,6 @@ static int sc92031_resume(struct pci_dev *pdev) spin_lock_bh(&priv->lock); _sc92031_reset(dev); - mmiowb(); spin_unlock_bh(&priv->lock); sc92031_enable_interrupts(dev); diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 33949248c829..ab55416a10fa 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -571,7 +571,6 @@ static void rhine_ack_events(struct rhine_private *rp, u32 mask) if (rp->quirks & rqStatusWBRace) iowrite8(mask >> 16, ioaddr + IntrStatus2); iowrite16(mask, ioaddr + IntrStatus); - mmiowb(); } /* @@ -863,7 +862,6 @@ static int rhine_napipoll(struct napi_struct *napi, int budget) if (work_done < budget) { napi_complete_done(napi, work_done); iowrite16(enable_mask, ioaddr + IntrEnable); - mmiowb(); } return work_done; } @@ -1893,7 +1891,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb, static void rhine_irq_disable(struct rhine_private *rp) { iowrite16(0x0000, rp->base + IntrEnable); - mmiowb(); } /* The interrupt handler does all of the Rx thread work and cleans up diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index d8ba512f166a..1713c2d2dccf 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -219,7 +219,6 @@ static inline int __w5100_write_direct(struct 
net_device *ndev, u32 addr, static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data) { __w5100_write_direct(ndev, addr, data); - mmiowb(); return 0; } @@ -236,7 +235,6 @@ static int w5100_write16_direct(struct net_device *ndev, u32 addr, u16 data) { __w5100_write_direct(ndev, addr, data >> 8); __w5100_write_direct(ndev, addr + 1, data); - mmiowb(); return 0; } @@ -260,8 +258,6 @@ static int w5100_writebulk_direct(struct net_device *ndev, u32 addr, for (i = 0; i < len; i++, addr++) __w5100_write_direct(ndev, addr, *buf++); - mmiowb(); - return 0; } @@ -375,7 +371,6 @@ static int w5100_readbulk_indirect(struct net_device *ndev, u32 addr, u8 *buf, for (i = 0; i < len; i++) *buf++ = w5100_read_direct(ndev, W5100_IDM_DR); - mmiowb(); spin_unlock_irqrestore(&mmio_priv->reg_lock, flags); return 0; @@ -394,7 +389,6 @@ static int w5100_writebulk_indirect(struct net_device *ndev, u32 addr, for (i = 0; i < len; i++) __w5100_write_direct(ndev, W5100_IDM_DR, *buf++); - mmiowb(); spin_unlock_irqrestore(&mmio_priv->reg_lock, flags); return 0; diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index f9da5d6172e3..3f03eecc0479 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -141,7 +141,6 @@ static u16 w5300_read_indirect(struct w5300_priv *priv, u16 addr) spin_lock_irqsave(&priv->reg_lock, flags); w5300_write_direct(priv, W5300_IDM_AR, addr); - mmiowb(); data = w5300_read_direct(priv, W5300_IDM_DR); spin_unlock_irqrestore(&priv->reg_lock, flags); @@ -154,9 +153,7 @@ static void w5300_write_indirect(struct w5300_priv *priv, u16 addr, u16 data) spin_lock_irqsave(&priv->reg_lock, flags); w5300_write_direct(priv, W5300_IDM_AR, addr); - mmiowb(); w5300_write_direct(priv, W5300_IDM_DR, data); - mmiowb(); spin_unlock_irqrestore(&priv->reg_lock, flags); } @@ -192,7 +189,6 @@ static int w5300_command(struct w5300_priv *priv, u16 cmd) unsigned long timeout = jiffies + msecs_to_jiffies(100); w5300_write(priv, W5300_S0_CR, cmd); - mmiowb(); while (w5300_read(priv, W5300_S0_CR) != 0) { if (time_after(jiffies, timeout)) @@ -241,18 +237,15 @@ static void w5300_write_macaddr(struct w5300_priv *priv) w5300_write(priv, W5300_SHARH, ndev->dev_addr[4] << 8 | ndev->dev_addr[5]); - mmiowb(); } static void w5300_hw_reset(struct w5300_priv *priv) { w5300_write_direct(priv, W5300_MR, MR_RST); - mmiowb(); mdelay(5); w5300_write_direct(priv, W5300_MR, priv->indirect ? MR_WDF(7) | MR_PB | MR_IND : MR_WDF(7) | MR_PB); - mmiowb(); w5300_write(priv, W5300_IMR, 0); w5300_write_macaddr(priv); @@ -264,24 +257,20 @@ static void w5300_hw_reset(struct w5300_priv *priv) w5300_write32(priv, W5300_TMSRL, 64 << 24); w5300_write32(priv, W5300_TMSRH, 0); w5300_write(priv, W5300_MTYPE, 0x00ff); - mmiowb(); } static void w5300_hw_start(struct w5300_priv *priv) { w5300_write(priv, W5300_S0_MR, priv->promisc ? 
S0_MR_MACRAW : S0_MR_MACRAW_MF); - mmiowb(); w5300_command(priv, S0_CR_OPEN); w5300_write(priv, W5300_S0_IMR, S0_IR_RECV | S0_IR_SENDOK); w5300_write(priv, W5300_IMR, IR_S0); - mmiowb(); } static void w5300_hw_close(struct w5300_priv *priv) { w5300_write(priv, W5300_IMR, 0); - mmiowb(); w5300_command(priv, S0_CR_CLOSE); } @@ -372,7 +361,6 @@ static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev) netif_stop_queue(ndev); w5300_write_frame(priv, skb->data, skb->len); - mmiowb(); ndev->stats.tx_packets++; ndev->stats.tx_bytes += skb->len; dev_kfree_skb(skb); @@ -419,7 +407,6 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget) if (rx_count < budget) { napi_complete_done(napi, rx_count); w5300_write(priv, W5300_IMR, IR_S0); - mmiowb(); } return rx_count; @@ -434,7 +421,6 @@ static irqreturn_t w5300_interrupt(int irq, void *ndev_instance) if (!ir) return IRQ_NONE; w5300_write(priv, W5300_S0_IR, ir); - mmiowb(); if (ir & S0_IR_SENDOK) { netif_dbg(priv, tx_done, ndev, "tx done\n"); @@ -444,7 +430,6 @@ static irqreturn_t w5300_interrupt(int irq, void *ndev_instance) if (ir & S0_IR_RECV) { if (napi_schedule_prep(&priv->napi)) { w5300_write(priv, W5300_IMR, 0); - mmiowb(); __napi_schedule(&priv->napi); } } diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index a2351ef45ae0..65a4c142640d 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -837,7 +837,6 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, txq->link = &ds->ds_link; ath5k_hw_start_tx_dma(ah, txq->qnum); - mmiowb(); spin_unlock_bh(&txq->lock); return 0; @@ -2174,7 +2173,6 @@ ath5k_beacon_config(struct ath5k_hw *ah) } ath5k_hw_set_imr(ah, ah->imask); - mmiowb(); spin_unlock_bh(&ah->block); } @@ -2779,7 +2777,6 @@ int ath5k_start(struct ieee80211_hw *hw) ret = 0; done: - mmiowb(); mutex_unlock(&ah->lock); set_bit(ATH_STAT_STARTED, ah->status); @@ -2839,7 +2836,6 @@ void ath5k_stop(struct ieee80211_hw *hw) "putting device to sleep\n"); } - mmiowb(); mutex_unlock(&ah->lock); ath5k_stop_tasklets(ah); diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index 16e052d02c94..5e866a193ed0 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -263,7 +263,6 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); common->curaid = 0; ath5k_hw_set_bssid(ah); - mmiowb(); } if (changes & BSS_CHANGED_BEACON_INT) @@ -528,7 +527,6 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ret = -EINVAL; } - mmiowb(); mutex_unlock(&ah->lock); return ret; } diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index 74be3c809225..4c7980f84591 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -485,7 +485,6 @@ static void b43_ram_write(struct b43_wldev *dev, u16 offset, u32 val) val = swab32(val); b43_write32(dev, B43_MMIO_RAM_CONTROL, offset); - mmiowb(); b43_write32(dev, B43_MMIO_RAM_DATA, val); } @@ -656,9 +655,7 @@ static void b43_tsf_write_locked(struct b43_wldev *dev, u64 tsf) /* The hardware guarantees us an atomic write, if we * write the low register first. 
*/ b43_write32(dev, B43_MMIO_REV3PLUS_TSF_LOW, low); - mmiowb(); b43_write32(dev, B43_MMIO_REV3PLUS_TSF_HIGH, high); - mmiowb(); } void b43_tsf_write(struct b43_wldev *dev, u64 tsf) @@ -1822,11 +1819,9 @@ static void b43_beacon_update_trigger_work(struct work_struct *work) if (b43_bus_host_is_sdio(dev->dev)) { /* wl->mutex is enough. */ b43_do_beacon_update_trigger_work(dev); - mmiowb(); } else { spin_lock_irq(&wl->hardirq_lock); b43_do_beacon_update_trigger_work(dev); - mmiowb(); spin_unlock_irq(&wl->hardirq_lock); } } @@ -2078,7 +2073,6 @@ static irqreturn_t b43_interrupt_thread_handler(int irq, void *dev_id) mutex_lock(&dev->wl->mutex); b43_do_interrupt_thread(dev); - mmiowb(); mutex_unlock(&dev->wl->mutex); return IRQ_HANDLED; @@ -2143,7 +2137,6 @@ static irqreturn_t b43_interrupt_handler(int irq, void *dev_id) spin_lock(&dev->wl->hardirq_lock); ret = b43_do_interrupt(dev); - mmiowb(); spin_unlock(&dev->wl->hardirq_lock); return ret; diff --git a/drivers/net/wireless/broadcom/b43/sysfs.c b/drivers/net/wireless/broadcom/b43/sysfs.c index 3190493bd07f..93d03b673670 100644 --- a/drivers/net/wireless/broadcom/b43/sysfs.c +++ b/drivers/net/wireless/broadcom/b43/sysfs.c @@ -129,7 +129,6 @@ static ssize_t b43_attr_interfmode_store(struct device *dev, } else err = -ENOSYS; - mmiowb(); mutex_unlock(&wldev->wl->mutex); return err ? err : count; diff --git a/drivers/net/wireless/broadcom/b43legacy/ilt.c b/drivers/net/wireless/broadcom/b43legacy/ilt.c index ee5682e54204..6d15fb4d30c6 100644 --- a/drivers/net/wireless/broadcom/b43legacy/ilt.c +++ b/drivers/net/wireless/broadcom/b43legacy/ilt.c @@ -315,14 +315,12 @@ const u16 b43legacy_ilt_sigmasqr2[B43legacy_ILT_SIGMASQR_SIZE] = { void b43legacy_ilt_write(struct b43legacy_wldev *dev, u16 offset, u16 val) { b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset); - mmiowb(); b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1, val); } void b43legacy_ilt_write32(struct b43legacy_wldev *dev, u16 offset, u32 val) { b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset); - mmiowb(); b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA2, (val & 0xFFFF0000) >> 16); b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1, diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index 55f411925960..c777efc6dc13 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -264,7 +264,6 @@ static void b43legacy_ram_write(struct b43legacy_wldev *dev, u16 offset, val = swab32(val); b43legacy_write32(dev, B43legacy_MMIO_RAM_CONTROL, offset); - mmiowb(); b43legacy_write32(dev, B43legacy_MMIO_RAM_DATA, val); } @@ -341,14 +340,11 @@ void b43legacy_shm_write32(struct b43legacy_wldev *dev, if (offset & 0x0003) { /* Unaligned access */ b43legacy_shm_control_word(dev, routing, offset >> 2); - mmiowb(); b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA_UNALIGNED, (value >> 16) & 0xffff); - mmiowb(); b43legacy_shm_control_word(dev, routing, (offset >> 2) + 1); - mmiowb(); b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA, value & 0xffff); return; @@ -356,7 +352,6 @@ void b43legacy_shm_write32(struct b43legacy_wldev *dev, offset >>= 2; } b43legacy_shm_control_word(dev, routing, offset); - mmiowb(); b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, value); } @@ -368,7 +363,6 @@ void b43legacy_shm_write16(struct b43legacy_wldev *dev, u16 routing, u16 offset, if (offset & 0x0003) { /* Unaligned access */ b43legacy_shm_control_word(dev, routing, offset >> 2); - mmiowb(); 
b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA_UNALIGNED, value); @@ -377,7 +371,6 @@ void b43legacy_shm_write16(struct b43legacy_wldev *dev, u16 routing, u16 offset, offset >>= 2; } b43legacy_shm_control_word(dev, routing, offset); - mmiowb(); b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA, value); } @@ -471,7 +464,6 @@ static void b43legacy_time_lock(struct b43legacy_wldev *dev) status = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); status |= B43legacy_MACCTL_TBTTHOLD; b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status); - mmiowb(); } static void b43legacy_time_unlock(struct b43legacy_wldev *dev) @@ -494,10 +486,8 @@ static void b43legacy_tsf_write_locked(struct b43legacy_wldev *dev, u64 tsf) u32 hi = (tsf & 0xFFFFFFFF00000000ULL) >> 32; b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_LOW, 0); - mmiowb(); b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_HIGH, hi); - mmiowb(); b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_LOW, lo); } else { @@ -507,13 +497,9 @@ static void b43legacy_tsf_write_locked(struct b43legacy_wldev *dev, u64 tsf) u16 v3 = (tsf & 0xFFFF000000000000ULL) >> 48; b43legacy_write16(dev, B43legacy_MMIO_TSF_0, 0); - mmiowb(); b43legacy_write16(dev, B43legacy_MMIO_TSF_3, v3); - mmiowb(); b43legacy_write16(dev, B43legacy_MMIO_TSF_2, v2); - mmiowb(); b43legacy_write16(dev, B43legacy_MMIO_TSF_1, v1); - mmiowb(); b43legacy_write16(dev, B43legacy_MMIO_TSF_0, v0); } } @@ -1250,7 +1236,6 @@ static void b43legacy_beacon_update_trigger_work(struct work_struct *work) /* The handler might have updated the IRQ mask. */ b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask); - mmiowb(); spin_unlock_irq(&wl->irq_lock); } mutex_unlock(&wl->mutex); @@ -1346,7 +1331,6 @@ static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev) dma_reason[2], dma_reason[3], dma_reason[4], dma_reason[5]); b43legacy_controller_restart(dev, "DMA error"); - mmiowb(); spin_unlock_irqrestore(&dev->wl->irq_lock, flags); return; } @@ -1396,7 +1380,6 @@ static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev) handle_irq_transmit_status(dev); b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask); - mmiowb(); spin_unlock_irqrestore(&dev->wl->irq_lock, flags); } @@ -1488,7 +1471,6 @@ static irqreturn_t b43legacy_interrupt_handler(int irq, void *dev_id) dev->irq_reason = reason; tasklet_schedule(&dev->isr_tasklet); out: - mmiowb(); spin_unlock(&dev->wl->irq_lock); return ret; @@ -2781,7 +2763,6 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw, spin_lock_irqsave(&wl->irq_lock, flags); b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask); - mmiowb(); spin_unlock_irqrestore(&wl->irq_lock, flags); out_unlock_mutex: mutex_unlock(&wl->mutex); @@ -2900,7 +2881,6 @@ static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw, spin_lock_irqsave(&wl->irq_lock, flags); b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask); /* XXX: why? 
*/ - mmiowb(); spin_unlock_irqrestore(&wl->irq_lock, flags); out_unlock_mutex: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/broadcom/b43legacy/phy.c b/drivers/net/wireless/broadcom/b43legacy/phy.c index 995c7d0c212a..f949766d27ca 100644 --- a/drivers/net/wireless/broadcom/b43legacy/phy.c +++ b/drivers/net/wireless/broadcom/b43legacy/phy.c @@ -134,7 +134,6 @@ u16 b43legacy_phy_read(struct b43legacy_wldev *dev, u16 offset) void b43legacy_phy_write(struct b43legacy_wldev *dev, u16 offset, u16 val) { b43legacy_write16(dev, B43legacy_MMIO_PHY_CONTROL, offset); - mmiowb(); b43legacy_write16(dev, B43legacy_MMIO_PHY_DATA, val); } diff --git a/drivers/net/wireless/broadcom/b43legacy/pio.h b/drivers/net/wireless/broadcom/b43legacy/pio.h index 1cd1b9ca5e9c..08cd02282beb 100644 --- a/drivers/net/wireless/broadcom/b43legacy/pio.h +++ b/drivers/net/wireless/broadcom/b43legacy/pio.h @@ -92,7 +92,6 @@ void b43legacy_pio_write(struct b43legacy_pioqueue *queue, u16 offset, u16 value) { b43legacy_write16(queue->dev, queue->mmio_base + offset, value); - mmiowb(); } diff --git a/drivers/net/wireless/broadcom/b43legacy/radio.c b/drivers/net/wireless/broadcom/b43legacy/radio.c index eab1c9387846..c6db444ea07e 100644 --- a/drivers/net/wireless/broadcom/b43legacy/radio.c +++ b/drivers/net/wireless/broadcom/b43legacy/radio.c @@ -95,7 +95,6 @@ void b43legacy_radio_lock(struct b43legacy_wldev *dev) B43legacy_WARN_ON(status & B43legacy_MACCTL_RADIOLOCK); status |= B43legacy_MACCTL_RADIOLOCK; b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status); - mmiowb(); udelay(10); } @@ -108,7 +107,6 @@ void b43legacy_radio_unlock(struct b43legacy_wldev *dev) B43legacy_WARN_ON(!(status & B43legacy_MACCTL_RADIOLOCK)); status &= ~B43legacy_MACCTL_RADIOLOCK; b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status); - mmiowb(); } u16 b43legacy_radio_read16(struct b43legacy_wldev *dev, u16 offset) @@ -141,7 +139,6 @@ u16 b43legacy_radio_read16(struct b43legacy_wldev *dev, u16 offset) void b43legacy_radio_write16(struct b43legacy_wldev *dev, u16 offset, u16 val) { b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, offset); - mmiowb(); b43legacy_write16(dev, B43legacy_MMIO_RADIO_DATA_LOW, val); } @@ -333,7 +330,6 @@ u8 b43legacy_radio_aci_scan(struct b43legacy_wldev *dev) void b43legacy_nrssi_hw_write(struct b43legacy_wldev *dev, u16 offset, s16 val) { b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_CTRL, offset); - mmiowb(); b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_DATA, (u16)val); } diff --git a/drivers/net/wireless/broadcom/b43legacy/sysfs.c b/drivers/net/wireless/broadcom/b43legacy/sysfs.c index 2a1da15c913b..2db83eec7a11 100644 --- a/drivers/net/wireless/broadcom/b43legacy/sysfs.c +++ b/drivers/net/wireless/broadcom/b43legacy/sysfs.c @@ -143,7 +143,6 @@ static ssize_t b43legacy_attr_interfmode_store(struct device *dev, if (err) b43legacyerr(wldev->wl, "Interference Mitigation not " "supported by device\n"); - mmiowb(); spin_unlock_irqrestore(&wldev->wl->irq_lock, flags); mutex_unlock(&wldev->wl->mutex); diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h index b079c64ca014..986646af8dfd 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.h +++ b/drivers/net/wireless/intel/iwlegacy/common.h @@ -2030,13 +2030,6 @@ static inline void _il_release_nic_access(struct il_priv *il) { _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - /* - * In above we are reading CSR_GP_CNTRL register, what will flush any - * previous writes, but still want 
write, which clear MAC_ACCESS_REQ - * bit, be performed on PCI bus before any other writes scheduled on - * different CPUs (after we drop reg_lock). - */ - mmiowb(); } static inline u32 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index fe8269d023de..abbfc9cc80fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2067,7 +2067,6 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, * MAC_ACCESS_REQ bit to be performed before any other writes * scheduled on different CPUs (after we drop reg_lock). */ - mmiowb(); out: spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); } diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c index 1dede87dd54f..dcf234680535 100644 --- a/drivers/ntb/hw/idt/ntb_hw_idt.c +++ b/drivers/ntb/hw/idt/ntb_hw_idt.c @@ -358,8 +358,6 @@ static void idt_sw_write(struct idt_ntb_dev *ndev, iowrite32((u32)reg, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASAADDR); /* Put the new value of the register */ iowrite32(data, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASADATA); - /* Make sure the PCIe transactions are executed */ - mmiowb(); /* Unlock GASA registers operations */ spin_unlock_irqrestore(&ndev->gasa_lock, irqflags); } @@ -750,7 +748,6 @@ static void idt_ntb_local_link_enable(struct idt_ntb_dev *ndev) spin_lock_irqsave(&ndev->mtbl_lock, irqflags); idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part); idt_nt_write(ndev, IDT_NT_NTMTBLDATA, mtbldata); - mmiowb(); spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags); /* Notify the peers by setting and clearing the global signal bit */ @@ -778,7 +775,6 @@ static void idt_ntb_local_link_disable(struct idt_ntb_dev *ndev) spin_lock_irqsave(&ndev->mtbl_lock, irqflags); idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part); idt_nt_write(ndev, IDT_NT_NTMTBLDATA, 0); - mmiowb(); spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags); /* Notify the peers by setting and clearing the global signal bit */ @@ -1339,7 +1335,6 @@ static int idt_ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx, idt_nt_write(ndev, IDT_NT_LUTLDATA, (u32)addr); idt_nt_write(ndev, IDT_NT_LUTMDATA, (u32)(addr >> 32)); idt_nt_write(ndev, IDT_NT_LUTUDATA, data); - mmiowb(); spin_unlock_irqrestore(&ndev->lut_lock, irqflags); /* Limit address isn't specified since size is fixed for LUT */ } @@ -1393,7 +1388,6 @@ static int idt_ntb_peer_mw_clear_trans(struct ntb_dev *ntb, int pidx, idt_nt_write(ndev, IDT_NT_LUTLDATA, 0); idt_nt_write(ndev, IDT_NT_LUTMDATA, 0); idt_nt_write(ndev, IDT_NT_LUTUDATA, 0); - mmiowb(); spin_unlock_irqrestore(&ndev->lut_lock, irqflags); } @@ -1812,7 +1806,6 @@ static int idt_ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx, /* Set the route and send the data */ idt_sw_write(ndev, partdata_tbl[ndev->part].msgctl[midx], swpmsgctl); idt_nt_write(ndev, ntdata_tbl.msgs[midx].out, msg); - mmiowb(); /* Unlock the messages routing table */ spin_unlock_irqrestore(&ndev->msg_locks[midx], irqflags); diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 2a9d6b0d1f19..11a6cd374004 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -284,11 +284,9 @@ static int perf_spad_cmd_send(struct perf_peer *peer, enum perf_cmd cmd, ntb_peer_spad_write(perf->ntb, peer->pidx, PERF_SPAD_HDATA(perf->gidx), upper_32_bits(data)); - mmiowb(); ntb_peer_spad_write(perf->ntb, peer->pidx, PERF_SPAD_CMD(perf->gidx), cmd); - mmiowb(); 
ntb_peer_db_set(perf->ntb, PERF_SPAD_NOTIFY(peer->gidx)); dev_dbg(&perf->ntb->dev, "DB ring peer %#llx\n", @@ -379,7 +377,6 @@ static int perf_msg_cmd_send(struct perf_peer *peer, enum perf_cmd cmd, ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_HDATA, upper_32_bits(data)); - mmiowb(); /* This call shall trigger peer message event */ ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_CMD, cmd); diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h index 0e119d838e1b..762cb77253b9 100644 --- a/drivers/scsi/bfa/bfa.h +++ b/drivers/scsi/bfa/bfa.h @@ -62,8 +62,7 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m); ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \ writel((__bfa)->iocfc.req_cq_pi[__reqq], \ (__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq]); \ - mmiowb(); \ - } while (0) + } while (0) #define bfa_rspq_pi(__bfa, __rspq) \ (*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva)) diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c index c4a0c0eb88a5..4a0d881b2602 100644 --- a/drivers/scsi/bfa/bfa_hw_cb.c +++ b/drivers/scsi/bfa/bfa_hw_cb.c @@ -61,7 +61,6 @@ bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci) bfa_rspq_ci(bfa, rspq) = ci; writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); - mmiowb(); } void @@ -72,7 +71,6 @@ bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) bfa_rspq_ci(bfa, rspq) = ci; writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); - mmiowb(); } void diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c index b0ff378dece2..b7be5f4f02a5 100644 --- a/drivers/scsi/bfa/bfa_hw_ct.c +++ b/drivers/scsi/bfa/bfa_hw_ct.c @@ -81,7 +81,6 @@ bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) bfa_rspq_ci(bfa, rspq) = ci; writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); - mmiowb(); } /* @@ -94,7 +93,6 @@ bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) { bfa_rspq_ci(bfa, rspq) = ci; writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); - mmiowb(); } void diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 039328d9ef13..19734ec7f42e 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -991,7 +991,6 @@ void bnx2fc_arm_cq(struct bnx2fc_rport *tgt) FCOE_CQE_TOGGLE_BIT_SHIFT); msg = *((u32 *)rx_db); writel(cpu_to_le32(msg), tgt->ctx_base); - mmiowb(); } @@ -1409,7 +1408,6 @@ void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt) (tgt->sq_curr_toggle_bit << 15); msg = *((u32 *)sq_db); writel(cpu_to_le32(msg), tgt->ctx_base); - mmiowb(); } diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index d56a78f411cd..12666313b937 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -253,7 +253,6 @@ void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count) writew(ep->qp.rq_prod_idx, ep->qp.ctx_base + CNIC_RECV_DOORBELL); } - mmiowb(); } @@ -279,8 +278,6 @@ static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count) bnx2i_ring_577xx_doorbell(bnx2i_conn); } else writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL); - - mmiowb(); } diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 293f5cf524d7..59a6546fd602 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -815,7 +815,6 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance, &(regs)->inbound_high_queue_port); writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1, &(regs)->inbound_low_queue_port); - mmiowb(); 
spin_unlock_irqrestore(&instance->hba_lock, flags); } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 1d17128030cd..e35c2b64c145 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -242,7 +242,6 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance, &instance->reg_set->inbound_low_queue_port); writel(le32_to_cpu(req_desc->u.high), &instance->reg_set->inbound_high_queue_port); - mmiowb(); spin_unlock_irqrestore(&instance->hba_lock, flags); #endif } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 1d8c584ec1e9..f60b9e0a6ca6 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -3333,7 +3333,6 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr, spin_lock_irqsave(writeq_lock, flags); __raw_writel((u32)(b), addr); __raw_writel((u32)(b >> 32), (addr + 4)); - mmiowb(); spin_unlock_irqrestore(writeq_lock, flags); } diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index 6ca583bdde23..53e8221f6816 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -807,7 +807,6 @@ void qedf_ring_doorbell(struct qedf_rport *fcport) writel(*(u32 *)&dbell, fcport->p_doorbell); /* Make sure SQ index is updated so f/w prcesses requests in order */ wmb(); - mmiowb(); } static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req, diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index e2a995a6e8e7..f8f86774f77f 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -985,7 +985,6 @@ static void qedi_ring_doorbell(struct qedi_conn *qedi_conn) * others they are two different assembly operations. */ wmb(); - mmiowb(); QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ, "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n", qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx, diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 6856dfdfa473..93acbc5094f0 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -3004,8 +3004,6 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) sp->flags |= SRB_SENT; ha->actthreads++; WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index); - /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */ - mmiowb(); out: if (status) @@ -3254,8 +3252,6 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) sp->flags |= SRB_SENT; ha->actthreads++; WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index); - /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */ - mmiowb(); out: if (status) @@ -3379,7 +3375,6 @@ qla1280_isp_cmd(struct scsi_qla_host *ha) * See Documentation/driver-api/device-io.rst for more information.
*/ WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index); - mmiowb(); LEAVE("qla1280_isp_cmd"); } diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c index 84807a9b4b13..da2d2ab8104d 100644 --- a/drivers/ssb/pci.c +++ b/drivers/ssb/pci.c @@ -305,7 +305,6 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom) else if (i % 2) pr_cont("."); writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2)); - mmiowb(); msleep(20); } err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl); diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c index 567013f8a8be..d7d730c245c5 100644 --- a/drivers/ssb/pcmcia.c +++ b/drivers/ssb/pcmcia.c @@ -338,7 +338,6 @@ static void ssb_pcmcia_write8(struct ssb_device *dev, u16 offset, u8 value) err = select_core_and_segment(dev, &offset); if (likely(!err)) writeb(value, bus->mmio + offset); - mmiowb(); spin_unlock_irqrestore(&bus->bar_lock, flags); } @@ -352,7 +351,6 @@ static void ssb_pcmcia_write16(struct ssb_device *dev, u16 offset, u16 value) err = select_core_and_segment(dev, &offset); if (likely(!err)) writew(value, bus->mmio + offset); - mmiowb(); spin_unlock_irqrestore(&bus->bar_lock, flags); } @@ -368,7 +366,6 @@ static void ssb_pcmcia_write32(struct ssb_device *dev, u16 offset, u32 value) writew((value & 0x0000FFFF), bus->mmio + offset); writew(((value & 0xFFFF0000) >> 16), bus->mmio + offset + 2); } - mmiowb(); spin_unlock_irqrestore(&bus->bar_lock, flags); } @@ -424,7 +421,6 @@ static void ssb_pcmcia_block_write(struct ssb_device *dev, const void *buffer, WARN_ON(1); } unlock: - mmiowb(); spin_unlock_irqrestore(&bus->bar_lock, flags); } #endif /* CONFIG_SSB_BLOCKIO */ diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c index 61e03ad84123..639ec1586976 100644 --- a/drivers/staging/comedi/drivers/mite.c +++ b/drivers/staging/comedi/drivers/mite.c @@ -371,7 +371,6 @@ static unsigned int mite_get_status(struct mite_channel *mite_chan) writel(CHOR_CLRDONE, mite->mmio + MITE_CHOR(mite_chan->channel)); } - mmiowb(); spin_unlock_irqrestore(&mite->lock, flags); return status; } @@ -451,7 +450,6 @@ void mite_dma_arm(struct mite_channel *mite_chan) mite_chan->done = 0; /* arm */ writel(CHOR_START, mite->mmio + MITE_CHOR(mite_chan->channel)); - mmiowb(); spin_unlock_irqrestore(&mite->lock, flags); } EXPORT_SYMBOL_GPL(mite_dma_arm); @@ -638,7 +636,6 @@ void mite_release_channel(struct mite_channel *mite_chan) CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE, mite->mmio + MITE_CHCR(mite_chan->channel)); mite_chan->ring = NULL; - mmiowb(); } spin_unlock_irqrestore(&mite->lock, flags); } diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c index 405573e927cf..4ee9b260eab0 100644 --- a/drivers/staging/comedi/drivers/ni_660x.c +++ b/drivers/staging/comedi/drivers/ni_660x.c @@ -320,7 +320,6 @@ static inline void ni_660x_set_dma_channel(struct comedi_device *dev, ni_660x_write(dev, chip, devpriv->dma_cfg[chip] | NI660X_DMA_CFG_RESET(mite_channel), NI660X_DMA_CFG); - mmiowb(); } static inline void ni_660x_unset_dma_channel(struct comedi_device *dev, @@ -333,7 +332,6 @@ static inline void ni_660x_unset_dma_channel(struct comedi_device *dev, devpriv->dma_cfg[chip] &= ~NI660X_DMA_CFG_SEL_MASK(mite_channel); devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL_NONE(mite_channel); ni_660x_write(dev, chip, devpriv->dma_cfg[chip], NI660X_DMA_CFG); - mmiowb(); } static int ni_660x_request_mite_channel(struct comedi_device *dev, diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c
b/drivers/staging/comedi/drivers/ni_mio_common.c index b04dad8c7092..668f2aa16baa 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -547,7 +547,6 @@ static inline void ni_set_bitfield(struct comedi_device *dev, int reg, reg); break; } - mmiowb(); spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags); } diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c index 4bdef87d5dd7..8f3864799c19 100644 --- a/drivers/staging/comedi/drivers/ni_pcidio.c +++ b/drivers/staging/comedi/drivers/ni_pcidio.c @@ -310,7 +310,6 @@ static int ni_pcidio_request_di_mite_channel(struct comedi_device *dev) writeb(primary_DMAChannel_bits(devpriv->di_mite_chan->channel) | secondary_DMAChannel_bits(devpriv->di_mite_chan->channel), dev->mmio + DMA_LINE_CONTROL_GROUP1); - mmiowb(); spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); return 0; } @@ -327,7 +326,6 @@ static void ni_pcidio_release_di_mite_channel(struct comedi_device *dev) writeb(primary_DMAChannel_bits(0) | secondary_DMAChannel_bits(0), dev->mmio + DMA_LINE_CONTROL_GROUP1); - mmiowb(); } spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); } diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c index 048cb35723ad..c1131a1622c0 100644 --- a/drivers/staging/comedi/drivers/ni_tio.c +++ b/drivers/staging/comedi/drivers/ni_tio.c @@ -234,7 +234,6 @@ static void ni_tio_set_bits_transient(struct ni_gpct *counter, regs[reg] &= ~mask; regs[reg] |= (value & mask); ni_tio_write(counter, regs[reg] | transient, reg); - mmiowb(); spin_unlock_irqrestore(&counter_dev->regs_lock, flags); } } diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c index f5af6f4069dc..39049d3c56d7 100644 --- a/drivers/staging/comedi/drivers/s626.c +++ b/drivers/staging/comedi/drivers/s626.c @@ -108,7 +108,6 @@ static void s626_mc_enable(struct comedi_device *dev, { unsigned int val = (cmd << 16) | cmd; - mmiowb(); writel(val, dev->mmio + reg); } @@ -116,7 +115,6 @@ static void s626_mc_disable(struct comedi_device *dev, unsigned int cmd, unsigned int reg) { writel(cmd << 16, dev->mmio + reg); - mmiowb(); } static bool s626_mc_test(struct comedi_device *dev, diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c index ef89534dd760..e5d3ebab6dae 100644 --- a/drivers/tty/serial/men_z135_uart.c +++ b/drivers/tty/serial/men_z135_uart.c @@ -353,7 +353,6 @@ static void men_z135_handle_tx(struct men_z135_port *uart) memcpy_toio(port->membase + MEN_Z135_TX_RAM, &xmit->buf[xmit->tail], n); xmit->tail = (xmit->tail + n) & (UART_XMIT_SIZE - 1); - mmiowb(); iowrite32(n & 0x3ff, port->membase + MEN_Z135_TX_CTRL); diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c index 1b4008d022bf..d22ccb32aa9b 100644 --- a/drivers/tty/serial/serial_txx9.c +++ b/drivers/tty/serial/serial_txx9.c @@ -248,7 +248,6 @@ static void serial_txx9_initialize(struct uart_port *port) sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST); /* TX4925 BUG WORKAROUND. Accessing SIOC register * immediately after soft reset causes bus error. 
*/ - mmiowb(); udelay(1); while ((sio_in(up, TXX9_SIFCR) & TXX9_SIFCR_SWRST) && --tmout) udelay(1); diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c index c9cfb100ecdc..cac991173ac0 100644 --- a/drivers/usb/early/xhci-dbc.c +++ b/drivers/usb/early/xhci-dbc.c @@ -533,8 +533,6 @@ static int xdbc_handle_external_reset(void) xdbc_mem_init(); - mmiowb(); - ret = xdbc_start(); if (ret < 0) goto reset_out; @@ -587,8 +585,6 @@ static int __init xdbc_early_setup(void) xdbc_mem_init(); - mmiowb(); - ret = xdbc_start(); if (ret < 0) { writel(0, &xdbc.xdbc_reg->control); diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index d932cc31711e..52e32644a4b2 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -421,8 +421,6 @@ static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags) string_length = xhci_dbc_populate_strings(dbc->string); xhci_dbc_init_contexts(xhci, string_length); - mmiowb(); - xhci_dbc_eps_init(xhci); dbc->state = DS_INITIALIZED; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index f6165d304b4d..48841e5dab90 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -1338,7 +1338,6 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) } /* Let SB update */ - mmiowb(); return rc; } @@ -1374,7 +1373,6 @@ static inline void qed_sb_ack(struct qed_sb_info *sb_info, /* Both segments (interrupts & acks) are written to same place address; * Need to guarantee all commands will be received (in-order) by HW. */ - mmiowb(); barrier(); } diff --git a/sound/soc/txx9/txx9aclc-ac97.c b/sound/soc/txx9/txx9aclc-ac97.c index 1cfca698ae4b..b0fa285c7ba2 100644 --- a/sound/soc/txx9/txx9aclc-ac97.c +++ b/sound/soc/txx9/txx9aclc-ac97.c @@ -102,7 +102,6 @@ static void txx9aclc_ac97_cold_reset(struct snd_ac97 *ac97) u32 ready = ACINT_CODECRDY(ac97->num) | ACINT_REGACCRDY; __raw_writel(ACCTL_ENLINK, base + ACCTLDIS); - mmiowb(); udelay(1); __raw_writel(ACCTL_ENLINK, base + ACCTLEN); /* wait for primary codec ready status */ -- cgit v1.2.3-59-g8ed1b From 574258222281221444b561b05c3a5fa85947a80c Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Thu, 4 Apr 2019 09:56:38 +0300 Subject: RDMA/hns: Remove asynchronous QP destroy Verbs destroy callbacks are synchronous operations and can't be delayed. The expectation is that after the driver returns from the destroy function, the memory can be freed and the user won't be able to access it again. Ditch the workqueue implementation used in the HNS driver. Fixes: d838c481e025 ("IB/hns: Fix the bug when destroy qp") Signed-off-by: Leon Romanovsky Acked-by: oulijun Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_common.h | 33 --- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 374 +--------------------------- drivers/infiniband/hw/hns/hns_roce_hw_v1.h | 12 - 3 files changed, 13 insertions(+), 406 deletions(-) (limited to 'drivers/infiniband/hw/hns/hns_roce_hw_v1.c') diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h index f4c92a7ac1ce..8e95a1aa1b4f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_common.h +++ b/drivers/infiniband/hw/hns/hns_roce_common.h @@ -57,32 +57,6 @@ #define roce_set_bit(origin, shift, val) \ roce_set_field((origin), (1ul << (shift)), (shift), (val)) -/* - * roce_hw_index_cmp_lt - Compare two hardware index values in hisilicon - * SOC, check if a is less than b.
- * @a: hardware index value - * @b: hardware index value - * @bits: the number of bits of a and b, range: 0~31. - * - * Hardware index increases continuously till max value, and then restart - * from zero, again and again. Because the bits of reg field is often - * limited, the reg field can only hold the low bits of the hardware index - * in hisilicon SOC. - * In some scenes we need to compare two values(a,b) getted from two reg - * fields in this driver, for example: - * If a equals 0xfffe, b equals 0x1 and bits equals 16, we think b has - * incresed from 0xffff to 0x1 and a is less than b. - * If a equals 0xfffe, b equals 0x0xf001 and bits equals 16, we think a - * is bigger than b. - * - * Return true on a less than b, otherwise false. - */ -#define roce_hw_index_mask(bits) ((1ul << (bits)) - 1) -#define roce_hw_index_shift(bits) (32 - (bits)) -#define roce_hw_index_cmp_lt(a, b, bits) \ - ((int)((((a) - (b)) & roce_hw_index_mask(bits)) << \ - roce_hw_index_shift(bits)) < 0) - #define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3 #define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4 @@ -271,8 +245,6 @@ #define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \ (((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) -#define ROCEE_SDB_PTR_CMP_BITS 28 - #define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0 #define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \ (((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S) @@ -353,13 +325,8 @@ #define ROCEE_CAEP_AE_MASK_REG 0x6C8 #define ROCEE_CAEP_AE_ST_REG 0x6CC -#define ROCEE_SDB_ISSUE_PTR_REG 0x758 -#define ROCEE_SDB_SEND_PTR_REG 0x75C #define ROCEE_CAEP_CQE_WCMD_EMPTY 0x850 #define ROCEE_SCAEP_WR_CQE_CNT 0x8D0 -#define ROCEE_SDB_INV_CNT_REG 0x9A4 -#define ROCEE_SDB_RETRY_CNT_REG 0x9AC -#define ROCEE_TSP_BP_ST_REG 0x9EC #define ROCEE_ECC_UCERR_ALM0_REG 0xB34 #define ROCEE_ECC_CERR_ALM0_REG 0xB40 diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 98c6a41edefd..26d4ed447bea 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -1511,38 +1511,6 @@ static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset) return ret; } -static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev) -{ - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_v1_priv *priv; - struct hns_roce_des_qp *des_qp; - - priv = (struct hns_roce_v1_priv *)hr_dev->priv; - des_qp = &priv->des_qp; - - des_qp->requeue_flag = 1; - des_qp->qp_wq = create_singlethread_workqueue("hns_roce_destroy_qp"); - if (!des_qp->qp_wq) { - dev_err(dev, "Create destroy qp workqueue failed!\n"); - return -ENOMEM; - } - - return 0; -} - -static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv; - struct hns_roce_des_qp *des_qp; - - priv = (struct hns_roce_v1_priv *)hr_dev->priv; - des_qp = &priv->des_qp; - - des_qp->requeue_flag = 0; - flush_workqueue(des_qp->qp_wq); - destroy_workqueue(des_qp->qp_wq); -} - static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev) { int i = 0; @@ -1661,12 +1629,6 @@ static int hns_roce_v1_init(struct hns_roce_dev *hr_dev) goto error_failed_tptr_init; } - ret = hns_roce_des_qp_init(hr_dev); - if (ret) { - dev_err(dev, "des qp init failed!\n"); - goto error_failed_des_qp_init; - } - ret = hns_roce_free_mr_init(hr_dev); if (ret) { dev_err(dev, "free mr init failed!\n"); @@ -1678,9 +1640,6 @@ static int hns_roce_v1_init(struct hns_roce_dev *hr_dev) return 0; error_failed_free_mr_init: - hns_roce_des_qp_free(hr_dev); - -error_failed_des_qp_init: 
hns_roce_tptr_free(hr_dev); error_failed_tptr_init: @@ -1698,7 +1657,6 @@ static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev) { hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN); hns_roce_free_mr_free(hr_dev); - hns_roce_des_qp_free(hr_dev); hns_roce_tptr_free(hr_dev); hns_roce_bt_free(hr_dev); hns_roce_raq_free(hr_dev); @@ -3644,307 +3602,22 @@ static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr); } -static void hns_roce_check_sdb_status(struct hns_roce_dev *hr_dev, - u32 *old_send, u32 *old_retry, - u32 *tsp_st, u32 *success_flags) -{ - __le32 *old_send_tmp, *old_retry_tmp; - u32 sdb_retry_cnt; - u32 sdb_send_ptr; - u32 cur_cnt, old_cnt; - __le32 tmp, tmp1; - u32 send_ptr; - - sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG); - sdb_retry_cnt = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG); - tmp = cpu_to_le32(sdb_send_ptr); - tmp1 = cpu_to_le32(sdb_retry_cnt); - cur_cnt = roce_get_field(tmp, ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) + - roce_get_field(tmp1, ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M, - ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S); - - old_send_tmp = (__le32 *)old_send; - old_retry_tmp = (__le32 *)old_retry; - if (!roce_get_bit(*tsp_st, ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) { - old_cnt = roce_get_field(*old_send_tmp, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) + - roce_get_field(*old_retry_tmp, - ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M, - ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S); - if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) - *success_flags = 1; - } else { - old_cnt = roce_get_field(*old_send_tmp, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S); - if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) { - *success_flags = 1; - } else { - send_ptr = roce_get_field(*old_send_tmp, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) + - roce_get_field(tmp1, - ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M, - ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S); - roce_set_field(*old_send_tmp, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S, - send_ptr); - } - } -} - -static int check_qp_db_process_status(struct hns_roce_dev *hr_dev, - struct hns_roce_qp *hr_qp, - u32 sdb_issue_ptr, - u32 *sdb_inv_cnt, - u32 *wait_stage) -{ - struct device *dev = &hr_dev->pdev->dev; - u32 sdb_send_ptr, old_send; - __le32 sdb_issue_ptr_tmp; - __le32 sdb_send_ptr_tmp; - u32 success_flags = 0; - unsigned long end; - u32 old_retry; - u32 inv_cnt; - u32 tsp_st; - __le32 tmp; - - if (*wait_stage > HNS_ROCE_V1_DB_STAGE2 || - *wait_stage < HNS_ROCE_V1_DB_STAGE1) { - dev_err(dev, "QP(0x%lx) db status wait stage(%d) error!\n", - hr_qp->qpn, *wait_stage); - return -EINVAL; - } - - /* Calculate the total timeout for the entire verification process */ - end = msecs_to_jiffies(HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS) + jiffies; - - if (*wait_stage == HNS_ROCE_V1_DB_STAGE1) { - /* Query db process status, until hw process completely */ - sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG); - while (roce_hw_index_cmp_lt(sdb_send_ptr, sdb_issue_ptr, - ROCEE_SDB_PTR_CMP_BITS)) { - if (!time_before(jiffies, end)) { - dev_dbg(dev, "QP(0x%lx) db process stage1 timeout. 
issue 0x%x send 0x%x.\n", - hr_qp->qpn, sdb_issue_ptr, - sdb_send_ptr); - return 0; - } - - msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS); - sdb_send_ptr = roce_read(hr_dev, - ROCEE_SDB_SEND_PTR_REG); - } - - sdb_send_ptr_tmp = cpu_to_le32(sdb_send_ptr); - sdb_issue_ptr_tmp = cpu_to_le32(sdb_issue_ptr); - if (roce_get_field(sdb_issue_ptr_tmp, - ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M, - ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) == - roce_get_field(sdb_send_ptr_tmp, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)) { - old_send = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG); - old_retry = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG); - - do { - tsp_st = roce_read(hr_dev, ROCEE_TSP_BP_ST_REG); - tmp = cpu_to_le32(tsp_st); - if (roce_get_bit(tmp, - ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S) == 1) { - *wait_stage = HNS_ROCE_V1_DB_WAIT_OK; - return 0; - } - - if (!time_before(jiffies, end)) { - dev_dbg(dev, "QP(0x%lx) db process stage1 timeout when send ptr equals issue ptr.\n" - "issue 0x%x send 0x%x.\n", - hr_qp->qpn, - le32_to_cpu(sdb_issue_ptr_tmp), - le32_to_cpu(sdb_send_ptr_tmp)); - return 0; - } - - msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS); - - hns_roce_check_sdb_status(hr_dev, &old_send, - &old_retry, &tsp_st, - &success_flags); - } while (!success_flags); - } - - *wait_stage = HNS_ROCE_V1_DB_STAGE2; - - /* Get list pointer */ - *sdb_inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG); - dev_dbg(dev, "QP(0x%lx) db process stage2. inv cnt = 0x%x.\n", - hr_qp->qpn, *sdb_inv_cnt); - } - - if (*wait_stage == HNS_ROCE_V1_DB_STAGE2) { - /* Query db's list status, until hw reversal */ - inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG); - while (roce_hw_index_cmp_lt(inv_cnt, - *sdb_inv_cnt + SDB_INV_CNT_OFFSET, - ROCEE_SDB_CNT_CMP_BITS)) { - if (!time_before(jiffies, end)) { - dev_dbg(dev, "QP(0x%lx) db process stage2 timeout. 
inv cnt 0x%x.\n", - hr_qp->qpn, inv_cnt); - return 0; - } - - msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS); - inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG); - } - - *wait_stage = HNS_ROCE_V1_DB_WAIT_OK; - } - - return 0; -} - -static int check_qp_reset_state(struct hns_roce_dev *hr_dev, - struct hns_roce_qp *hr_qp, - struct hns_roce_qp_work *qp_work_entry, - int *is_timeout) -{ - struct device *dev = &hr_dev->pdev->dev; - u32 sdb_issue_ptr; - int ret; - - if (hr_qp->state != IB_QPS_RESET) { - /* Set qp to ERR, waiting for hw complete processing all dbs */ - ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state, - IB_QPS_ERR); - if (ret) { - dev_err(dev, "Modify QP(0x%lx) to ERR failed!\n", - hr_qp->qpn); - return ret; - } - - /* Record issued doorbell */ - sdb_issue_ptr = roce_read(hr_dev, ROCEE_SDB_ISSUE_PTR_REG); - qp_work_entry->sdb_issue_ptr = sdb_issue_ptr; - qp_work_entry->db_wait_stage = HNS_ROCE_V1_DB_STAGE1; - - /* Query db process status, until hw process completely */ - ret = check_qp_db_process_status(hr_dev, hr_qp, sdb_issue_ptr, - &qp_work_entry->sdb_inv_cnt, - &qp_work_entry->db_wait_stage); - if (ret) { - dev_err(dev, "Check QP(0x%lx) db process status failed!\n", - hr_qp->qpn); - return ret; - } - - if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK) { - qp_work_entry->sche_cnt = 0; - *is_timeout = 1; - return 0; - } - - /* Modify qp to reset before destroying qp */ - ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state, - IB_QPS_RESET); - if (ret) { - dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", - hr_qp->qpn); - return ret; - } - } - - return 0; -} - -static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work) -{ - struct hns_roce_qp_work *qp_work_entry; - struct hns_roce_v1_priv *priv; - struct hns_roce_dev *hr_dev; - struct hns_roce_qp *hr_qp; - struct device *dev; - unsigned long qpn; - int ret; - - qp_work_entry = container_of(work, struct hns_roce_qp_work, work); - hr_dev = to_hr_dev(qp_work_entry->ib_dev); - dev = &hr_dev->pdev->dev; - priv = (struct hns_roce_v1_priv *)hr_dev->priv; - hr_qp = qp_work_entry->qp; - qpn = hr_qp->qpn; - - dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn); - - qp_work_entry->sche_cnt++; - - /* Query db process status, until hw process completely */ - ret = check_qp_db_process_status(hr_dev, hr_qp, - qp_work_entry->sdb_issue_ptr, - &qp_work_entry->sdb_inv_cnt, - &qp_work_entry->db_wait_stage); - if (ret) { - dev_err(dev, "Check QP(0x%lx) db process status failed!\n", - qpn); - return; - } - - if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK && - priv->des_qp.requeue_flag) { - queue_work(priv->des_qp.qp_wq, work); - return; - } - - /* Modify qp to reset before destroying qp */ - ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state, - IB_QPS_RESET); - if (ret) { - dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn); - return; - } - - hns_roce_qp_remove(hr_dev, hr_qp); - hns_roce_qp_free(hr_dev, hr_qp); - - if (hr_qp->ibqp.qp_type == IB_QPT_RC) { - /* RC QP, release QPN */ - hns_roce_release_range_qp(hr_dev, qpn, 1); - kfree(hr_qp); - } else - kfree(hr_to_hr_sqp(hr_qp)); - - kfree(qp_work_entry); - - dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn); -} - int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_qp_work qp_work_entry; - struct hns_roce_qp_work *qp_work; - struct 
hns_roce_v1_priv *priv; struct hns_roce_cq *send_cq, *recv_cq; - bool is_user = ibqp->uobject; - int is_timeout = 0; int ret; - ret = check_qp_reset_state(hr_dev, hr_qp, &qp_work_entry, &is_timeout); - if (ret) { - dev_err(dev, "QP reset state check failed(%d)!\n", ret); + ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET); + if (ret) return ret; - } send_cq = to_hr_cq(hr_qp->ibqp.send_cq); recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq); hns_roce_lock_cqs(send_cq, recv_cq); - if (!is_user) { + if (!udata) { __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ? to_hr_srq(hr_qp->ibqp.srq) : NULL); if (send_cq != recv_cq) @@ -3952,18 +3625,16 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) } hns_roce_unlock_cqs(send_cq, recv_cq); - if (!is_timeout) { - hns_roce_qp_remove(hr_dev, hr_qp); - hns_roce_qp_free(hr_dev, hr_qp); + hns_roce_qp_remove(hr_dev, hr_qp); + hns_roce_qp_free(hr_dev, hr_qp); - /* RC QP, release QPN */ - if (hr_qp->ibqp.qp_type == IB_QPT_RC) - hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1); - } + /* RC QP, release QPN */ + if (hr_qp->ibqp.qp_type == IB_QPT_RC) + hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1); hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt); - if (is_user) + if (udata) ib_umem_release(hr_qp->umem); else { kfree(hr_qp->sq.wrid); @@ -3972,29 +3643,10 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); } - if (!is_timeout) { - if (hr_qp->ibqp.qp_type == IB_QPT_RC) - kfree(hr_qp); - else - kfree(hr_to_hr_sqp(hr_qp)); - } else { - qp_work = kzalloc(sizeof(*qp_work), GFP_KERNEL); - if (!qp_work) - return -ENOMEM; - - INIT_WORK(&qp_work->work, hns_roce_v1_destroy_qp_work_fn); - qp_work->ib_dev = &hr_dev->ib_dev; - qp_work->qp = hr_qp; - qp_work->db_wait_stage = qp_work_entry.db_wait_stage; - qp_work->sdb_issue_ptr = qp_work_entry.sdb_issue_ptr; - qp_work->sdb_inv_cnt = qp_work_entry.sdb_inv_cnt; - qp_work->sche_cnt = qp_work_entry.sche_cnt; - - priv = (struct hns_roce_v1_priv *)hr_dev->priv; - queue_work(priv->des_qp.qp_wq, &qp_work->work); - dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn); - } - + if (hr_qp->ibqp.qp_type == IB_QPT_RC) + kfree(hr_qp); + else + kfree(hr_to_hr_sqp(hr_qp)); return 0; } diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h index 1a2c38785c7f..52307b2c7100 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h @@ -110,11 +110,6 @@ #define HNS_ROCE_V1_EXT_ODB_ALFUL \ (HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD) -#define HNS_ROCE_V1_DB_WAIT_OK 0 -#define HNS_ROCE_V1_DB_STAGE1 1 -#define HNS_ROCE_V1_DB_STAGE2 2 -#define HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS 10000 -#define HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS 20 #define HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS 50000 #define HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS 10000 #define HNS_ROCE_V1_FREE_MR_WAIT_VALUE 5 @@ -162,7 +157,6 @@ #define SQ_PSN_SHIFT 8 #define QKEY_VAL 0x80010000 #define SDB_INV_CNT_OFFSET 8 -#define SDB_ST_CMP_VAL 8 #define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10 #define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10 @@ -1068,11 +1062,6 @@ struct hns_roce_qp_work { u32 sche_cnt; }; -struct hns_roce_des_qp { - struct workqueue_struct *qp_wq; - int requeue_flag; -}; - struct hns_roce_mr_free_work { struct work_struct work; struct ib_device *ib_dev; @@ -1100,7 +1089,6 @@ struct hns_roce_v1_priv { struct hns_roce_raq_table raq_table; struct hns_roce_bt_table bt_table; 
struct hns_roce_tptr_table tptr_table; - struct hns_roce_des_qp des_qp; struct hns_roce_free_mr free_mr; }; -- cgit v1.2.3-59-g8ed1b From 619122be3d40c835eb5fad9e326780909926495d Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Mon, 20 May 2019 09:43:53 +0300 Subject: RDMA/hns: Fix PD memory leak for internal allocation free_pd is allocated internally by the driver and hence needs to be freed internally too, or it leaks. Fixes: 21a428a019c9 ("RDMA: Handle PD allocations by IB/core") Signed-off-by: Leon Romanovsky Acked-by: Lijun Ou Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/infiniband/hw/hns/hns_roce_hw_v1.c') diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 4c5d0f160c10..e068a02122f5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -899,6 +899,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev) dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret); hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL); + kfree(&free_mr->mr_free_pd->ibpd); } static int hns_roce_db_init(struct hns_roce_dev *hr_dev) -- cgit v1.2.3-59-g8ed1b
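
A minimal sketch of the lifetime rule behind this last fix, assuming the reserved loopback PD follows the usual driver-internal pattern. The helper names below are illustrative and not taken from the driver; hns_roce_alloc_pd(), hns_roce_dealloc_pd() and rdma_zalloc_drv_obj() are the interfaces visible in the patches above. A PD that the driver allocates for its own use is never owned by the IB core, so the driver must pair the dealloc verb with a kfree() of the memory it allocated, which is the call this patch adds.

#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include "hns_roce_device.h"	/* hns_roce_alloc_pd(), hns_roce_dealloc_pd() */

/* Illustrative helpers, not the driver's actual functions. */
static struct ib_pd *internal_pd_get(struct ib_device *ibdev)
{
	struct ib_pd *pd;

	/* Driver-internal allocation: the IB core never sees this PD. */
	pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
	if (!pd)
		return NULL;

	pd->device = ibdev;
	if (hns_roce_alloc_pd(pd, NULL)) {	/* kernel PD, no udata */
		kfree(pd);
		return NULL;
	}
	return pd;
}

static void internal_pd_put(struct ib_pd *pd)
{
	hns_roce_dealloc_pd(pd, NULL);	/* releases HW state only */
	kfree(pd);			/* the driver must also free the memory itself */
}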