path: root/drivers/infiniband/hw/mlx5/mr.c
author    Linus Torvalds <torvalds@linux-foundation.org> 2019-01-05 18:20:51 -0800
committer Linus Torvalds <torvalds@linux-foundation.org> 2019-01-05 18:20:51 -0800
commit    3954e1d0310e30e743431b58918825c4d4fe8812 (patch)
tree      2627da50ecad4c251ffdbba36e1300592560cf7f /drivers/infiniband/hw/mlx5/mr.c
parent    Merge tag 'fbdev-v4.21' of git://github.com/bzolnier/linux (diff)
parent    infiniband/qedr: Potential null ptr dereference of qp (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Jason Gunthorpe:
 "Over the break a few defects were found, so this is a -rc style pull
  request of various small things that have been posted.

   - An attempt to shorten RCU grace period driven delays showed crashes
     during heavier testing, and has been entirely reverted

   - A missed merge/rebase error between the advise_mr and ib_device_ops
     series

   - Some small static analysis driven fixes from Julia and Aditya

   - Missed ability to create a XRC_INI in the devx verbs interop series"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  infiniband/qedr: Potential null ptr dereference of qp
  infiniband: bnxt_re: qplib: Check the return value of send_message
  IB/ipoib: drop useless LIST_HEAD
  IB/core: Add advise_mr to the list of known ops
  Revert "IB/mlx5: Fix long EEH recover time with NVMe offloads"
  IB/mlx5: Allow XRC INI usage via verbs in DEVX context
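The revert mentioned above restores the unconditional SRCU grace-period wait on the mlx5 MR teardown paths (see the diff below). As a rough illustration of the pattern involved, not code from this commit, the ODP page-fault handler runs inside an SRCU read-side critical section, and the destroy path calls synchronize_srcu() so an in-flight fault handler cannot still be using a just-destroyed mkey; the names and function bodies here are simplified stand-ins:

```c
#include <linux/srcu.h>

/* Illustrative sketch only: simplified reader/updater pairing,
 * not the actual mlx5 code. mr_srcu stands in for dev->mr_srcu. */
static struct srcu_struct mr_srcu;	/* init_srcu_struct() at setup time */

/* Read side: a page-fault handler looks up and uses an mkey
 * under SRCU protection. */
static void handle_page_fault(void)
{
	int idx = srcu_read_lock(&mr_srcu);

	/* ... look up the mkey and resolve the fault ... */

	srcu_read_unlock(&mr_srcu, idx);
}

/* Update side: after destroying an mkey, wait for every reader that
 * might still hold a reference before freeing the backing resources. */
static void destroy_mkey_and_wait(void)
{
	/* ... destroy the hardware mkey ... */

	synchronize_srcu(&mr_srcu);	/* grace period: no stale readers remain */

	/* ... now safe to free associated memory ... */
}
```

The reverted optimization skipped the synchronize_srcu() call when no ODP mkey was present; the hunks below drop that conditional and wait unconditionally again whenever CONFIG_INFINIBAND_ON_DEMAND_PAGING is enabled.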
Diffstat (limited to 'drivers/infiniband/hw/mlx5/mr.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 19
1 file changed, 3 insertions(+), 16 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 1bd8c1b1dba1..fd6ea1f75085 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -73,8 +73,7 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/* Wait until all page fault handlers using the mr complete. */
- if (mr->umem && mr->umem->is_odp)
- synchronize_srcu(&dev->mr_srcu);
+ synchronize_srcu(&dev->mr_srcu);
#endif
return err;
@@ -238,9 +237,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c];
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- bool odp_mkey_exist = false;
-#endif
struct mlx5_ib_mr *tmp_mr;
struct mlx5_ib_mr *mr;
LIST_HEAD(del_list);
@@ -253,10 +249,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
break;
}
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (mr->umem && mr->umem->is_odp)
- odp_mkey_exist = true;
-#endif
list_move(&mr->list, &del_list);
ent->cur--;
ent->size--;
@@ -265,8 +257,7 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (odp_mkey_exist)
- synchronize_srcu(&dev->mr_srcu);
+ synchronize_srcu(&dev->mr_srcu);
#endif
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
@@ -581,7 +572,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c];
- bool odp_mkey_exist = false;
struct mlx5_ib_mr *tmp_mr;
struct mlx5_ib_mr *mr;
LIST_HEAD(del_list);
@@ -594,8 +584,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
break;
}
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- if (mr->umem && mr->umem->is_odp)
- odp_mkey_exist = true;
list_move(&mr->list, &del_list);
ent->cur--;
ent->size--;
@@ -604,8 +592,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (odp_mkey_exist)
- synchronize_srcu(&dev->mr_srcu);
+ synchronize_srcu(&dev->mr_srcu);
#endif
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {