about summary refs log tree commit diff stats
path: root/drivers/infiniband/hw/usnic
diff options
context:
space:
mode:
author Davidlohr Bueso <dave@stgolabs.net> 2019-02-06 09:59:15 -0800
committer Jason Gunthorpe <jgg@mellanox.com> 2019-02-07 12:54:02 -0700
commit 70f8a3ca68d3e1f3344d959981ca55d5f6ec77f7 (patch)
tree 5c43234a8f1697bf3d1a4a8926a014b922b046ff /drivers/infiniband/hw/usnic
parent RDMA/iwpm: move kdoc comments to functions (diff)
download linux-dev-70f8a3ca68d3e1f3344d959981ca55d5f6ec77f7.tar.xz
linux-dev-70f8a3ca68d3e1f3344d959981ca55d5f6ec77f7.zip
mm: make mm->pinned_vm an atomic64 counter
Taking a sleeping lock to _only_ increment a variable is quite the overkill, and pretty much all users do this. Furthermore, some drivers (i.e., infiniband and scif) that need pinned semantics can go to quite some trouble to actually delay via workqueue (un)accounting for pinned pages when not possible to acquire it. By making the counter atomic we no longer need to hold the mmap_sem and can simplify some code around it for pinned_vm users. The counter is 64-bit such that we need not worry about overflows such as rdma user input controlled from userspace. Reviewed-by: Ira Weiny <ira.weiny@intel.com> Reviewed-by: Christoph Lameter <cl@linux.com> Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com> Reviewed-by: Jan Kara <jack@suse.cz> Signed-off-by: Davidlohr Bueso <dbueso@suse.de> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Diffstat (limited to 'drivers/infiniband/hw/usnic')
-rw-r--r-- drivers/infiniband/hw/usnic/usnic_uiom.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index ce01a59fccc4..854436a2b437 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -129,7 +129,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
uiomr->owning_mm = mm = current->mm;
down_write(&mm->mmap_sem);
- locked = npages + current->mm->pinned_vm;
+ locked = npages + atomic64_read(&current->mm->pinned_vm);
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
@@ -187,7 +187,7 @@ out:
if (ret < 0)
usnic_uiom_put_pages(chunk_list, 0);
else {
- mm->pinned_vm = locked;
+ atomic64_set(&mm->pinned_vm, locked);
mmgrab(uiomr->owning_mm);
}
@@ -441,7 +441,7 @@ static void usnic_uiom_release_defer(struct work_struct *work)
container_of(work, struct usnic_uiom_reg, work);
down_write(&uiomr->owning_mm->mmap_sem);
- uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
+ atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
up_write(&uiomr->owning_mm->mmap_sem);
__usnic_uiom_release_tail(uiomr);
@@ -469,7 +469,7 @@ void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
} else {
down_write(&uiomr->owning_mm->mmap_sem);
}
- uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
+ atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
up_write(&uiomr->owning_mm->mmap_sem);
__usnic_uiom_release_tail(uiomr);