author     Mitko Haralanov <mitko.haralanov@intel.com>    2016-04-12 10:46:16 -0700
committer  Doug Ledford <dledford@redhat.com>             2016-04-28 12:00:38 -0400
commit     849e3e9398608c26a7c54bf9fbf3288f7ced6bfb (patch)
tree       77ebe0377b3d1a6f968fc1468b7f201fcb36e275 /drivers/staging/rdma
parent     IB/hfi1: Fix deadlock caused by locking with wrong scope (diff)
IB/hfi1: Prevent unpinning of wrong pages
The routine used by the SDMA cache to handle already-cached nodes can
extend an existing node. In its error handling path, the routine unpins
pages when not all pages of the buffer extension could be pinned. That
path contained a bug: it would mistakenly unpin pages from the original
set rather than from the newly pinned pages.

This commit fixes the bug by offsetting the page array so that it points
at the beginning of the newly pinned pages.

Reviewed-by: Dean Luick <dean.luick@intel.com>
Signed-off-by: Mitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
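In other words, the cached node already holds node->npages pinned pages at
the front of the page array, and the extension pins additional pages behind
them; on a partial pin, only that new tail may be released. A minimal sketch
of the pattern, with hypothetical pin_range()/unpin_range() helpers standing
in for the driver's real pinning API:

	struct page;

	/* Hypothetical stand-ins for the hfi1 pin/release helpers. */
	extern int pin_range(struct page **pages, unsigned npages);
	extern void unpin_range(struct page **pages, unsigned npages);

	static int extend_cached_node(struct page **pages, unsigned cached,
				      unsigned npages)
	{
		/* Newly pinned pages land after the 'cached' existing ones. */
		int pinned = pin_range(pages + cached, npages);

		if (pinned < 0)
			return pinned;
		if (pinned != npages) {
			/*
			 * Release only the freshly pinned tail. The buggy
			 * code passed 'pages' with no offset, so it unpinned
			 * 'pinned' pages from the still-in-use cached set
			 * instead.
			 */
			unpin_range(pages + cached, pinned);
			return -1;	/* -EFAULT in the driver */
		}
		return 0;
	}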
Diffstat (limited to 'drivers/staging/rdma')
-rw-r--r--	drivers/staging/rdma/hfi1/user_sdma.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c
index e08c74fe4c6b..d53a659548e0 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/staging/rdma/hfi1/user_sdma.c
@@ -278,7 +278,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
 static void user_sdma_free_request(struct user_sdma_request *, bool);
 static int pin_vector_pages(struct user_sdma_request *,
			     struct user_sdma_iovec *);
-static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned);
+static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
+			       unsigned);
 static int check_header_template(struct user_sdma_request *,
				  struct hfi1_pkt_header *, u32, u32);
 static int set_txreq_header(struct user_sdma_request *,
@@ -1110,7 +1111,8 @@ retry:
 			goto bail;
 		}
 		if (pinned != npages) {
-			unpin_vector_pages(current->mm, pages, pinned);
+			unpin_vector_pages(current->mm, pages, node->npages,
+					   pinned);
 			ret = -EFAULT;
 			goto bail;
 		}
@@ -1150,9 +1152,9 @@ bail:
 }

 static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
-			       unsigned npages)
+			       unsigned start, unsigned npages)
 {
-	hfi1_release_user_pages(mm, pages, npages, 0);
+	hfi1_release_user_pages(mm, pages + start, npages, 0);

 	kfree(pages);
 }
@@ -1566,7 +1568,8 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
	 * prevent a deadlock when hfi1_release_user_pages() attempts to
	 * take the mmap_sem, which the MMU notifier has already taken.
	 */
-	unpin_vector_pages(mm ? NULL : current->mm, node->pages, node->npages);
+	unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
+			   node->npages);
	/*
	 * If called by the MMU notifier, we have to adjust the pinned
	 * page count ourselves.
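Taken together, the two call sites exercise the new 'start' argument
differently: the error path in pin_vector_pages() skips past the node's
original pages and drops only the partially pinned extension, while the
removal path tears down the whole node and therefore starts at offset 0.
Condensed from the hunks above:

	/* Error path: release only the new, partially pinned tail. */
	unpin_vector_pages(current->mm, pages, node->npages, pinned);

	/* Removal path: the whole node goes away; start from page 0. */
	unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
			   node->npages);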