From f19bd643dbded8672bfeffe9e51322464e4a9239 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 12 Apr 2016 10:45:57 -0700 Subject: IB/hfi1: Prevent NULL pointer dereferences in caching code There is a potential kernel crash when the MMU notifier calls the invalidation routines in the hfi1 pinned page caching code for sdma. The invalidation routine could call the remove callback for the node, which in turn ends up dereferencing the current task_struct to get a pointer to the mm_struct. However, the mm_struct pointer could be NULL, resulting in the following backtrace: BUG: unable to handle kernel NULL pointer dereference at 00000000000000a8 IP: [] sdma_rb_remove+0xaa/0x100 [hfi1] task: ffff88085e66e080 ti: ffff88085c244000 task.ti: ffff88085c244000 RIP: 0010:[] [] sdma_rb_remove+0xaa/0x100 [hfi1] RSP: 0000:ffff88085c245878 EFLAGS: 00010002 RAX: 0000000000000000 RBX: ffff88105b9bbd40 RCX: ffffea003931a830 RDX: 0000000000000004 RSI: ffff88105754a9c0 RDI: ffff88105754a9c0 RBP: ffff88085c245890 R08: ffff88105b9bbd70 R09: 00000000fffffffb R10: ffff88105b9bbd58 R11: 0000000000000013 R12: ffff88105754a9c0 R13: 0000000000000001 R14: 0000000000000001 R15: ffff88105b9bbd40 FS: 0000000000000000(0000) GS:ffff88107ef40000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000000000a8 CR3: 0000000001a0b000 CR4: 00000000001407e0 Stack: ffff88105b9bbd40 ffff88080ec481a8 ffff88080ec481b8 ffff88085c2458c0 ffffffffa03fa00e ffff88080ec48190 ffff88080ed9cd00 0000000001024000 0000000000000000 ffff88085c245920 ffffffffa03fa0e7 0000000000000282 Call Trace: [] __mmu_rb_remove.isra.5+0x5e/0x70 [hfi1] [] mmu_notifier_mem_invalidate+0xc7/0xf0 [hfi1] [] mmu_notifier_page+0x13/0x20 [hfi1] [] __mmu_notifier_invalidate_page+0x50/0x70 [] try_to_unmap_one+0x20b/0x470 [] try_to_unmap_anon+0xa7/0x120 [] try_to_unmap+0x4d/0x60 [] shrink_page_list+0x2eb/0x9d0 [] shrink_inactive_list+0x243/0x490 [] shrink_lruvec+0x4c1/0x640 [] shrink_zone+0x31/0x100 [] kswapd_shrink_zone.constprop.62+0xef/0x1c0 [] kswapd+0x403/0x7e0 [] ? shrink_all_memory+0xf0/0xf0 [] kthread+0xc0/0xd0 [] ? insert_kthread_work+0x40/0x40 [] ret_from_fork+0x7c/0xb0 [] ? insert_kthread_work+0x40/0x40 To correct this, the mm_struct passed to us by the MMU notifier is used (which is what should have been done to begin with). This avoids the broken dereferences and ensures that the correct mm_struct is used. 
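As a rough sketch of the corrected pattern (simplified, with a hypothetical function name; the real change is in the diff below), the remove callback now uses the mm_struct handed down by the notifier chain instead of reaching through current, because during reclaim the call arrives on a kernel thread (kswapd) whose current->mm is NULL:

static void example_rb_remove(struct rb_root *root,
			      struct mmu_rb_node *node,
			      struct mm_struct *mm)
{
	/*
	 * Pre-patch (broken): from kswapd's reclaim path, current is a
	 * kernel thread, current->mm is NULL, and dereferencing
	 * current->mm->pinned_vm oopses.
	 *
	 * Post-patch: use the mm supplied by the MMU notifier.
	 */
	if (mm)
		mm->pinned_vm -= node->npages;
}
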
Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mmu_rb.c | 24 ++++++++++++++---------- drivers/staging/rdma/hfi1/mmu_rb.h | 3 ++- drivers/staging/rdma/hfi1/user_exp_rcv.c | 9 +++++---- drivers/staging/rdma/hfi1/user_sdma.c | 24 ++++++++++++++++-------- 4 files changed, 37 insertions(+), 23 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c index c7ad0164ea9a..eac4d041d351 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.c +++ b/drivers/staging/rdma/hfi1/mmu_rb.c @@ -71,6 +71,7 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); static void mmu_notifier_mem_invalidate(struct mmu_notifier *, + struct mm_struct *, unsigned long, unsigned long); static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *, unsigned long, unsigned long); @@ -137,7 +138,7 @@ void hfi1_mmu_rb_unregister(struct rb_root *root) rbnode = rb_entry(node, struct mmu_rb_node, node); rb_erase(node, root); if (handler->ops->remove) - handler->ops->remove(root, rbnode, false); + handler->ops->remove(root, rbnode, NULL); } } @@ -201,14 +202,14 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, } static void __mmu_rb_remove(struct mmu_rb_handler *handler, - struct mmu_rb_node *node, bool arg) + struct mmu_rb_node *node, struct mm_struct *mm) { /* Validity of handler and node pointers has been checked by caller. */ hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr, node->len); __mmu_int_rb_remove(node, handler->root); if (handler->ops->remove) - handler->ops->remove(handler->root, node, arg); + handler->ops->remove(handler->root, node, mm); } struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr, @@ -237,7 +238,7 @@ void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node) return; spin_lock_irqsave(&handler->lock, flags); - __mmu_rb_remove(handler, node, false); + __mmu_rb_remove(handler, node, NULL); spin_unlock_irqrestore(&handler->lock, flags); } @@ -260,7 +261,7 @@ unlock: static inline void mmu_notifier_page(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long addr) { - mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE); + mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE); } static inline void mmu_notifier_range_start(struct mmu_notifier *mn, @@ -268,25 +269,28 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *mn, unsigned long start, unsigned long end) { - mmu_notifier_mem_invalidate(mn, start, end); + mmu_notifier_mem_invalidate(mn, mm, start, end); } static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, + struct mm_struct *mm, unsigned long start, unsigned long end) { struct mmu_rb_handler *handler = container_of(mn, struct mmu_rb_handler, mn); struct rb_root *root = handler->root; - struct mmu_rb_node *node; + struct mmu_rb_node *node, *ptr = NULL; unsigned long flags; spin_lock_irqsave(&handler->lock, flags); - for (node = __mmu_int_rb_iter_first(root, start, end - 1); node; - node = __mmu_int_rb_iter_next(node, start, end - 1)) { + for (node = __mmu_int_rb_iter_first(root, start, end - 1); + node; node = ptr) { + /* Guard against node removal. 
*/ + ptr = __mmu_int_rb_iter_next(node, start, end - 1); hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u", node->addr, node->len); if (handler->ops->invalidate(root, node)) - __mmu_rb_remove(handler, node, true); + __mmu_rb_remove(handler, node, mm); } spin_unlock_irqrestore(&handler->lock, flags); } diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/staging/rdma/hfi1/mmu_rb.h index f8523fdb8a18..19a306e83c7d 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.h +++ b/drivers/staging/rdma/hfi1/mmu_rb.h @@ -59,7 +59,8 @@ struct mmu_rb_node { struct mmu_rb_ops { bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long); int (*insert)(struct rb_root *, struct mmu_rb_node *); - void (*remove)(struct rb_root *, struct mmu_rb_node *, bool); + void (*remove)(struct rb_root *, struct mmu_rb_node *, + struct mm_struct *); int (*invalidate)(struct rb_root *, struct mmu_rb_node *); }; diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index 0861e095df8d..5b72849bbd71 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -87,7 +87,8 @@ static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *); static int set_rcvarray_entry(struct file *, unsigned long, u32, struct tid_group *, struct page **, unsigned); static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *); -static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, bool); +static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, + struct mm_struct *); static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *); static int program_rcvarray(struct file *, unsigned long, struct tid_group *, struct tid_pageset *, unsigned, u16, struct page **, @@ -899,7 +900,7 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo, if (!node || node->rcventry != (uctxt->expected_base + rcventry)) return -EBADF; if (HFI1_CAP_IS_USET(TID_UNMAP)) - mmu_rb_remove(&fd->tid_rb_root, &node->mmu, false); + mmu_rb_remove(&fd->tid_rb_root, &node->mmu, NULL); else hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu); @@ -965,7 +966,7 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt, continue; if (HFI1_CAP_IS_USET(TID_UNMAP)) mmu_rb_remove(&fd->tid_rb_root, - &node->mmu, false); + &node->mmu, NULL); else hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu); @@ -1032,7 +1033,7 @@ static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node) } static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node, - bool notifier) + struct mm_struct *mm) { struct hfi1_filedata *fdata = container_of(root, struct hfi1_filedata, tid_rb_root); diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index ab6b6a42000f..e08c74fe4c6b 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -299,7 +299,8 @@ static int defer_packet_queue( static void activate_packet_queue(struct iowait *, int); static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long); static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *); -static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, bool); +static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, + struct mm_struct *); static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *); static struct mmu_rb_ops sdma_rb_ops = { @@ -1063,8 +1064,10 @@ static int pin_vector_pages(struct user_sdma_request *req, rb_node = 
hfi1_mmu_rb_search(&pq->sdma_rb_root, (unsigned long)iovec->iov.iov_base, iovec->iov.iov_len); - if (rb_node) + if (rb_node && !IS_ERR(rb_node)) node = container_of(rb_node, struct sdma_mmu_node, rb); + else + rb_node = NULL; if (!node) { node = kzalloc(sizeof(*node), GFP_KERNEL); @@ -1502,7 +1505,7 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin) &req->pq->sdma_rb_root, (unsigned long)req->iovs[i].iov.iov_base, req->iovs[i].iov.iov_len); - if (!mnode) + if (!mnode || IS_ERR(mnode)) continue; node = container_of(mnode, struct sdma_mmu_node, rb); @@ -1547,7 +1550,7 @@ static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode) } static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode, - bool notifier) + struct mm_struct *mm) { struct sdma_mmu_node *node = container_of(mnode, struct sdma_mmu_node, rb); @@ -1557,14 +1560,19 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode, node->pq->n_locked -= node->npages; spin_unlock(&node->pq->evict_lock); - unpin_vector_pages(notifier ? NULL : current->mm, node->pages, - node->npages); + /* + * If mm is set, we are being called by the MMU notifier and we + * should not pass a mm_struct to unpin_vector_pages(). This is to + * prevent a deadlock when hfi1_release_user_pages() attempts to + * take the mmap_sem, which the MMU notifier has already taken. + */ + unpin_vector_pages(mm ? NULL : current->mm, node->pages, node->npages); /* * If called by the MMU notifier, we have to adjust the pinned * page count ourselves. */ - if (notifier) - current->mm->pinned_vm -= node->npages; + if (mm) + mm->pinned_vm -= node->npages; kfree(node); } -- cgit v1.2.3-59-g8ed1b From de82bdff62a9078a6e4f1452e2f2604686e51e49 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 12 Apr 2016 10:46:03 -0700 Subject: IB/hfi1: Fix deadlock caused by locking with wrong scope The locking around the interval RB tree is designed to prevent access to the tree while it's being modified. The locking in its current form is overzealous, causing a deadlock in certain cases with the following backtrace: Kernel panic - not syncing: Watchdog detected hard LOCKUP on cpu 0 CPU: 0 PID: 5836 Comm: IMB-MPI1 Tainted: G O 3.12.18-wfr+ #1 0000000000000000 ffff88087f206c50 ffffffff814f1caa ffffffff817b53f0 ffff88087f206cc8 ffffffff814ecd56 0000000000000010 ffff88087f206cd8 ffff88087f206c78 0000000000000000 0000000000000000 0000000000001662 Call Trace: [] dump_stack+0x45/0x56 [] panic+0xc2/0x1cb [] ? restart_watchdog_hrtimer+0x50/0x50 [] watchdog_overflow_callback+0xc2/0xd0 [] __perf_event_overflow+0x8e/0x2b0 [] perf_event_overflow+0x14/0x20 [] intel_pmu_handle_irq+0x1b6/0x390 [] perf_event_nmi_handler+0x2b/0x50 [] nmi_handle.isra.3+0x88/0x180 [] do_nmi+0x169/0x310 [] end_repeat_nmi+0x1e/0x2e [] ? unmap_single+0x30/0x30 [] ? _raw_spin_lock_irqsave+0x2d/0x40 [] ? _raw_spin_lock_irqsave+0x2d/0x40 [] ? _raw_spin_lock_irqsave+0x2d/0x40 <> [] hfi1_mmu_rb_search+0x38/0x70 [hfi1] [] user_sdma_free_request+0xcb/0x120 [hfi1] [] user_sdma_txreq_cb+0x263/0x350 [hfi1] [] ? sdma_txclean+0x27/0x1c0 [hfi1] [] ? user_sdma_send_pkts+0x1710/0x1710 [hfi1] [] sdma_make_progress+0x166/0x480 [hfi1] [] ? ttwu_do_wakeup+0x19/0xd0 [] sdma_engine_interrupt+0x8e/0x100 [hfi1] [] sdma_interrupt+0x5d/0xa0 [hfi1] [] handle_irq_event_percpu+0x47/0x1d0 [] handle_irq_event+0x37/0x60 [] handle_edge_irq+0x6f/0x120 [] handle_irq+0xbf/0x150 [] ? irq_enter+0x17/0x80 [] do_IRQ+0x4d/0xc0 [] common_interrupt+0x6a/0x6a [] ? 
finish_task_switch+0x54/0xe0 [] __schedule+0x3b6/0x7e0 [] __cond_resched+0x26/0x30 [] _cond_resched+0x3a/0x50 [] down_write+0x12/0x30 [] hfi1_release_user_pages+0x69/0x90 [hfi1] [] sdma_rb_remove+0x9a/0xc0 [hfi1] [] __mmu_rb_remove.isra.5+0x5d/0x70 [hfi1] [] hfi1_mmu_rb_remove+0x56/0x70 [hfi1] [] hfi1_user_sdma_process_request+0x74b/0x1160 [hfi1] [] hfi1_aio_write+0xc3/0x100 [hfi1] [] do_sync_readv_writev+0x4c/0x80 [] do_readv_writev+0xbb/0x230 [] ? fsnotify+0x241/0x320 [] ? finish_task_switch+0x54/0xe0 [] vfs_writev+0x35/0x60 [] SyS_writev+0x49/0xc0 [] ? __audit_syscall_exit+0x1f6/0x2a0 [] system_call_fastpath+0x16/0x1b As is evident from the backtrace above, the process was being put to sleep while holding the lock. Limiting the scope of the lock to the RB tree operation alone fixes the above error, allowing for proper locking while still letting the process be put to sleep when needed. Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mmu_rb.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c index eac4d041d351..b3f0682a36c9 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.c +++ b/drivers/staging/rdma/hfi1/mmu_rb.c @@ -177,7 +177,7 @@ unlock: return ret; } -/* Caller must host handler lock */ +/* Caller must hold handler lock */ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, unsigned long addr, unsigned long len) @@ -201,13 +201,19 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, return node; } +/* Caller must *not* hold handler lock. */ static void __mmu_rb_remove(struct mmu_rb_handler *handler, struct mmu_rb_node *node, struct mm_struct *mm) { + unsigned long flags; + /* Validity of handler and node pointers has been checked by caller. */ hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr, node->len); + spin_lock_irqsave(&handler->lock, flags); __mmu_int_rb_remove(node, handler->root); + spin_unlock_irqrestore(&handler->lock, flags); + if (handler->ops->remove) handler->ops->remove(handler->root, node, mm); } @@ -232,14 +238,11 @@ struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr, void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node) { struct mmu_rb_handler *handler = find_mmu_handler(root); - unsigned long flags; if (!handler || !node) return; - spin_lock_irqsave(&handler->lock, flags); __mmu_rb_remove(handler, node, NULL); - spin_unlock_irqrestore(&handler->lock, flags); } static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root) @@ -289,8 +292,11 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, ptr = __mmu_int_rb_iter_next(node, start, end - 1); hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u", node->addr, node->len); - if (handler->ops->invalidate(root, node)) + if (handler->ops->invalidate(root, node)) { + spin_unlock_irqrestore(&handler->lock, flags); __mmu_rb_remove(handler, node, mm); + spin_lock_irqsave(&handler->lock, flags); + } } spin_unlock_irqrestore(&handler->lock, flags); } -- cgit v1.2.3-59-g8ed1b From 849e3e9398608c26a7c54bf9fbf3288f7ced6bfb Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 12 Apr 2016 10:46:16 -0700 Subject: IB/hfi1: Prevent unpinning of wrong pages The routine used by the SDMA cache to handle already cached nodes can extend an already existing node. 
In its error handling code, the routine will unpin pages when not all pages of the buffer extension were pinned. There was a bug in that part of the routine, which would mistakenly unpin pages from the original set rather than the newly pinned pages. This commit fixes that bug by offsetting into the page array so that it points at the beginning of the newly pinned pages. Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/user_sdma.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index e08c74fe4c6b..d53a659548e0 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -278,7 +278,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *); static void user_sdma_free_request(struct user_sdma_request *, bool); static int pin_vector_pages(struct user_sdma_request *, struct user_sdma_iovec *); -static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned); +static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned, + unsigned); static int check_header_template(struct user_sdma_request *, struct hfi1_pkt_header *, u32, u32); static int set_txreq_header(struct user_sdma_request *, @@ -1110,7 +1111,8 @@ retry: goto bail; } if (pinned != npages) { - unpin_vector_pages(current->mm, pages, pinned); + unpin_vector_pages(current->mm, pages, node->npages, + pinned); ret = -EFAULT; goto bail; } @@ -1150,9 +1152,9 @@ bail: } static void unpin_vector_pages(struct mm_struct *mm, struct page **pages, - unsigned npages) + unsigned start, unsigned npages) { - hfi1_release_user_pages(mm, pages, npages, 0); + hfi1_release_user_pages(mm, pages + start, npages, 0); kfree(pages); } @@ -1566,7 +1568,8 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode, * prevent a deadlock when hfi1_release_user_pages() attempts to * take the mmap_sem, which the MMU notifier has already taken. */ - unpin_vector_pages(mm ? NULL : current->mm, node->pages, node->npages); + unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0, + node->npages); /* * If called by the MMU notifier, we have to adjust the pinned * page count ourselves. -- cgit v1.2.3-59-g8ed1b From b9b06cb6fedab10665a2d527464b45f332d17465 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Wed, 20 Apr 2016 06:05:30 -0700 Subject: IB/hfi1: Fix missing lock/unlock in verbs drain callback The iowait_sdma_drained() callback lacked locking to protect the qp s_flags field. This allowed the s_flags to get out of sync on multiple CPUs, potentially corrupting them. Fixes: a545f5308b6c ("staging/rdma/hfi: fix CQ completion order issue") Reviewed-by: Sebastian Sanchez Signed-off-by: Mike Marciniszyn Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qp.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 29a5ad28019b..dc9119e1b458 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -519,10 +519,12 @@ static void iowait_sdma_drained(struct iowait *wait) * do the flush work until that QP's * sdma work has finished. 
*/ + spin_lock(&qp->s_lock); if (qp->s_flags & RVT_S_WAIT_DMA) { qp->s_flags &= ~RVT_S_WAIT_DMA; hfi1_schedule_send(qp); } + spin_unlock(&qp->s_lock); } /** -- cgit v1.2.3-59-g8ed1b From 94158442eb0c66bbb0b733999e108fa26a7673ef Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Wed, 20 Apr 2016 06:05:36 -0700 Subject: IB/hfi1: Don't attempt to free resources if initialization failed Attempting to free resources that had not been properly allocated and initialized led to the following kernel backtrace: BUG: unable to handle kernel NULL pointer dereference at (null) IP: [] unlock_exp_tids.isra.8+0x2e/0x120 [hfi1] PGD 852a43067 PUD 85d4a6067 PMD 0 Oops: 0000 [#1] SMP CPU: 0 PID: 2831 Comm: osu_bw Tainted: G IO 3.12.18-wfr+ #1 task: ffff88085b15b540 ti: ffff8808588fe000 task.ti: ffff8808588fe000 RIP: 0010:[] [] unlock_exp_tids.isra.8+0x2e/0x120 [hfi1] RSP: 0018:ffff8808588ffde0 EFLAGS: 00010282 RAX: 0000000000000000 RBX: ffff880858a31800 RCX: 0000000000000000 RDX: ffff88085d971bc0 RSI: ffff880858a318f8 RDI: ffff880858a318c0 RBP: ffff8808588ffe20 R08: 0000000000000000 R09: 0000000000000000 R10: ffff88087ffd6f40 R11: 0000000001100348 R12: ffff880852900000 R13: ffff880858a318c0 R14: 0000000000000000 R15: ffff88085d971be8 FS: 00007f4674e83740(0000) GS:ffff88087f400000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000000 CR3: 000000085c377000 CR4: 00000000001407f0 Stack: ffffffffa0941a71 ffff880858a318f8 ffff88085d971bc0 ffff880858a31800 ffff880852900000 ffff880858a31800 00000000003ffff7 ffff88085d971bc0 ffff8808588ffe60 ffffffffa09663fc ffff8808588ffe60 ffff880858a31800 Call Trace: [] ? find_mmu_handler+0x51/0x70 [hfi1] [] hfi1_user_exp_rcv_free+0x6c/0x120 [hfi1] [] hfi1_file_close+0x1a9/0x340 [hfi1] [] __fput+0xe9/0x270 [] ____fput+0xe/0x10 [] task_work_run+0xa7/0xe0 [] do_notify_resume+0x59/0x80 [] int_signal+0x12/0x17 This commit re-arranges the context initialization code so that the context event flags can be used to determine whether the context has been successfully initialized. In turn, this check can be used to skip the resource de-allocation when the resources were never allocated in the first place. 
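As a rough sketch of the guard this enables (simplified, with a hypothetical function name; the real check lands in hfi1_user_exp_rcv_free() in the diff below):

int example_exp_rcv_free(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	/*
	 * If setup never completed, none of the expected-receive
	 * resources were allocated; bail out before dereferencing them.
	 */
	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
		return 0;

	/* ... normal teardown of TID groups and the RB tree ... */
	return 0;
}
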
Fixes: 3abb33ac6521 ("staging/hfi1: Add TID cache receive init and free funcs") Reviewed-by: Dennis Dalessandro Signed-off-by: Mitko Haralanov Reviewed-by: Leon Romanovsky --- drivers/staging/rdma/hfi1/file_ops.c | 60 ++++++++++++++------------------ drivers/staging/rdma/hfi1/user_exp_rcv.c | 2 ++ 2 files changed, 29 insertions(+), 33 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index 8396dc5fb6c1..ec6c2269d739 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -791,15 +791,16 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) spin_unlock_irqrestore(&dd->uctxt_lock, flags); dd->rcd[uctxt->ctxt] = NULL; + + hfi1_user_exp_rcv_free(fdata); + hfi1_clear_ctxt_pkey(dd, uctxt->ctxt); + uctxt->rcvwait_to = 0; uctxt->piowait_to = 0; uctxt->rcvnowait = 0; uctxt->pionowait = 0; uctxt->event_flags = 0; - hfi1_user_exp_rcv_free(fdata); - hfi1_clear_ctxt_pkey(dd, uctxt->ctxt); - hfi1_stats.sps_ctxts--; if (++dd->freectxts == dd->num_user_contexts) aspm_enable_all(dd); @@ -1127,27 +1128,13 @@ bail: static int user_init(struct file *fp) { - int ret; unsigned int rcvctrl_ops = 0; struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; /* make sure that the context has already been setup */ - if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) { - ret = -EFAULT; - goto done; - } - - /* - * Subctxts don't need to initialize anything since master - * has done it. - */ - if (fd->subctxt) { - ret = wait_event_interruptible(uctxt->wait, !test_bit( - HFI1_CTXT_MASTER_UNINIT, - &uctxt->event_flags)); - goto expected; - } + if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) + return -EFAULT; /* initialize poll variables... */ uctxt->urgent = 0; @@ -1202,19 +1189,7 @@ static int user_init(struct file *fp) wake_up(&uctxt->wait); } -expected: - /* - * Expected receive has to be setup for all processes (including - * shared contexts). However, it has to be done after the master - * context has been fully configured as it depends on the - * eager/expected split of the RcvArray entries. - * Setting it up here ensures that the subcontexts will be waiting - * (due to the above wait_event_interruptible() until the master - * is setup. - */ - ret = hfi1_user_exp_rcv_init(fp); -done: - return ret; + return 0; } static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len) @@ -1261,7 +1236,7 @@ static int setup_ctxt(struct file *fp) int ret = 0; /* - * Context should be set up only once (including allocation and + * Context should be set up only once, including allocation and * programming of eager buffers. This is done if context sharing * is not requested or by the master process. */ @@ -1282,8 +1257,27 @@ static int setup_ctxt(struct file *fp) if (ret) goto done; } + } else { + ret = wait_event_interruptible(uctxt->wait, !test_bit( + HFI1_CTXT_MASTER_UNINIT, + &uctxt->event_flags)); + if (ret) + goto done; } + ret = hfi1_user_sdma_alloc_queues(uctxt, fp); + if (ret) + goto done; + /* + * Expected receive has to be setup for all processes (including + * shared contexts). However, it has to be done after the master + * context has been fully configured as it depends on the + * eager/expected split of the RcvArray entries. + * Setting it up here ensures that the subcontexts will be waiting + * (due to the above wait_event_interruptible() until the master + * is setup. 
+ */ + ret = hfi1_user_exp_rcv_init(fp); if (ret) goto done; diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index 5b72849bbd71..8bd56d5c783d 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -255,6 +255,8 @@ int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd) struct hfi1_ctxtdata *uctxt = fd->uctxt; struct tid_group *grp, *gptr; + if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) + return 0; /* * The notifier would have been removed when the process'es mm * was freed. -- cgit v1.2.3-59-g8ed1b From 7723d8c2445c4dfa91f8df42703b56f8ade59af7 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Fri, 22 Apr 2016 11:17:03 -0700 Subject: IB/hfi1: Use kernel default llseek for ui device The ui device llseek had a mistake with SEEK_END and did not fully follow seek semantics. Correct all this by using a kernel supplied function for fixed size devices. Cc: Al Viro Reviewed-by: Dennis Dalessandro Signed-off-by: Dean Luick Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/file_ops.c | 25 ++----------------------- 1 file changed, 2 insertions(+), 23 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index ec6c2269d739..541529589736 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -1559,29 +1559,8 @@ static loff_t ui_lseek(struct file *filp, loff_t offset, int whence) { struct hfi1_devdata *dd = filp->private_data; - switch (whence) { - case SEEK_SET: - break; - case SEEK_CUR: - offset += filp->f_pos; - break; - case SEEK_END: - offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) - - offset; - break; - default: - return -EINVAL; - } - - if (offset < 0) - return -EINVAL; - - if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) - return -EINVAL; - - filp->f_pos = offset; - - return filp->f_pos; + return fixed_size_llseek(filp, offset, whence, + (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE); } /* NOTE: assumes unsigned long is 8 bytes */ -- cgit v1.2.3-59-g8ed1b From e6bd18f57aad1a2d1ef40e646d03ed0f2515c9e3 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Sun, 10 Apr 2016 19:13:13 -0600 Subject: IB/security: Restrict use of the write() interface The drivers/infiniband stack uses write() as a replacement for bi-directional ioctl(). This is not safe. There are ways to trigger write calls that result in the return structure that is normally written to user space being shunted off to user specified kernel memory instead. For the immediate repair, detect and deny suspicious accesses to the write API. For long term, update the user space libraries and the kernel API to something that doesn't present the same security vulnerabilities (likely a structured ioctl() interface). The impacted uAPI interfaces are generally only available if hardware from drivers/infiniband is installed in the system. 
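As a rough sketch, each legacy write() entry point gains a guard of this shape (illustrative handler, not one of the patched drivers):

static ssize_t example_write(struct file *filp, const char __user *buf,
			     size_t len, loff_t *ppos)
{
	/*
	 * Deny the write when the caller's credentials differ from the
	 * opener's, or when the access comes through a kernel address
	 * space (set_fs(KERNEL_DS)); either case could shunt the
	 * command's reply into kernel memory.
	 */
	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
		return -EACCES;

	/* ... parse the command header and dispatch as before ... */
	return len;
}
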
Reported-by: Jann Horn Signed-off-by: Linus Torvalds Signed-off-by: Jason Gunthorpe [ Expanded check to all known write() entry points ] Cc: stable@vger.kernel.org Signed-off-by: Doug Ledford --- drivers/infiniband/core/ucm.c | 4 ++++ drivers/infiniband/core/ucma.c | 3 +++ drivers/infiniband/core/uverbs_main.c | 5 +++++ drivers/infiniband/hw/qib/qib_file_ops.c | 5 +++++ drivers/staging/rdma/hfi1/TODO | 2 +- drivers/staging/rdma/hfi1/file_ops.c | 6 ++++++ include/rdma/ib.h | 16 ++++++++++++++++ 7 files changed, 40 insertions(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 4a9aa0433b07..7713ef089c3c 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -48,6 +48,7 @@ #include +#include #include #include #include @@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf, struct ib_ucm_cmd_hdr hdr; ssize_t result; + if (WARN_ON_ONCE(!ib_safe_file_access(filp))) + return -EACCES; + if (len < sizeof(hdr)) return -EINVAL; diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index dd3bcceadfde..c0f3826abb30 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf, struct rdma_ucm_cmd_hdr hdr; ssize_t ret; + if (WARN_ON_ONCE(!ib_safe_file_access(filp))) + return -EACCES; + if (len < sizeof(hdr)) return -EINVAL; diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 28ba2cc81535..31f422a70623 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -48,6 +48,8 @@ #include +#include + #include "uverbs.h" MODULE_AUTHOR("Roland Dreier"); @@ -709,6 +711,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, int srcu_key; ssize_t ret; + if (WARN_ON_ONCE(!ib_safe_file_access(filp))) + return -EACCES; + if (count < sizeof hdr) return -EINVAL; diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index e449e394963f..24f4a782e0f4 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -45,6 +45,8 @@ #include #include +#include + #include "qib.h" #include "qib_common.h" #include "qib_user_sdma.h" @@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data, ssize_t ret = 0; void *dest; + if (WARN_ON_ONCE(!ib_safe_file_access(fp))) + return -EACCES; + if (count < sizeof(cmd.type)) { ret = -EINVAL; goto bail; diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO index 05de0dad8762..4c6f1d7d2eaf 100644 --- a/drivers/staging/rdma/hfi1/TODO +++ b/drivers/staging/rdma/hfi1/TODO @@ -3,4 +3,4 @@ July, 2015 - Remove unneeded file entries in sysfs - Remove software processing of IB protocol and place in library for use by qib, ipath (if still present), hfi1, and eventually soft-roce - +- Replace incorrect uAPI diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index 541529589736..c1c5bf82addb 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -49,6 +49,8 @@ #include #include +#include + #include "hfi.h" #include "pio.h" #include "device.h" @@ -190,6 +192,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, int uctxt_required = 1; int must_be_root = 0; + /* FIXME: This interface cannot 
continue out of staging */ + if (WARN_ON_ONCE(!ib_safe_file_access(fp))) + return -EACCES; + if (count < sizeof(cmd)) { ret = -EINVAL; goto bail; diff --git a/include/rdma/ib.h b/include/rdma/ib.h index cf8f9e700e48..a6b93706b0fc 100644 --- a/include/rdma/ib.h +++ b/include/rdma/ib.h @@ -34,6 +34,7 @@ #define _RDMA_IB_H #include +#include struct ib_addr { union { @@ -86,4 +87,19 @@ struct sockaddr_ib { __u64 sib_scope_id; }; +/* + * The IB interfaces that use write() as bi-directional ioctl() are + * fundamentally unsafe, since there are lots of ways to trigger "write()" + * calls from various contexts with elevated privileges. That includes the + * traditional suid executable error message writes, but also various kernel + * interfaces that can write to file descriptors. + * + * This function provides protection for the legacy API by restricting the + * calling context. + */ +static inline bool ib_safe_file_access(struct file *filp) +{ + return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS); +} + #endif /* _RDMA_IB_H */ -- cgit v1.2.3-59-g8ed1b