Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--   drivers/dma-buf/dma-buf.c           |  41
-rw-r--r--   drivers/dma-buf/dma-fence-array.c   |  14
-rw-r--r--   drivers/dma-buf/dma-fence.c         |   1
-rw-r--r--   drivers/dma-buf/reservation.c       | 120
-rw-r--r--   drivers/dma-buf/sw_sync.c           |  10
-rw-r--r--   drivers/dma-buf/sync_file.c         |   4
6 files changed, 126 insertions, 64 deletions
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 4a038dcf5361..d78d5fc173dc 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -135,10 +135,10 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
  * Userspace can query the state of these implicitly tracked fences using poll()
  * and related system calls:
  *
- * - Checking for POLLIN, i.e. read access, can be use to query the state of the
+ * - Checking for EPOLLIN, i.e. read access, can be use to query the state of the
  *   most recent write or exclusive fence.
  *
- * - Checking for POLLOUT, i.e. write access, can be used to query the state of
+ * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
  *   all attached fences, shared and exclusive ones.
  *
  * Note that this only signals the completion of the respective fences, i.e. the
@@ -157,24 +157,24 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 	spin_unlock_irqrestore(&dcb->poll->lock, flags);
 }
 
-static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
+static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
 	struct dma_buf *dmabuf;
 	struct reservation_object *resv;
 	struct reservation_object_list *fobj;
 	struct dma_fence *fence_excl;
-	unsigned long events;
+	__poll_t events;
 	unsigned shared_count, seq;
 
 	dmabuf = file->private_data;
 	if (!dmabuf || !dmabuf->resv)
-		return POLLERR;
+		return EPOLLERR;
 
 	resv = dmabuf->resv;
 
 	poll_wait(file, &dmabuf->poll, poll);
 
-	events = poll_requested_events(poll) & (POLLIN | POLLOUT);
+	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
 	if (!events)
 		return 0;
 
@@ -193,12 +193,12 @@ retry:
 		goto retry;
 	}
 
-	if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
+	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
-		unsigned long pevents = POLLIN;
+		__poll_t pevents = EPOLLIN;
 
 		if (shared_count == 0)
-			pevents |= POLLOUT;
+			pevents |= EPOLLOUT;
 
 		spin_lock_irq(&dmabuf->poll.lock);
 		if (dcb->active) {
@@ -228,19 +228,19 @@ retry:
 		}
 	}
 
-	if ((events & POLLOUT) && shared_count > 0) {
+	if ((events & EPOLLOUT) && shared_count > 0) {
 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
 		int i;
 
 		/* Only queue a new callback if no event has fired yet */
 		spin_lock_irq(&dmabuf->poll.lock);
 		if (dcb->active)
-			events &= ~POLLOUT;
+			events &= ~EPOLLOUT;
 		else
-			dcb->active = POLLOUT;
+			dcb->active = EPOLLOUT;
 		spin_unlock_irq(&dmabuf->poll.lock);
 
-		if (!(events & POLLOUT))
+		if (!(events & EPOLLOUT))
 			goto out;
 
 		for (i = 0; i < shared_count; ++i) {
@@ -253,14 +253,14 @@ retry:
 				 *
 				 * call dma_buf_poll_cb and force a recheck!
 				 */
-				events &= ~POLLOUT;
+				events &= ~EPOLLOUT;
 				dma_buf_poll_cb(NULL, &dcb->cb);
 				break;
 			}
 			if (!dma_fence_add_callback(fence, &dcb->cb,
 						    dma_buf_poll_cb)) {
 				dma_fence_put(fence);
-				events &= ~POLLOUT;
+				events &= ~EPOLLOUT;
 				break;
 			}
 			dma_fence_put(fence);
@@ -351,13 +351,13 @@ static inline int is_dma_buf_file(struct file *file)
  *
  * 2. Userspace passes this file-descriptors to all drivers it wants this buffer
  *    to share with: First the filedescriptor is converted to a &dma_buf using
- *    dma_buf_get(). The the buffer is attached to the device using
+ *    dma_buf_get(). Then the buffer is attached to the device using
  *    dma_buf_attach().
  *
  *    Up to this stage the exporter is still free to migrate or reallocate the
  *    backing storage.
  *
- * 3. Once the buffer is attached to all devices userspace can inniate DMA
+ * 3. Once the buffer is attached to all devices userspace can initiate DMA
  *    access to the shared buffer. In the kernel this is done by calling
  *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
  *
@@ -617,7 +617,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
  * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
  * on error. May return -EINTR if it is interrupted by a signal.
  *
- * A mapping must be unmapped again using dma_buf_map_attachment(). Note that
+ * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
  * the underlying backing storage is pinned for as long as a mapping exists,
  * therefore users/importers should not hold onto a mapping for undue amounts of
  * time.
@@ -625,7 +625,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 					enum dma_data_direction direction)
 {
-	struct sg_table *sg_table = ERR_PTR(-EINVAL);
+	struct sg_table *sg_table;
 
 	might_sleep();
 
@@ -1179,8 +1179,7 @@ static int dma_buf_init_debugfs(void)
 
 static void dma_buf_uninit_debugfs(void)
 {
-	if (dma_buf_debugfs_dir)
-		debugfs_remove_recursive(dma_buf_debugfs_dir);
+	debugfs_remove_recursive(dma_buf_debugfs_dir);
 }
 #else
 static inline int dma_buf_init_debugfs(void)
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index 0350829ba62e..dd1edfb27b61 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -31,6 +31,14 @@ static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
 	return "unbound";
 }
 
+static void irq_dma_fence_array_work(struct irq_work *wrk)
+{
+	struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
+
+	dma_fence_signal(&array->base);
+	dma_fence_put(&array->base);
+}
+
 static void dma_fence_array_cb_func(struct dma_fence *f,
 				    struct dma_fence_cb *cb)
 {
@@ -39,8 +47,9 @@ static void dma_fence_array_cb_func(struct dma_fence *f,
 	struct dma_fence_array *array = array_cb->array;
 
 	if (atomic_dec_and_test(&array->num_pending))
-		dma_fence_signal(&array->base);
-	dma_fence_put(&array->base);
+		irq_work_queue(&array->work);
+	else
+		dma_fence_put(&array->base);
 }
 
 static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
@@ -136,6 +145,7 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
 	spin_lock_init(&array->lock);
 	dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
 		       context, seqno);
+	init_irq_work(&array->work, irq_dma_fence_array_work);
 
 	array->num_fences = num_fences;
 	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 9a302799040e..5d101c4053e0 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -27,7 +27,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/dma_fence.h>
 
-EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
 
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index dec3a815455d..04ebe2204c12 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -104,7 +104,8 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
 				      struct reservation_object_list *fobj,
 				      struct dma_fence *fence)
 {
-	u32 i;
+	struct dma_fence *signaled = NULL;
+	u32 i, signaled_idx;
 
 	dma_fence_get(fence);
 
@@ -126,17 +127,28 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
 			dma_fence_put(old_fence);
 			return;
 		}
+
+		if (!signaled && dma_fence_is_signaled(old_fence)) {
+			signaled = old_fence;
+			signaled_idx = i;
+		}
 	}
 
 	/*
 	 * memory barrier is added by write_seqcount_begin,
 	 * fobj->shared_count is protected by this lock too
 	 */
-	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
-	fobj->shared_count++;
+	if (signaled) {
+		RCU_INIT_POINTER(fobj->shared[signaled_idx], fence);
+	} else {
+		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
+		fobj->shared_count++;
+	}
 
 	write_seqcount_end(&obj->seq);
 	preempt_enable();
+
+	dma_fence_put(signaled);
 }
 
 static void
@@ -145,8 +157,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
 				      struct reservation_object_list *fobj,
 				      struct dma_fence *fence)
 {
-	unsigned i;
-	struct dma_fence *old_fence = NULL;
+	unsigned i, j, k;
 
 	dma_fence_get(fence);
 
@@ -162,24 +173,21 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
 	 * references from the old struct are carried over to
 	 * the new.
 	 */
-	fobj->shared_count = old->shared_count;
-
-	for (i = 0; i < old->shared_count; ++i) {
+	for (i = 0, j = 0, k = fobj->shared_max; i < old->shared_count; ++i) {
 		struct dma_fence *check;
 
 		check = rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj));
 
-		if (!old_fence && check->context == fence->context) {
-			old_fence = check;
-			RCU_INIT_POINTER(fobj->shared[i], fence);
-		} else
-			RCU_INIT_POINTER(fobj->shared[i], check);
-	}
-	if (!old_fence) {
-		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
-		fobj->shared_count++;
+		if (check->context == fence->context ||
+		    dma_fence_is_signaled(check))
+			RCU_INIT_POINTER(fobj->shared[--k], check);
+		else
+			RCU_INIT_POINTER(fobj->shared[j++], check);
 	}
+	fobj->shared_count = j;
+	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
+	fobj->shared_count++;
 
 done:
 	preempt_disable();
@@ -192,10 +200,18 @@ done:
 	write_seqcount_end(&obj->seq);
 	preempt_enable();
 
-	if (old)
-		kfree_rcu(old, rcu);
+	if (!old)
+		return;
 
-	dma_fence_put(old_fence);
+	/* Drop the references to the signaled fences */
+	for (i = k; i < fobj->shared_max; ++i) {
+		struct dma_fence *f;
+
+		f = rcu_dereference_protected(fobj->shared[i],
+					      reservation_object_held(obj));
+		dma_fence_put(f);
+	}
+	kfree_rcu(old, rcu);
 }
 
 /**
@@ -266,8 +282,7 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
  * @dst: the destination reservation object
  * @src: the source reservation object
  *
- * Copy all fences from src to dst. Both src->lock as well as dst-lock must be
- * held.
+ * Copy all fences from src to dst. dst-lock must be held.
  */
 int reservation_object_copy_fences(struct reservation_object *dst,
 				   struct reservation_object *src)
@@ -277,33 +292,62 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 	size_t size;
 	unsigned i;
 
-	src_list = reservation_object_get_list(src);
+	rcu_read_lock();
+	src_list = rcu_dereference(src->fence);
 
+retry:
 	if (src_list) {
-		size = offsetof(typeof(*src_list),
-				shared[src_list->shared_count]);
+		unsigned shared_count = src_list->shared_count;
+
+		size = offsetof(typeof(*src_list), shared[shared_count]);
+		rcu_read_unlock();
+
 		dst_list = kmalloc(size, GFP_KERNEL);
 		if (!dst_list)
 			return -ENOMEM;
 
-		dst_list->shared_count = src_list->shared_count;
-		dst_list->shared_max = src_list->shared_count;
-		for (i = 0; i < src_list->shared_count; ++i)
-			dst_list->shared[i] =
-				dma_fence_get(src_list->shared[i]);
+		rcu_read_lock();
+		src_list = rcu_dereference(src->fence);
+		if (!src_list || src_list->shared_count > shared_count) {
+			kfree(dst_list);
+			goto retry;
+		}
+
+		dst_list->shared_count = 0;
+		dst_list->shared_max = shared_count;
+		for (i = 0; i < src_list->shared_count; ++i) {
+			struct dma_fence *fence;
+
+			fence = rcu_dereference(src_list->shared[i]);
+			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+				     &fence->flags))
+				continue;
+
+			if (!dma_fence_get_rcu(fence)) {
+				kfree(dst_list);
+				src_list = rcu_dereference(src->fence);
+				goto retry;
+			}
+
+			if (dma_fence_is_signaled(fence)) {
+				dma_fence_put(fence);
+				continue;
+			}
+
+			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
+		}
 	} else {
 		dst_list = NULL;
 	}
 
+	new = dma_fence_get_rcu_safe(&src->fence_excl);
+	rcu_read_unlock();
+
 	kfree(dst->staged);
 	dst->staged = NULL;
 
 	src_list = reservation_object_get_list(dst);
-
 	old = reservation_object_get_excl(dst);
-	new = reservation_object_get_excl(src);
-
-	dma_fence_get(new);
 
 	preempt_disable();
 	write_seqcount_begin(&dst->seq);
@@ -427,13 +471,15 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 					 unsigned long timeout)
 {
 	struct dma_fence *fence;
-	unsigned seq, shared_count, i = 0;
+	unsigned seq, shared_count;
 	long ret = timeout ? timeout : 1;
+	int i;
 
 retry:
 	shared_count = 0;
 	seq = read_seqcount_begin(&obj->seq);
 	rcu_read_lock();
+	i = -1;
 
 	fence = rcu_dereference(obj->fence_excl);
 	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
@@ -449,14 +495,14 @@ retry:
 			fence = NULL;
 	}
 
-	if (!fence && wait_all) {
+	if (wait_all) {
 		struct reservation_object_list *fobj =
						rcu_dereference(obj->fence);
 
 		if (fobj)
 			shared_count = fobj->shared_count;
 
-		for (i = 0; i < shared_count; ++i) {
+		for (i = 0; !fence && i < shared_count; ++i) {
 			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
 
 			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 38cc7389a6c1..24f83f9eeaed 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -321,8 +321,16 @@ static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
 static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
 {
 	struct sync_timeline *obj = file->private_data;
+	struct sync_pt *pt, *next;
+
+	spin_lock_irq(&obj->lock);
+
+	list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
+		dma_fence_set_error(&pt->base, -ENOENT);
+		dma_fence_signal_locked(&pt->base);
+	}
 
-	smp_wmb();
+	spin_unlock_irq(&obj->lock);
 
 	sync_timeline_put(obj);
 	return 0;
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 03830634e141..35dd06479867 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -312,7 +312,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static unsigned int sync_file_poll(struct file *file, poll_table *wait)
+static __poll_t sync_file_poll(struct file *file, poll_table *wait)
 {
 	struct sync_file *sync_file = file->private_data;
 
@@ -325,7 +325,7 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
 		wake_up_all(&sync_file->wq);
 	}
 
-	return dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0;
+	return dma_fence_is_signaled(sync_file->fence) ? EPOLLIN : 0;
}
 
 static long sync_file_ioctl_merge(struct sync_file *sync_file,
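
The dma-buf.c and sync_file.c hunks above convert poll handlers from unsigned int/POLL* to __poll_t/EPOLL*. A minimal sketch of a poll handler written against that convention follows; the my_dev structure and its have_data flag are hypothetical, while __poll_t, poll_wait() and the EPOLL* flags are the real kernel interfaces.

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct my_dev {				/* hypothetical driver state */
	wait_queue_head_t wq;
	bool have_data;
};

static __poll_t my_dev_poll(struct file *file, poll_table *wait)
{
	struct my_dev *dev = file->private_data;
	__poll_t events = 0;

	/* register on the wait queue; this never blocks */
	poll_wait(file, &dev->wq, wait);

	if (dev->have_data)		/* readable right now? */
		events |= EPOLLIN | EPOLLRDNORM;

	return events;
}

Because __poll_t is a bitwise type, sparse can flag code that mixes EPOLL* bits with plain integers or errno values, which is the point of the tree-wide conversion these hunks are part of.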
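The dma-fence-array.c hunks defer dma_fence_signal() from the fence callback into an irq_work item. A sketch of that general deferral pattern is shown below, assuming a hypothetical my_counter object; init_irq_work(), irq_work_queue() and container_of() are the real kernel APIs.

#include <linux/atomic.h>
#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/printk.h>

struct my_counter {			/* hypothetical example object */
	atomic_t pending;
	struct irq_work work;
};

static void my_counter_work(struct irq_work *wrk)
{
	struct my_counter *c = container_of(wrk, struct my_counter, work);

	/* runs shortly afterwards, outside the restricted callback context */
	pr_info("all events completed for %p\n", c);
}

static void my_counter_init(struct my_counter *c, int n)
{
	atomic_set(&c->pending, n);
	init_irq_work(&c->work, my_counter_work);
}

/* called from a context that must stay lock-light, e.g. a fence callback */
static void my_counter_event(struct my_counter *c)
{
	if (atomic_dec_and_test(&c->pending))
		irq_work_queue(&c->work);
}

Queuing the irq_work is cheap and safe from the callback, and the heavier signalling work then runs without the caller's locks held, which is the motivation for the change to dma_fence_array_cb_func().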