author     Jason Gunthorpe <jgg@mellanox.com>  2019-08-21 14:10:36 -0300
committer  Jason Gunthorpe <jgg@mellanox.com>  2019-08-21 14:10:36 -0300
commit     868df536f5e84672c3e002b949e0e44f97cb0f09
tree       f76da5f6d06125b6d91c75ebfe4079ec9e2f958a
parent     RDMA: Delete DEBUG code
parent     RDMA/mlx5: Use odp instead of mr->umem in pagefault_mr
Merge branch 'odp_fixes' into rdma.git for-next
Jason Gunthorpe says:

====================
This is a collection of general cleanups for ODP to clarify some of the
flows around umem creation and use of the interval tree.
====================

The branch is based on v5.3-rc5 due to dependencies

* odp_fixes:
  RDMA/mlx5: Use odp instead of mr->umem in pagefault_mr
  RDMA/mlx5: Use ib_umem_start instead of umem.address
  RDMA/core: Make invalidate_range a device operation
  RDMA/odp: Use kvcalloc for the dma_list and page_list
  RDMA/odp: Check for overflow when computing the umem_odp end
  RDMA/odp: Provide ib_umem_odp_release() to undo the allocs
  RDMA/odp: Split creating a umem_odp from ib_umem_get
  RDMA/odp: Make the three ways to create a umem_odp clear
  RMDA/odp: Consolidate umem_odp initialization
  RDMA/odp: Make it clearer when a umem is an implicit ODP umem
  RDMA/odp: Iterate over the whole rbtree directly
  RDMA/odp: Use the common interval tree library instead of generic
  RDMA/mlx5: Fix MR npages calculation for IB_ACCESS_HUGETLB

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
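As an aside on one patch in the list above, "RDMA/odp: Check for overflow when computing the umem_odp end": when an end address is derived from a user-controlled start + length, the addition has to be checked so an oversized length cannot wrap past the end of the address space. A minimal userspace sketch of that kind of check follows; the function name and constants are illustrative assumptions, not the kernel code.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

/*
 * Compute a page-aligned end for [addr, addr + length), refusing ranges
 * whose end would overflow.  Purely illustrative; not the RDMA/odp code.
 */
static int range_end(uint64_t addr, uint64_t length, uint64_t *end)
{
	uint64_t sum;

	/* __builtin_add_overflow is provided by GCC and Clang. */
	if (__builtin_add_overflow(addr, length, &sum))
		return -1;		/* start + length wraps around */
	if (__builtin_add_overflow(sum, PAGE_SIZE - 1, &sum))
		return -1;		/* rounding up would wrap around */
	*end = sum & ~(PAGE_SIZE - 1);
	return 0;
}

int main(void)
{
	uint64_t end;

	if (range_end(0x1000, 0x2345, &end) == 0)
		printf("end = %#llx\n", (unsigned long long)end);	/* 0x4000 */
	if (range_end(UINT64_MAX - 10, 4096, &end) != 0)
		printf("rejected overflowing range\n");
	return 0;
}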
Diffstat (limited to 'sound/core/pcm_native.c')
 sound/core/pcm_native.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 860543a4c840..703857aab00f 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -77,7 +77,7 @@ void snd_pcm_group_init(struct snd_pcm_group *group)
 	spin_lock_init(&group->lock);
 	mutex_init(&group->mutex);
 	INIT_LIST_HEAD(&group->substreams);
-	refcount_set(&group->refs, 0);
+	refcount_set(&group->refs, 1);
 }
 
 /* define group lock helpers */
@@ -1096,8 +1096,7 @@ static void snd_pcm_group_unref(struct snd_pcm_group *group,
 	if (!group)
 		return;
 
-	do_free = refcount_dec_and_test(&group->refs) &&
-		list_empty(&group->substreams);
+	do_free = refcount_dec_and_test(&group->refs);
 	snd_pcm_group_unlock(group, substream->pcm->nonatomic);
 	if (do_free)
 		kfree(group);
@@ -1874,6 +1873,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
 		if (!to_check)
 			break; /* all drained */
 		init_waitqueue_entry(&wait, current);
+		set_current_state(TASK_INTERRUPTIBLE);
 		add_wait_queue(&to_check->sleep, &wait);
 		snd_pcm_stream_unlock_irq(substream);
 		if (runtime->no_period_wakeup)
@@ -1886,7 +1886,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
 			}
 			tout = msecs_to_jiffies(tout * 1000);
 		}
-		tout = schedule_timeout_interruptible(tout);
+		tout = schedule_timeout(tout);
 
 		snd_pcm_stream_lock_irq(substream);
 		group = snd_pcm_stream_group_ref(substream);
@@ -2020,6 +2020,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
 	snd_pcm_group_lock_irq(target_group, nonatomic);
 	snd_pcm_stream_lock(substream1);
 	snd_pcm_group_assign(substream1, target_group);
+	refcount_inc(&target_group->refs);
 	snd_pcm_stream_unlock(substream1);
 	snd_pcm_group_unlock_irq(target_group, nonatomic);
  _end:
@@ -2056,13 +2057,14 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
 	snd_pcm_group_lock_irq(group, nonatomic);
 
 	relink_to_local(substream);
+	refcount_dec(&group->refs);
 
 	/* detach the last stream, too */
 	if (list_is_singular(&group->substreams)) {
 		relink_to_local(list_first_entry(&group->substreams,
 						 struct snd_pcm_substream,
 						 link_list));
-		do_free = !refcount_read(&group->refs);
+		do_free = refcount_dec_and_test(&group->refs);
 	}
 
 	snd_pcm_group_unlock_irq(group, nonatomic);
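Taken together, the pcm_native.c hunks pulled in from the v5.3-rc5 base move snd_pcm_group to a plain reference-count lifecycle: the count starts at 1 for the group's owner, snd_pcm_link takes a reference for each substream added, snd_pcm_unlink drops one, and the group is freed only when the count reaches zero, instead of the earlier mix of a zero-initialized count and list_empty() checks. The snd_pcm_drain hunks switch the wait loop to the standard sleep idiom of setting TASK_INTERRUPTIBLE before arming the wait queue and then calling plain schedule_timeout(), so a wakeup arriving between the two cannot be lost. Below is a minimal userspace sketch of the same get/put pattern, with C11 atomics standing in for the kernel's refcount_t; the structure and helpers are illustrative, not the ALSA code.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for snd_pcm_group: a shared object freed on the last put. */
struct group {
	atomic_int refs;
};

static struct group *group_new(void)
{
	struct group *g = malloc(sizeof(*g));

	if (g)
		atomic_init(&g->refs, 1);	/* the owner's reference */
	return g;
}

/* Take a reference, e.g. when a substream is linked into the group. */
static void group_get(struct group *g)
{
	atomic_fetch_add(&g->refs, 1);
}

/* Drop a reference; free the group when the last one goes away. */
static void group_put(struct group *g)
{
	if (atomic_fetch_sub(&g->refs, 1) == 1) {
		printf("last reference dropped, freeing group\n");
		free(g);
	}
}

int main(void)
{
	struct group *g = group_new();

	if (!g)
		return 1;
	group_get(g);	/* first linked substream */
	group_get(g);	/* second linked substream */
	group_put(g);	/* one substream unlinks */
	group_put(g);	/* the other unlinks */
	group_put(g);	/* owner's reference: freed here */
	return 0;
}

Unlike these plain atomics, the kernel's refcount_t additionally saturates and warns on overflow and on dropping below zero, which is what makes the dec_and_test style used in the diff robust against refcounting bugs.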