author    Thierry Reding <treding@nvidia.com>    2019-09-16 16:19:24 +0200
committer Ben Skeggs <bskeggs@redhat.com>        2019-09-17 14:50:15 +1000
commit    0bb21c9677e5a5e2355f4f81a4e48afe62257a75 (patch)
tree      3e3418f6a8ba19d80573c1574e8cec323ac8d1b3 /drivers/gpu/drm/nouveau/nouveau_prime.c
parent    drm/nouveau: Fix fallout from reservation object rework (diff)
drm/nouveau/prime: Extend DMA reservation object lock
Prior to commit 019cbd4a4feb ("drm/nouveau: Initialize GEM object before TTM object"), the reservation object was locked across all of the buffer object creation. After splitting nouveau_bo_new() into separate nouveau_bo_alloc() and nouveau_bo_init() functions, the reservation object is passed to the latter, so the lock needs to be held across that function as well.

Fixes: 019cbd4a4feb ("drm/nouveau: Initialize GEM object before TTM object")
Signed-off-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
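In code terms, the change amounts to the following pattern, condensed from the diff below (a sketch only: the local declarations, TTM placement flags, and the drm_gem_object_init() step are elided):

	dma_resv_lock(robj, NULL);

	/* The buffer object is allocated under the reservation lock... */
	nvbo = nouveau_bo_alloc(&drm->client, &size, &align, flags, 0, 0);
	if (IS_ERR(nvbo)) {
		obj = ERR_CAST(nvbo);
		goto unlock;
	}

	/* ...and, unlike before, so is its initialization, because
	 * nouveau_bo_init() is now handed the reservation object. */
	ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		obj = ERR_PTR(ret);
		goto unlock;
	}

	obj = &nvbo->bo.base;

unlock:
	/* Single unlock point, reached from both success and error paths. */
	dma_resv_unlock(robj);
	return obj;

Funneling every exit through one unlock label is what lets the lock stay held across nouveau_bo_init() without each error path having to track the lock state separately.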
Diffstat (limited to '')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_prime.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 656c334ee7d9..bae6a3eccee0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -60,6 +60,7 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 							 struct sg_table *sg)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct drm_gem_object *obj;
 	struct nouveau_bo *nvbo;
 	struct dma_resv *robj = attach->dmabuf->resv;
 	u64 size = attach->dmabuf->size;
@@ -71,9 +72,10 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 
 	dma_resv_lock(robj, NULL);
 	nvbo = nouveau_bo_alloc(&drm->client, &size, &align, flags, 0, 0);
-	dma_resv_unlock(robj);
-	if (IS_ERR(nvbo))
-		return ERR_CAST(nvbo);
+	if (IS_ERR(nvbo)) {
+		obj = ERR_CAST(nvbo);
+		goto unlock;
+	}
 
 	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
 
@@ -82,16 +84,22 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 	ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
 	if (ret) {
 		nouveau_bo_ref(NULL, &nvbo);
-		return ERR_PTR(-ENOMEM);
+		obj = ERR_PTR(-ENOMEM);
+		goto unlock;
 	}
 
 	ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
 	if (ret) {
 		nouveau_bo_ref(NULL, &nvbo);
-		return ERR_PTR(ret);
+		obj = ERR_PTR(ret);
+		goto unlock;
 	}
 
-	return &nvbo->bo.base;
+	obj = &nvbo->bo.base;
+
+unlock:
+	dma_resv_unlock(robj);
+	return obj;
 }
 
 int nouveau_gem_prime_pin(struct drm_gem_object *obj)