author     Linus Torvalds <torvalds@linux-foundation.org>  2016-03-25 08:48:31 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-25 08:48:31 -0700
commit     f98c2135f8e73f1cfba640dfb80beb0dad75f278 (patch)
tree       4c97f1a81474ba6f3cb3c281685f0c79eb226859 /drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
parent     Merge tag 'asm-generic-4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic (diff)
parent     Merge branch 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux into drm-next (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "Just a couple of dma-buf related fixes and some amdgpu fixes, along
  with a regression fix for a radeon off-by-default feature, but it
  makes my 30" monitor happy again"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon/mst: cleanup code indentation
  drm/radeon/mst: fix regression in lane/link handling.
  drm/amdgpu: add invalidate_page callback for userptrs
  drm/amdgpu: Revert "remove the userptr rmn->lock"
  drm/amdgpu: clean up path handling for powerplay
  drm/amd/powerplay: fix memory leak of tdp_table
  dma-buf/fence: fix fence_is_later v2
  dma-buf: Update docs for SYNC ioctl
  drm: remove excess description
  dma-buf, drm, ion: Propagate error code from dma_buf_start_cpu_access()
  drm/atmel-hlcdc: use helper to get crtc state
  drm/atomic: use helper to get crtc state
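Of the amdgpu fixes pulled in here, two land in amdgpu_mn.c and are shown in full below: "add invalidate_page callback for userptrs" and the revert restoring the userptr rmn->lock. As context, a minimal sketch of the notifier shape being hooked up, assuming the mmu_notifier API as it stood around Linux 4.6 (the invalidate_page hook was removed from later kernels); the my_* names are hypothetical, not amdgpu's:

#include <linux/mmu_notifier.h>

/* Hypothetical minimal notifier: invalidate_page fires when the CPU
 * page table entry for a single page of the registered mm is about
 * to change, so any driver-side mapping of that page must be torn
 * down before the CPU side moves on. */
static void my_invalidate_page(struct mmu_notifier *mn,
			       struct mm_struct *mm,
			       unsigned long address)
{
	/* look up and unmap whatever the driver has covering 'address' */
}

static const struct mmu_notifier_ops my_mn_ops = {
	.invalidate_page = my_invalidate_page,
};

The diff below wires exactly this slot to amdgpu_mn_invalidate_page.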
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 120
1 file changed, 86 insertions(+), 34 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index d7ec9bd6755f..9f4a45cd2aab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -48,7 +48,8 @@ struct amdgpu_mn {
/* protected by adev->mn_lock */
struct hlist_node node;
- /* objects protected by mm->mmap_sem */
+ /* objects protected by lock */
+ struct mutex lock;
struct rb_root objects;
};
@@ -72,7 +73,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
struct amdgpu_bo *bo, *next_bo;
mutex_lock(&adev->mn_lock);
- down_write(&rmn->mm->mmap_sem);
+ mutex_lock(&rmn->lock);
hash_del(&rmn->node);
rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
it.rb) {
@@ -82,7 +83,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
}
kfree(node);
}
- up_write(&rmn->mm->mmap_sem);
+ mutex_unlock(&rmn->lock);
mutex_unlock(&adev->mn_lock);
mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
kfree(rmn);
@@ -105,6 +106,76 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
}
/**
+ * amdgpu_mn_invalidate_node - unmap all BOs of a node
+ *
+ * @node: the node with the BOs to unmap
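+ * @start: start of the affected range
+ * @end: end (inclusive) of the affected range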
+ *
+ * We block for all BOs of the node and unmap them by moving
+ * them back into the system domain.
+ */
+static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
+ unsigned long start,
+ unsigned long end)
+{
+ struct amdgpu_bo *bo;
+ long r;
+
+ list_for_each_entry(bo, &node->bos, mn_list) {
+
+ if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
+ continue;
+
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ DRM_ERROR("(%ld) failed to reserve user bo\n", r);
+ continue;
+ }
+
+ r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+ true, false, MAX_SCHEDULE_TIMEOUT);
+ if (r <= 0)
+ DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+
+ amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (r)
+ DRM_ERROR("(%ld) failed to validate user bo\n", r);
+
+ amdgpu_bo_unreserve(bo);
+ }
+}
+
+/**
+ * amdgpu_mn_invalidate_page - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ * @address: address of the invalidated page
+ *
+ * Invalidation of a single page. Blocks for all BOs mapping it
+ * and unmaps them by moving them back into the system domain.
+ */
+static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+ struct interval_tree_node *it;
+
+ mutex_lock(&rmn->lock);
+
+ it = interval_tree_iter_first(&rmn->objects, address, address);
+ if (it) {
+ struct amdgpu_mn_node *node;
+
+ node = container_of(it, struct amdgpu_mn_node, it);
+ amdgpu_mn_invalidate_node(node, address, address);
+ }
+
+ mutex_unlock(&rmn->lock);
+}
+
+/**
* amdgpu_mn_invalidate_range_start - callback to notify about mm change
*
* @mn: our notifier
@@ -126,44 +197,24 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
/* notification is exclusive, but interval is inclusive */
end -= 1;
+ mutex_lock(&rmn->lock);
+
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
struct amdgpu_mn_node *node;
- struct amdgpu_bo *bo;
- long r;
node = container_of(it, struct amdgpu_mn_node, it);
it = interval_tree_iter_next(it, start, end);
- list_for_each_entry(bo, &node->bos, mn_list) {
-
- if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
- end))
- continue;
-
- r = amdgpu_bo_reserve(bo, true);
- if (r) {
- DRM_ERROR("(%ld) failed to reserve user bo\n", r);
- continue;
- }
-
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
- true, false, MAX_SCHEDULE_TIMEOUT);
- if (r <= 0)
- DRM_ERROR("(%ld) failed to wait for user bo\n", r);
-
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (r)
- DRM_ERROR("(%ld) failed to validate user bo\n", r);
-
- amdgpu_bo_unreserve(bo);
- }
+ amdgpu_mn_invalidate_node(node, start, end);
}
+
+ mutex_unlock(&rmn->lock);
}
static const struct mmu_notifier_ops amdgpu_mn_ops = {
.release = amdgpu_mn_release,
+ .invalidate_page = amdgpu_mn_invalidate_page,
.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};
@@ -196,6 +247,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
rmn->adev = adev;
rmn->mm = mm;
rmn->mn.ops = &amdgpu_mn_ops;
+ mutex_init(&rmn->lock);
rmn->objects = RB_ROOT;
r = __mmu_notifier_register(&rmn->mn, mm);
@@ -242,7 +294,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
INIT_LIST_HEAD(&bos);
- down_write(&rmn->mm->mmap_sem);
+ mutex_lock(&rmn->lock);
while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
kfree(node);
@@ -256,7 +308,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
if (!node) {
node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
if (!node) {
- up_write(&rmn->mm->mmap_sem);
+ mutex_unlock(&rmn->lock);
return -ENOMEM;
}
}
@@ -271,7 +323,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
interval_tree_insert(&node->it, &rmn->objects);
- up_write(&rmn->mm->mmap_sem);
+ mutex_unlock(&rmn->lock);
return 0;
}
@@ -297,7 +349,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
return;
}
- down_write(&rmn->mm->mmap_sem);
+ mutex_lock(&rmn->lock);
/* save the next list entry for later */
head = bo->mn_list.next;
@@ -312,6 +364,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
kfree(node);
}
- up_write(&rmn->mm->mmap_sem);
+ mutex_unlock(&rmn->lock);
mutex_unlock(&adev->mn_lock);
}
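A note on the locking change running through this diff: the revert swaps down_write(&rmn->mm->mmap_sem) for a dedicated rmn->lock mutex, so the notifier bookkeeping no longer piggybacks on the mm's mmap_sem, and the new invalidate_page path can take the same lock from notifier context. A minimal sketch of the resulting lock ordering follows; the sketch_* names are hypothetical, and only the two locks and their ordering mirror the diff:

#include <linux/mutex.h>
#include <linux/rbtree.h>

/* Sketch only: 'sketch_dev' stands in for struct amdgpu_device and
 * 'sketch_mn' for struct amdgpu_mn; neither is a real amdgpu type. */
struct sketch_dev {
	struct mutex mn_lock;	/* outer: protects the notifier hash */
};

struct sketch_mn {
	struct mutex lock;	/* inner: protects 'objects' */
	struct rb_root objects;	/* interval tree of userptr BOs */
};

static void sketch_destroy(struct sketch_dev *dev, struct sketch_mn *rmn)
{
	mutex_lock(&dev->mn_lock);	/* always the outer lock first ... */
	mutex_lock(&rmn->lock);		/* ... then the inner one, as in
					 * amdgpu_mn_destroy() above */
	/* tear down rmn->objects here */
	mutex_unlock(&rmn->lock);
	mutex_unlock(&dev->mn_lock);
}

The notifier callbacks themselves (invalidate_page, invalidate_range_start) take only the inner rmn->lock, consistent with not relying on mmap_sem from notifier context.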