author     Dave Airlie <airlied@redhat.com>    2021-06-24 07:15:17 +1000
committer  Dave Airlie <airlied@redhat.com>    2021-06-24 07:21:16 +1000
commit     334200bf52f0637a5ab8331c557dfcecbb9c30fa (patch)
tree       506009c182f6204a0998e2b892716bfa8cbac6e0 /drivers/gpu/drm/msm/msm_gem.c
parent     Merge tag 'drm-misc-next-fixes-2021-06-18' of git://anongit.freedesktop.org/drm/drm-misc into drm-next (diff)
parent     Revert "drm/msm/mdp5: provide dynamic bandwidth management" (diff)
Merge tag 'drm-msm-next-2021-06-23b' of https://gitlab.freedesktop.org/drm/msm into drm-next
* devcoredump support for display errors
* dpu: irq cleanup/refactor
* dpu: dt bindings conversion to yaml
* dsi: dt bindings conversion to yaml
* mdp5: alpha/blend_mode/zpos support
* a6xx: cached coherent buffer support
* a660 support
* gpu iova fault improvements:
   - info about which block triggered the fault, etc
   - generation of gpu devcoredump on fault
* assortment of other cleanups and fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGs4=qsGBBbyn-4JWqW4-YUSTKh67X3DsPQ=T2D9aXKqNA@mail.gmail.com
Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c  38
1 file changed, 19 insertions, 19 deletions
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b61f5466e522..141178754231 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -211,6 +211,13 @@ void msm_gem_put_pages(struct drm_gem_object *obj)
msm_gem_unlock(obj);
}
+static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
+{
+ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ return pgprot_writecombine(prot);
+ return prot;
+}
+
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
@@ -218,22 +225,7 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
-
- if (msm_obj->flags & MSM_BO_WC) {
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- } else if (msm_obj->flags & MSM_BO_UNCACHED) {
- vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
- } else {
- /*
- * Shunt off cached objs to shmem file so they have their own
- * address_space (so unmap_mapping_range does what we want,
- * in particular in the case of mmap'd dmabufs)
- */
- vma->vm_pgoff = 0;
- vma_set_file(vma, obj->filp);
-
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- }
+ vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
}
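
After this hunk, both MSM_BO_WC and MSM_BO_UNCACHED objects get write-combined userspace mappings through the new msm_gem_pgprot() helper, and cached objects are no longer shunted to the shmem file in this path. For illustration, a minimal userspace sketch of the mmap flow this hunk affects, using the existing msm UAPI (DRM_IOCTL_MSM_GEM_NEW, DRM_IOCTL_MSM_GEM_INFO, MSM_INFO_GET_OFFSET); the device node, buffer size, and omitted error handling are assumptions:

    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/msm_drm.h>

    int main(void)
    {
        int fd = open("/dev/dri/renderD128", O_RDWR);  /* assumed render node */

        /* Allocate a 4 KiB write-combined BO; after this patch,
         * MSM_BO_UNCACHED would be mapped the same way. */
        struct drm_msm_gem_new req = { .size = 4096, .flags = MSM_BO_WC };
        ioctl(fd, DRM_IOCTL_MSM_GEM_NEW, &req);

        /* Look up the fake mmap offset for this handle. */
        struct drm_msm_gem_info info = {
            .handle = req.handle,
            .info = MSM_INFO_GET_OFFSET,
        };
        ioctl(fd, DRM_IOCTL_MSM_GEM_INFO, &info);

        /* vm_page_prot for this mapping now comes from msm_gem_pgprot(). */
        uint32_t *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                             MAP_SHARED, fd, info.value);
        map[0] = 0xdeadbeef;  /* write bypasses the CPU cache (WC) */
        return 0;
    }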
@@ -372,7 +364,7 @@ static void del_vma(struct msm_gem_vma *vma)
kfree(vma);
}
-/**
+/*
* If close is true, this also closes the VMA (releasing the allocated
* iova range) in addition to removing the iommu mapping. In the eviction
* case (!close), we keep the iova allocated, but only remove the iommu
@@ -451,6 +443,9 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
if (msm_obj->flags & MSM_BO_MAP_PRIV)
prot |= IOMMU_PRIV;
+ if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
+ prot |= IOMMU_CACHE;
+
GEM_WARN_ON(!msm_gem_is_locked(obj));
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
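
The two additions above extend the IOMMU mapping attributes derived from the BO flags. These are plain bitflags from include/linux/iommu.h; a sketch of the full prot composition in msm_gem_pin_iova() after this change (the IOMMU_READ/IOMMU_WRITE lines sit outside the hunk and are reproduced here as an assumption about the surrounding code):

    int prot = IOMMU_READ;                        /* GPU reads always allowed */

    if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
        prot |= IOMMU_WRITE;                      /* writable unless marked read-only */

    if (msm_obj->flags & MSM_BO_MAP_PRIV)
        prot |= IOMMU_PRIV;                       /* privileged-only mapping */

    if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
        prot |= IOMMU_CACHE;                      /* coherent (snooped) access */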
@@ -653,7 +648,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
goto fail;
}
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
if (msm_obj->vaddr == NULL) {
ret = -ENOMEM;
goto fail;
@@ -773,7 +768,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
0, (loff_t)-1);
}
-/**
+/*
* Unpin the backing pages and make them available to be swapped out.
*/
void msm_gem_evict(struct drm_gem_object *obj)
@@ -1163,6 +1158,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
uint32_t size, uint32_t flags,
struct drm_gem_object **obj)
{
+ struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
switch (flags & MSM_BO_CACHE_MASK) {
@@ -1170,6 +1166,10 @@ static int msm_gem_new_impl(struct drm_device *dev,
case MSM_BO_CACHED:
case MSM_BO_WC:
break;
+ case MSM_BO_CACHED_COHERENT:
+ if (priv->has_cached_coherent)
+ break;
+ /* fallthrough */
default:
DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
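
With the new case, MSM_BO_CACHED_COHERENT is accepted only when the device sets has_cached_coherent (enabled for a6xx-class GPUs elsewhere in this series); on other hardware the flag falls through to the invalid-cache-flag error path. A hedged userspace sketch of the resulting negotiation, reusing fd from the earlier sketch and assuming the allocation ioctl fails when the flag is rejected:

    /* Prefer a cached-coherent BO; fall back to write-combined on
     * GPUs or kernels that reject the new flag. */
    struct drm_msm_gem_new req = {
        .size = 4096,
        .flags = MSM_BO_CACHED_COHERENT,
    };

    if (ioctl(fd, DRM_IOCTL_MSM_GEM_NEW, &req) < 0) {
        req.flags = MSM_BO_WC;
        ioctl(fd, DRM_IOCTL_MSM_GEM_NEW, &req);
    }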