author    Thierry Reding <treding@nvidia.com>	2017-12-13 12:22:48 +0100
committer Thierry Reding <treding@nvidia.com>	2018-03-17 00:03:54 +0100
commit    bd43c9f0fa1f664b58eefdc8aab7ac3c8b2026ec (patch)
tree      02c82972bf7405fb3349cca37cbca9bfd9548112 /drivers/gpu/drm/tegra
parent    drm/tegra: hub: Use private object for global state (diff)
drm/tegra: gem: Map pages via the DMA API
When allocating pages, map them with the DMA API in order to invalidate caches. This is the correct usage of the API and works just as well as faking up the SG table and using the dma_sync_sg_for_device() function.

Signed-off-by: Thierry Reding <treding@nvidia.com>
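As background for the hunks below: unlike most kernel interfaces, dma_map_sg() does not return a negative errno on failure; it returns the number of DMA segments mapped, with 0 indicating failure, which is why the patch converts a zero return into -EFAULT by hand. A minimal sketch of that pattern, assuming a device and an already-built sg_table (the helper name is hypothetical, not part of the patch):

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/*
	 * Hypothetical helper sketching the dma_map_sg() convention used
	 * in the patch: the call returns the number of mapped segments,
	 * so a zero return means failure and must be translated into a
	 * negative errno manually.
	 */
	static int map_bo_pages(struct device *dev, struct sg_table *sgt)
	{
		int count;

		count = dma_map_sg(dev, sgt->sgl, sgt->nents,
				   DMA_BIDIRECTIONAL);
		if (count == 0)
			return -EFAULT;

		/*
		 * The matching dma_unmap_sg() in the release path must
		 * pass the same direction (DMA_BIDIRECTIONAL here).
		 */
		return 0;
	}

In the patch itself the mapped-segment count is not needed afterwards, so the return value is only tested against zero before being replaced with -EFAULT.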
Diffstat (limited to 'drivers/gpu/drm/tegra')
-rw-r--r--	drivers/gpu/drm/tegra/gem.c	32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index d3b3aa148225..8fc5860ab19a 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -203,6 +203,8 @@ free:
 static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
 {
 	if (bo->pages) {
+		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
+			     DMA_BIDIRECTIONAL);
 		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
 		sg_free_table(bo->sgt);
 		kfree(bo->sgt);
@@ -213,8 +215,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
 
 static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
 {
-	struct scatterlist *s;
-	unsigned int i;
+	int err;
 
 	bo->pages = drm_gem_get_pages(&bo->gem);
 	if (IS_ERR(bo->pages))
@@ -223,27 +224,26 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
 	bo->num_pages = bo->gem.size >> PAGE_SHIFT;
 
 	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
-	if (IS_ERR(bo->sgt))
+	if (IS_ERR(bo->sgt)) {
+		err = PTR_ERR(bo->sgt);
 		goto put_pages;
+	}
 
-	/*
-	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
-	 * to flush the pages associated with it.
-	 *
-	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
-	 * without relying on symbols that are not exported.
-	 */
-	for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
-		sg_dma_address(s) = sg_phys(s);
-
-	dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
-			       DMA_TO_DEVICE);
+	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
+			 DMA_BIDIRECTIONAL);
+	if (err == 0) {
+		err = -EFAULT;
+		goto free_sgt;
+	}
 
 	return 0;
 
+free_sgt:
+	sg_free_table(bo->sgt);
+	kfree(bo->sgt);
 put_pages:
 	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
-	return PTR_ERR(bo->sgt);
+	return err;
 }
 
 static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)