From 0036bc73ccbe7e600a3468bf8e8879b122252274 Mon Sep 17 00:00:00 2001
From: Rob Clark
Date: Sun, 30 Jun 2019 05:47:22 -0700
Subject: drm/msm: stop abusing dma_map/unmap for cache

Recently splats like this started showing up:

  WARNING: CPU: 4 PID: 251 at drivers/iommu/dma-iommu.c:451 __iommu_dma_unmap+0xb8/0xc0
  Modules linked in: ath10k_snoc ath10k_core fuse msm ath mac80211 uvcvideo cfg80211 videobuf2_vmalloc videobuf2_memops vide
  CPU: 4 PID: 251 Comm: kworker/u16:4 Tainted: G W 5.2.0-rc5-next-20190619+ #2317
  Hardware name: LENOVO 81JL/LNVNB161216, BIOS 9UCN23WW(V1.06) 10/25/2018
  Workqueue: msm msm_gem_free_work [msm]
  pstate: 80c00005 (Nzcv daif +PAN +UAO)
  pc : __iommu_dma_unmap+0xb8/0xc0
  lr : __iommu_dma_unmap+0x54/0xc0
  sp : ffff0000119abce0
  x29: ffff0000119abce0 x28: 0000000000000000
  x27: ffff8001f9946648 x26: ffff8001ec271068
  x25: 0000000000000000 x24: ffff8001ea3580a8
  x23: ffff8001f95ba010 x22: ffff80018e83ba88
  x21: ffff8001e548f000 x20: fffffffffffff000
  x19: 0000000000001000 x18: 00000000c00001fe
  x17: 0000000000000000 x16: 0000000000000000
  x15: ffff000015b70068 x14: 0000000000000005
  x13: 0003142cc1be1768 x12: 0000000000000001
  x11: ffff8001f6de9100 x10: 0000000000000009
  x9 : ffff000015b78000 x8 : 0000000000000000
  x7 : 0000000000000001 x6 : fffffffffffff000
  x5 : 0000000000000fff x4 : ffff00001065dbc8
  x3 : 000000000000000d x2 : 0000000000001000
  x1 : fffffffffffff000 x0 : 0000000000000000
  Call trace:
   __iommu_dma_unmap+0xb8/0xc0
   iommu_dma_unmap_sg+0x98/0xb8
   put_pages+0x5c/0xf0 [msm]
   msm_gem_free_work+0x10c/0x150 [msm]
   process_one_work+0x1e0/0x330
   worker_thread+0x40/0x438
   kthread+0x12c/0x130
   ret_from_fork+0x10/0x18
  ---[ end trace afc0dc5ab81a06bf ]---

Not quite sure what triggered that, but we really shouldn't be abusing
dma_{map,unmap}_sg() for cache maintenance.

Cc: Stephen Boyd
Tested-by: Stephen Boyd
Reviewed-by: Jordan Crouse
Signed-off-by: Rob Clark
Signed-off-by: Sean Paul
Link: https://patchwork.freedesktop.org/patch/msgid/20190630124735.27786-1-robdclark@gmail.com
---
 drivers/gpu/drm/msm/msm_gem.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/gpu/drm/msm/msm_gem.c')

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 8b78554cfde3..c2114c748c2f 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -97,7 +97,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 		 * because display controller, GPU, etc. are not coherent:
 		 */
 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
+			dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
 					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 	}
 
@@ -127,7 +127,7 @@ static void put_pages(struct drm_gem_object *obj)
 			 * GPU, etc. are not coherent:
 			 */
 			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
+				dma_sync_sg_for_cpu(obj->dev->dev, msm_obj->sgt->sgl,
 						msm_obj->sgt->nents,
 						DMA_BIDIRECTIONAL);
 
--
cgit v1.2.3-59-g8ed1b

From 3de433c5b38af49a5fc7602721e2ab5d39f1e69c Mon Sep 17 00:00:00 2001
From: Rob Clark
Date: Tue, 30 Jul 2019 14:46:28 -0700
Subject: drm/msm: Use the correct dma_sync calls in msm_gem

[subject was: drm/msm: shake fist angrily at dma-mapping]

So, using dma_sync_* for our cache needs works out with the dma-iommu
ops, but it falls apart with the dma-direct ops.
The problem is that, depending on display generation, we can have either
set of dma ops (mdp4 and dpu have the iommu wired up to the mdss node,
which maps to the toplevel drm device, but mdp5 has the iommu wired up
to the mdp sub-node within mdss).

This fixes the following splat on mdp5 devices:

  Unable to handle kernel paging request at virtual address ffffffff80000000
  Mem abort info:
    ESR = 0x96000144
    Exception class = DABT (current EL), IL = 32 bits
    SET = 0, FnV = 0
    EA = 0, S1PTW = 0
  Data abort info:
    ISV = 0, ISS = 0x00000144
    CM = 1, WnR = 1
  swapper pgtable: 4k pages, 48-bit VAs, pgdp=00000000810e4000
  [ffffffff80000000] pgd=0000000000000000
  Internal error: Oops: 96000144 [#1] SMP
  Modules linked in: btqcomsmd btqca bluetooth cfg80211 ecdh_generic ecc rfkill libarc4 panel_simple msm wcnss_ctrl qrtr_smd drm_kms_helper venus_enc venus_dec videobuf2_dma_sg videobuf2_memops drm venus_core ipv6 qrtr qcom_wcnss_pil v4l2_mem2mem qcom_sysmon videobuf2_v4l2 qmi_helpers videobuf2_common crct10dif_ce mdt_loader qcom_common videodev qcom_glink_smem remoteproc bmc150_accel_i2c bmc150_magn_i2c bmc150_accel_core bmc150_magn snd_soc_lpass_apq8016 snd_soc_msm8916_analog mms114 mc nf_defrag_ipv6 snd_soc_lpass_cpu snd_soc_apq8016_sbc industrialio_triggered_buffer kfifo_buf snd_soc_lpass_platform snd_soc_msm8916_digital drm_panel_orientation_quirks
  CPU: 2 PID: 33 Comm: kworker/2:1 Not tainted 5.3.0-rc2 #1
  Hardware name: Samsung Galaxy A5U (EUR) (DT)
  Workqueue: events deferred_probe_work_func
  pstate: 80000005 (Nzcv daif -PAN -UAO)
  pc : __clean_dcache_area_poc+0x20/0x38
  lr : arch_sync_dma_for_device+0x28/0x30
  sp : ffff0000115736a0
  x29: ffff0000115736a0 x28: 0000000000000001
  x27: ffff800074830800 x26: ffff000011478000
  x25: 0000000000000000 x24: 0000000000000001
  x23: ffff000011478a98 x22: ffff800009fd1c10
  x21: 0000000000000001 x20: ffff800075ad0a00
  x19: 0000000000000000 x18: ffff0000112b2000
  x17: 0000000000000000 x16: 0000000000000000
  x15: 00000000fffffff0 x14: ffff000011455d70
  x13: 0000000000000000 x12: 0000000000000028
  x11: 0000000000000001 x10: ffff00001106c000
  x9 : ffff7e0001d6b380 x8 : 0000000000001000
  x7 : ffff7e0001d6b380 x6 : ffff7e0001d6b382
  x5 : 0000000000000000 x4 : 0000000000001000
  x3 : 000000000000003f x2 : 0000000000000040
  x1 : ffffffff80001000 x0 : ffffffff80000000
  Call trace:
   __clean_dcache_area_poc+0x20/0x38
   dma_direct_sync_sg_for_device+0xb8/0xe8
   get_pages+0x22c/0x250 [msm]
   msm_gem_get_and_pin_iova+0xdc/0x168 [msm]
   ...

This fixes the combination of two patches:

Fixes: 0036bc73ccbe ("drm/msm: stop abusing dma_map/unmap for cache")
Fixes: 449fa54d6815 ("dma-direct: correct the physical addr in dma_direct_sync_sg_for_cpu/device")
Tested-by: Stephan Gerhold
Signed-off-by: Rob Clark
[seanpaul changed subject to something more descriptive]
Signed-off-by: Sean Paul
Link: https://patchwork.freedesktop.org/patch/msgid/20190730214633.17820-1-robdclark@gmail.com
---
 drivers/gpu/drm/msm/msm_gem.c | 47 ++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 42 insertions(+), 5 deletions(-)

(limited to 'drivers/gpu/drm/msm/msm_gem.c')

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index c2114c748c2f..8cf6362e64bf 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj)
 	return !msm_obj->vram_node;
 }
 
+/*
+ * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
+ * API.  Really GPU cache is out of scope here (handled on cmdstream)
+ * and all we need to do is invalidate newly allocated pages before
+ * mapping to CPU as uncached/writecombine.
+ *
+ * On top of this, we have the added headache that, depending on
+ * display generation, the display's iommu may be wired up to either
+ * the toplevel drm device (mdss), or to the mdp sub-node, meaning
+ * that here we either have dma-direct or iommu ops.
+ *
+ * Let this be a cautionary tale of abstraction gone wrong.
+ */
+
+static void sync_for_device(struct msm_gem_object *msm_obj)
+{
+	struct device *dev = msm_obj->base.dev->dev;
+
+	if (get_dma_ops(dev)) {
+		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	} else {
+		dma_map_sg(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	}
+}
+
+static void sync_for_cpu(struct msm_gem_object *msm_obj)
+{
+	struct device *dev = msm_obj->base.dev->dev;
+
+	if (get_dma_ops(dev)) {
+		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	}
+}
+
 /* allocate pages from VRAM carveout, used when no IOMMU: */
 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 {
@@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 		 * because display controller, GPU, etc. are not coherent:
 		 */
 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-			dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
-					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+			sync_for_device(msm_obj);
 	}
 	return msm_obj->pages;
 }
@@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj)
 			 * GPU, etc. are not coherent:
 			 */
 			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-				dma_sync_sg_for_cpu(obj->dev->dev, msm_obj->sgt->sgl,
-						msm_obj->sgt->nents,
-						DMA_BIDIRECTIONAL);
+				sync_for_cpu(msm_obj);
 
 			sg_free_table(msm_obj->sgt);
 			kfree(msm_obj->sgt);
--
cgit v1.2.3-59-g8ed1b
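For readers who want to reuse the pattern these two patches converge on outside
the msm driver, here is a minimal standalone sketch. It is not part of either
patch: struct my_buf and the my_buf_sync_* helper names are hypothetical, and
the only real kernel API used is the dma-mapping interface as it looked in a
5.3-era tree (get_dma_ops(), dma_sync_sg_for_{device,cpu}(), dma_map_sg() and
dma_unmap_sg(), all reachable via <linux/dma-mapping.h>).

/*
 * Sketch only (hypothetical my_buf wrapper): pick dma_sync_sg_* when the
 * device has dma ops (e.g. dma-iommu) attached, fall back to map/unmap
 * purely for their cache-maintenance side effect on dma-direct devices.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

struct my_buf {				/* hypothetical buffer wrapper */
	struct device *dev;		/* device whose dma ops pick the path */
	struct sg_table *sgt;		/* pages backing the buffer */
};

/* Clean CPU caches before the device (GPU/display) touches the pages. */
static void my_buf_sync_for_device(struct my_buf *buf)
{
	if (get_dma_ops(buf->dev))
		dma_sync_sg_for_device(buf->dev, buf->sgt->sgl,
				       buf->sgt->nents, DMA_BIDIRECTIONAL);
	else
		dma_map_sg(buf->dev, buf->sgt->sgl,
			   buf->sgt->nents, DMA_BIDIRECTIONAL);
}

/* Hand the pages back to the CPU before freeing or accessing them cached. */
static void my_buf_sync_for_cpu(struct my_buf *buf)
{
	if (get_dma_ops(buf->dev))
		dma_sync_sg_for_cpu(buf->dev, buf->sgt->sgl,
				    buf->sgt->nents, DMA_BIDIRECTIONAL);
	else
		dma_unmap_sg(buf->dev, buf->sgt->sgl,
			     buf->sgt->nents, DMA_BIDIRECTIONAL);
}

The get_dma_ops() check is the same discriminator the second patch uses in
sync_for_device()/sync_for_cpu(): when it returns non-NULL the device sits
behind real dma ops (the dma-iommu case here) and a plain cache sync is
sufficient, whereas on a dma-direct device the sync path derives the physical
address from sg_dma_address(), which was never filled in because nothing was
mapped, producing the bogus ffffffff80000000 access in the splat above; hence
the fallback to dma_map_sg()/dma_unmap_sg() on that path.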