From 20c827683de05a6c7e7ae7fae586899690693251 Mon Sep 17 00:00:00 2001
From: Frieder Schrempf
Date: Mon, 24 Jul 2023 17:16:32 +0200
Subject: drm: bridge: samsung-dsim: Fix init during host transfer

In case the downstream bridge or panel uses DSI transfers before the
DSI host was actually initialized through samsung_dsim_atomic_enable(),
which clears the stop state (LP11) mode, all transfers will fail. This
happens with downstream bridges that are controlled by DSI commands,
such as the tc358762.

As documented in [1], DSI hosts are expected to allow transfers outside
the normal bridge enable/disable flow. To fix this, make sure that stop
state is cleared in samsung_dsim_host_transfer(), which restores the
previous behavior. We also factor out the common code to enable/disable
stop state into samsung_dsim_set_stop_state().

[1] https://docs.kernel.org/gpu/drm-kms-helpers.html#mipi-dsi-bridge-operation

Fixes: 0c14d3130654 ("drm: bridge: samsung-dsim: Fix i.MX8M enable flow to meet spec")
Reported-by: Tim Harvey
Signed-off-by: Frieder Schrempf
Reviewed-by: Neil Armstrong
Tested-by: Tim Harvey
Signed-off-by: Neil Armstrong
Link: https://patchwork.freedesktop.org/patch/msgid/20230724151640.555490-1-frieder@fris.de
---
 drivers/gpu/drm/bridge/samsung-dsim.c | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index 043b8109e64a..73ec60757dbc 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -1386,6 +1386,18 @@ static void samsung_dsim_disable_irq(struct samsung_dsim *dsi)
         disable_irq(dsi->irq);
 }
 
+static void samsung_dsim_set_stop_state(struct samsung_dsim *dsi, bool enable)
+{
+        u32 reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
+
+        if (enable)
+                reg |= DSIM_FORCE_STOP_STATE;
+        else
+                reg &= ~DSIM_FORCE_STOP_STATE;
+
+        samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
+}
+
 static int samsung_dsim_init(struct samsung_dsim *dsi)
 {
         const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
@@ -1445,15 +1457,12 @@ static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
                                        struct drm_bridge_state *old_bridge_state)
 {
         struct samsung_dsim *dsi = bridge_to_dsi(bridge);
-        u32 reg;
 
         if (samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
                 samsung_dsim_set_display_mode(dsi);
                 samsung_dsim_set_display_enable(dsi, true);
         } else {
-                reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
-                reg &= ~DSIM_FORCE_STOP_STATE;
-                samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
+                samsung_dsim_set_stop_state(dsi, false);
         }
 
         dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
@@ -1463,16 +1472,12 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
                                         struct drm_bridge_state *old_bridge_state)
 {
         struct samsung_dsim *dsi = bridge_to_dsi(bridge);
-        u32 reg;
 
         if (!(dsi->state & DSIM_STATE_ENABLED))
                 return;
 
-        if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
-                reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
-                reg |= DSIM_FORCE_STOP_STATE;
-                samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
-        }
+        if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
+                samsung_dsim_set_stop_state(dsi, true);
 
         dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
 }
@@ -1775,6 +1780,8 @@ static ssize_t samsung_dsim_host_transfer(struct mipi_dsi_host *host,
         if (ret)
                 return ret;
 
+        samsung_dsim_set_stop_state(dsi, false);
+
         ret = mipi_dsi_create_packet(&xfer.packet, msg);
         if (ret < 0)
                 return ret;
-- cgit v1.2.3-59-g8ed1b
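The ordering problem the patch above fixes is easy to lose in the diff, so here is a
small, self-contained C sketch of the control flow (toy code with invented names,
compiled stand-alone; it is not the samsung-dsim driver): a host that only releases
the forced stop state in its atomic_enable() path rejects commands that a downstream
bridge such as the tc358762 sends during its own setup, while a host that also clears
the stop state in its transfer path accepts them.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the DSIM_FORCE_STOP_STATE bit in DSIM_ESCMODE_REG. */
struct toy_dsi_host {
        bool force_stop_state;          /* true: link held in LP11 */
};

static void toy_set_stop_state(struct toy_dsi_host *host, bool enable)
{
        host->force_stop_state = enable;
}

/* Pre-fix behaviour: assume atomic_enable() already released the link. */
static int toy_transfer_old(struct toy_dsi_host *host, const char *cmd)
{
        if (host->force_stop_state)
                return -1;              /* command is lost / times out */

        printf("sent: %s\n", cmd);
        return 0;
}

/* Fixed behaviour: the transfer path releases the link itself. */
static int toy_transfer_fixed(struct toy_dsi_host *host, const char *cmd)
{
        toy_set_stop_state(host, false);
        return toy_transfer_old(host, cmd);
}

int main(void)
{
        /* Host starts out forcing stop state, as right after probe. */
        struct toy_dsi_host host = { .force_stop_state = true };

        /* A downstream bridge sends its init commands before the host's
         * atomic_enable() has run; the old path fails, the fixed one works. */
        if (toy_transfer_old(&host, "bridge init") != 0)
                printf("old path: transfer rejected\n");

        return toy_transfer_fixed(&host, "bridge init") ? 1 : 0;
}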
From f19df6e4de64b7fc6d71f192aa9ff3b701e4bade Mon Sep 17 00:00:00 2001
From: David Michael
Date: Tue, 15 Aug 2023 21:42:41 -0400
Subject: drm/panfrost: Skip speed binning on EOPNOTSUPP

Encountered on an ARM Mali-T760 MP4: attempting to read the nvmem
variable can also return EOPNOTSUPP instead of ENOENT when speed
binning is unsupported.

Cc:
Fixes: 7d690f936e9b ("drm/panfrost: Add basic support for speed binning")
Signed-off-by: David Michael
Reviewed-by: Steven Price
Signed-off-by: Steven Price
Link: https://patchwork.freedesktop.org/patch/msgid/87msyryd7y.fsf@gmail.com
---
 drivers/gpu/drm/panfrost/panfrost_devfreq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 58dfb15a8757..e78de99e9933 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -96,7 +96,7 @@ static int panfrost_read_speedbin(struct device *dev)
                 * keep going without it; any other error means that we are
                 * supposed to read the bin value, but we failed doing so.
                 */
-               if (ret != -ENOENT) {
+               if (ret != -ENOENT && ret != -EOPNOTSUPP) {
                        DRM_DEV_ERROR(dev, "Cannot read speed-bin (%d).", ret);
                        return ret;
                }
-- cgit v1.2.3-59-g8ed1b

From 5ad1ab30ac0809d2963ddcf39ac34317a24a2f17 Mon Sep 17 00:00:00 2001
From: Ankit Nautiyal
Date: Fri, 18 Aug 2023 10:14:36 +0530
Subject: drm/display/dp: Fix the DP DSC Receiver cap size

DP DSC Receiver Capabilities are exposed via DPCD 60h-6Fh. Fix the
DSC RECEIVER CAP SIZE accordingly.

Fixes: ffddc4363c28 ("drm/dp: Add DP DSC DPCD receiver capability size define and missing SHIFT")
Cc: Anusha Srivatsa
Cc: Manasi Navare
Cc: # v5.0+
Signed-off-by: Ankit Nautiyal
Reviewed-by: Stanislav Lisovskiy
Signed-off-by: Jani Nikula
Link: https://patchwork.freedesktop.org/patch/msgid/20230818044436.177806-1-ankit.k.nautiyal@intel.com
---
 include/drm/display/drm_dp.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 02f2ac4dd2df..e69cece404b3 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -1537,7 +1537,7 @@ enum drm_dp_phy {
 
 #define DP_BRANCH_OUI_HEADER_SIZE       0xc
 #define DP_RECEIVER_CAP_SIZE            0xf
-#define DP_DSC_RECEIVER_CAP_SIZE        0xf
+#define DP_DSC_RECEIVER_CAP_SIZE        0x10 /* DSC Capabilities 0x60 through 0x6F */
 #define EDP_PSR_RECEIVER_CAP_SIZE       2
 #define EDP_DISPLAY_CTL_CAP_SIZE        3
 #define DP_LTTPR_COMMON_CAP_SIZE        8
-- cgit v1.2.3-59-g8ed1b

From 2872144aec04baa7e43ecd2a60f7f0be3aa843fd Mon Sep 17 00:00:00 2001
From: Anshuman Gupta
Date: Wed, 16 Aug 2023 18:22:16 +0530
Subject: drm/i915/dgfx: Enable d3cold at s2idle

System wide suspend already has support for lmem save/restore during
suspend, therefore enable d3cold for s2idle and keep it disabled for
runtime PM. (Refer to the below commit for the d3cold runtime PM
disable justification.)
'commit 66eb93e71a7a ("drm/i915/dgfx: Keep PCI autosuspend control 'on'
by default on all dGPU")'

It will reduce the DG2 card power consumption to ~0 Watt for the s2idle
power KPI.

v2:
- Added "Cc: stable@vger.kernel.org".
Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/8755
Cc: stable@vger.kernel.org
Cc: Rodrigo Vivi
Signed-off-by: Anshuman Gupta
Reviewed-by: Rodrigo Vivi
Tested-by: Aaron Ma
Tested-by: Jianshui Yu
Link: https://patchwork.freedesktop.org/patch/msgid/20230816125216.1722002-1-anshuman.gupta@intel.com
(cherry picked from commit 2643e6d1f2a5e51877be24042d53cf956589be10)
Signed-off-by: Rodrigo Vivi
---
 drivers/gpu/drm/i915/i915_driver.c | 33 ++++++++++++++++++---------------
 1 file changed, 18 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 0ad0c5885ec2..7d8671fdf447 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -443,7 +443,6 @@ static int i915_pcode_init(struct drm_i915_private *i915)
 static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 {
         struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
-        struct pci_dev *root_pdev;
         int ret;
 
         if (i915_inject_probe_failure(dev_priv))
@@ -557,15 +556,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 
         intel_bw_init_hw(dev_priv);
 
-        /*
-         * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
-         * This should be totally removed when we handle the pci states properly
-         * on runtime PM and on s2idle cases.
-         */
-        root_pdev = pcie_find_root_port(pdev);
-        if (root_pdev)
-                pci_d3cold_disable(root_pdev);
-
         return 0;
 
 err_opregion:
@@ -591,7 +581,6 @@ err_perf:
 static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
 {
         struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
-        struct pci_dev *root_pdev;
 
         i915_perf_fini(dev_priv);
 
@@ -599,10 +588,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
 
         if (pdev->msi_enabled)
                 pci_disable_msi(pdev);
-
-        root_pdev = pcie_find_root_port(pdev);
-        if (root_pdev)
-                pci_d3cold_enable(root_pdev);
 }
 
 /**
@@ -1517,6 +1502,8 @@ static int intel_runtime_suspend(struct device *kdev)
 {
         struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
         struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+        struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+        struct pci_dev *root_pdev;
         struct intel_gt *gt;
         int ret, i;
 
@@ -1568,6 +1555,15 @@ static int intel_runtime_suspend(struct device *kdev)
                 drm_err(&dev_priv->drm,
                         "Unclaimed access detected prior to suspending\n");
 
+        /*
+         * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
+         * This should be totally removed when we handle the pci states properly
+         * on runtime PM.
+         */
+        root_pdev = pcie_find_root_port(pdev);
+        if (root_pdev)
+                pci_d3cold_disable(root_pdev);
+
         rpm->suspended = true;
 
         /*
@@ -1606,6 +1602,8 @@ static int intel_runtime_resume(struct device *kdev)
 {
         struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
         struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+        struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+        struct pci_dev *root_pdev;
         struct intel_gt *gt;
         int ret, i;
 
@@ -1619,6 +1617,11 @@ static int intel_runtime_resume(struct device *kdev)
         intel_opregion_notify_adapter(dev_priv, PCI_D0);
 
         rpm->suspended = false;
+
+        root_pdev = pcie_find_root_port(pdev);
+        if (root_pdev)
+                pci_d3cold_enable(root_pdev);
+
         if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
                 drm_dbg(&dev_priv->drm,
                         "Unclaimed access during suspend, bios?\n");
-- cgit v1.2.3-59-g8ed1b

From e0d25c591ac676ece0e1ad6bbd72a159b9355598 Mon Sep 17 00:00:00 2001
From: Jani Nikula
Date: Wed, 21 Jun 2023 15:31:56 +0300
Subject: drm/i915: fix Sphinx indentation warning

Fix Sphinx warning about unexpected indent.

Signed-off-by: Jani Nikula
Reviewed-by: Luca Coelho
Link: https://patchwork.freedesktop.org/patch/msgid/20230621123156.14907-2-jani.nikula@intel.com
(cherry picked from commit 175b036472f678948b03baabce4a008b7ba91ce7)
Signed-off-by: Rodrigo Vivi
---
 drivers/gpu/drm/i915/gt/uc/intel_huc.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index ddd146265beb..fa70defcb5b2 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -26,6 +26,7 @@
  * The kernel driver is only responsible for loading the HuC firmware and
  * triggering its security authentication. This is done differently depending
  * on the platform:
+ *
  * - older platforms (from Gen9 to most Gen12s): the load is performed via DMA
  *   and the authentication via GuC
  * - DG2: load and authentication are both performed via GSC.
@@ -33,6 +34,7 @@
  *   not-DG2 older platforms), while the authentication is done in 2-steps,
  *   a first auth for clear-media workloads via GuC and a second one for all
  *   workloads via GSC.
+ *
 * On platforms where the GuC does the authentication, to correctly do so the
 * HuC binary must be loaded before the GuC one.
 * Loading the HuC is optional; however, not using the HuC might negatively
-- cgit v1.2.3-59-g8ed1b

From e531fdb5cd5ee2564b7fe10c8a9219e2b2fac61e Mon Sep 17 00:00:00 2001
From: Rob Clark
Date: Fri, 18 Aug 2023 07:59:38 -0700
Subject: dma-buf/sw_sync: Avoid recursive lock during fence signal
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If a signal callback releases the sw_sync fence, that will trigger a
deadlock as the timeline_fence_release recurses onto the fence->lock
(used both for signaling and the timeline tree).

To avoid that, temporarily hold an extra reference to the signalled
fences until after we drop the lock.

(This is an alternative implementation of
https://patchwork.kernel.org/patch/11664717/ which avoids some
potential UAF issues with the original patch.)
v2: Remove now obsolete comment, use list_move_tail() and
    list_del_init()

Reported-by: Bas Nieuwenhuizen
Fixes: d3c6dd1fb30d ("dma-buf/sw_sync: Synchronize signal vs syncpt free")
Signed-off-by: Rob Clark
Link: https://patchwork.freedesktop.org/patch/msgid/20230818145939.39697-1-robdclark@gmail.com
Reviewed-by: Christian König
Signed-off-by: Christian König
---
 drivers/dma-buf/sw_sync.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 63f0aeb66db6..f0a35277fd84 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -191,6 +191,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
  */
 static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
 {
+        LIST_HEAD(signalled);
         struct sync_pt *pt, *next;
 
         trace_sync_timeline(obj);
@@ -203,21 +204,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
                 if (!timeline_fence_signaled(&pt->base))
                         break;
 
-                list_del_init(&pt->link);
+                dma_fence_get(&pt->base);
+
+                list_move_tail(&pt->link, &signalled);
                 rb_erase(&pt->node, &obj->pt_tree);
 
-                /*
-                 * A signal callback may release the last reference to this
-                 * fence, causing it to be freed. That operation has to be
-                 * last to avoid a use after free inside this loop, and must
-                 * be after we remove the fence from the timeline in order to
-                 * prevent deadlocking on timeline->lock inside
-                 * timeline_fence_release().
-                 */
                 dma_fence_signal_locked(&pt->base);
         }
 
         spin_unlock_irq(&obj->lock);
+
+        list_for_each_entry_safe(pt, next, &signalled, link) {
+                list_del_init(&pt->link);
+                dma_fence_put(&pt->base);
+        }
 }
 
 /**
-- cgit v1.2.3-59-g8ed1b

From 14abdfae508228a7307f7491b5c4215ae70c6542 Mon Sep 17 00:00:00 2001
From: Zack Rusin
Date: Fri, 16 Jun 2023 15:09:34 -0400
Subject: drm/vmwgfx: Fix shader stage validation

For multiple commands the driver was not correctly validating the
shader stages, resulting in possible kernel oopses. The validation
code was only, if ever, checking the upper bound on the shader stages
but never a lower bound (valid shader stages start at 1, not 0).

Fixes kernel oopses ending up in vmw_binding_add, e.g.:

Oops: 0000 [#1] PREEMPT SMP PTI
CPU: 1 PID: 2443 Comm: testcase Not tainted 6.3.0-rc4-vmwgfx #1
Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 11/12/2020
RIP: 0010:vmw_binding_add+0x4c/0x140 [vmwgfx]
Code: 7e 30 49 83 ff 0e 0f 87 ea 00 00 00 4b 8d 04 7f 89 d2 89 cb 48 c1 e0 03 4c 8b b0 40 3d 93 c0 48 8b 80 48 3d 93 c0 49 0f af de <48> 03 1c d0 4c 01 e3 49 8>
RSP: 0018:ffffb8014416b968 EFLAGS: 00010206
RAX: ffffffffc0933ec0 RBX: 0000000000000000 RCX: 0000000000000000
RDX: 00000000ffffffff RSI: ffffb8014416b9c0 RDI: ffffb8014316f000
RBP: ffffb8014416b998 R08: 0000000000000003 R09: 746f6c735f726564
R10: ffffffffaaf2bda0 R11: 732e676e69646e69 R12: ffffb8014316f000
R13: ffffb8014416b9c0 R14: 0000000000000040 R15: 0000000000000006
FS:  00007fba8c0af740(0000) GS:ffff8a1277c80000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00000007c0933eb8 CR3: 0000000118244001 CR4: 00000000003706e0
Call Trace:
 vmw_view_bindings_add+0xf5/0x1b0 [vmwgfx]
 ? ___drm_dbg+0x8a/0xb0 [drm]
 vmw_cmd_dx_set_shader_res+0x8f/0xc0 [vmwgfx]
 vmw_execbuf_process+0x590/0x1360 [vmwgfx]
 vmw_execbuf_ioctl+0x173/0x370 [vmwgfx]
 ? __drm_dev_dbg+0xb4/0xe0 [drm]
 ? __pfx_vmw_execbuf_ioctl+0x10/0x10 [vmwgfx]
 drm_ioctl_kernel+0xbc/0x160 [drm]
 drm_ioctl+0x2d2/0x580 [drm]
 ? __pfx_vmw_execbuf_ioctl+0x10/0x10 [vmwgfx]
 ? do_fault+0x1a6/0x420
 vmw_generic_ioctl+0xbd/0x180 [vmwgfx]
 vmw_unlocked_ioctl+0x19/0x20 [vmwgfx]
 __x64_sys_ioctl+0x96/0xd0
 do_syscall_64+0x5d/0x90
 ? handle_mm_fault+0xe4/0x2f0
 ? debug_smp_processor_id+0x1b/0x30
 ? fpregs_assert_state_consistent+0x2e/0x50
 ? exit_to_user_mode_prepare+0x40/0x180
 ? irqentry_exit_to_user_mode+0xd/0x20
 ? irqentry_exit+0x3f/0x50
 ? exc_page_fault+0x8b/0x180
 entry_SYSCALL_64_after_hwframe+0x72/0xdc

Signed-off-by: Zack Rusin
Cc: security@openanolis.org
Reported-by: Ziming Zhang
Testcase-found-by: Niels De Graef
Fixes: d80efd5cb3de ("drm/vmwgfx: Initial DX support")
Cc: # v4.3+
Reviewed-by: Maaz Mombasawala
Reviewed-by: Martin Krastev
Link: https://patchwork.freedesktop.org/patch/msgid/20230616190934.54828-1-zack@kde.org
---
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h     | 12 ++++++++++++
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 29 +++++++++++------------------
 2 files changed, 23 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 3810a9984a7f..58bfdf203eca 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1513,4 +1513,16 @@ static inline bool vmw_has_fences(struct vmw_private *vmw)
         return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
 }
 
+static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model,
+                                           u32 shader_type)
+{
+        SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX;
+
+        if (shader_model >= VMW_SM_5)
+                max_allowed = SVGA3D_SHADERTYPE_MAX;
+        else if (shader_model >= VMW_SM_4)
+                max_allowed = SVGA3D_SHADERTYPE_DX10_MAX;
+        return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed;
+}
+
 #endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 6b9aa2b4ef54..d30c0e3d3ab7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1992,7 +1992,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 
         cmd = container_of(header, typeof(*cmd), header);
 
-        if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
+        if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
                 VMW_DEBUG_USER("Illegal shader type %u.\n",
                                (unsigned int) cmd->body.type);
                 return -EINVAL;
@@ -2115,8 +2115,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
                                       SVGA3dCmdHeader *header)
 {
         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
-        SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
-                SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
         struct vmw_resource *res = NULL;
         struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
@@ -2133,6 +2131,14 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
         if (unlikely(ret != 0))
                 return ret;
 
+        if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
+            cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+                VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
+                               (unsigned int) cmd->body.type,
+                               (unsigned int) cmd->body.slot);
+                return -EINVAL;
+        }
+
         binding.bi.ctx = ctx_node->ctx;
         binding.bi.res = res;
         binding.bi.bt = vmw_ctx_binding_cb;
@@ -2141,14 +2147,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
         binding.size = cmd->body.sizeInBytes;
         binding.slot = cmd->body.slot;
 
-        if (binding.shader_slot >= max_shader_num ||
-            binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
-                VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
-                               (unsigned int) cmd->body.type,
-                               (unsigned int) binding.slot);
-                return -EINVAL;
-        }
-
         vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
                         binding.slot);
 
@@ -2207,15 +2205,13 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
 {
         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
                 container_of(header, typeof(*cmd), header);
-        SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
-                SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
         u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
                 sizeof(SVGA3dShaderResourceViewId);
 
         if ((u64) cmd->body.startView + (u64) num_sr_view >
             (u64) SVGA3D_DX_MAX_SRVIEWS ||
-            cmd->body.type >= max_allowed) {
+            !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
                 VMW_DEBUG_USER("Invalid shader binding.\n");
                 return -EINVAL;
         }
@@ -2239,8 +2235,6 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
                                  SVGA3dCmdHeader *header)
 {
         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
-        SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
-                SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
         struct vmw_resource *res = NULL;
         struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
         struct vmw_ctx_bindinfo_shader binding;
@@ -2251,8 +2245,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
 
         cmd = container_of(header, typeof(*cmd), header);
 
-        if (cmd->body.type >= max_allowed ||
-            cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
+        if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
                 VMW_DEBUG_USER("Illegal shader type %u.\n",
                                (unsigned int) cmd->body.type);
                 return -EINVAL;
-- cgit v1.2.3-59-g8ed1b

From f9e96bf1905479f18e83a3a4c314a8dfa56ede2c Mon Sep 17 00:00:00 2001
From: Zack Rusin
Date: Fri, 18 Aug 2023 00:13:01 -0400
Subject: drm/vmwgfx: Fix possible invalid drm gem put calls

vmw_bo_unreference sets the input buffer to null on exit, resulting in
null pointer dereferences on the subsequent drm gem put calls. This
went unnoticed because only very old userspace would be exercising
those paths, but it wouldn't be hard to hit on old distros with brand
new kernels.

Introduce a new function that abstracts unrefing of user bo's to make
the code cleaner and more explicit.
Signed-off-by: Zack Rusin
Reported-by: Ian Forbes
Fixes: 9ef8d83e8e25 ("drm/vmwgfx: Do not drop the reference to the handle too soon")
Cc: # v6.4+
Reviewed-by: Maaz Mombasawala
Link: https://patchwork.freedesktop.org/patch/msgid/20230818041301.407636-1-zack@kde.org
---
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c      | 6 ++----
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.h      | 8 ++++++++
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 6 ++----
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c     | 6 ++----
 drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 3 +--
 drivers/gpu/drm/vmwgfx/vmwgfx_shader.c  | 3 +--
 6 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 82094c137855..c43853597776 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -497,10 +497,9 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
                 if (!(flags & drm_vmw_synccpu_allow_cs)) {
                         atomic_dec(&vmw_bo->cpu_writers);
                 }
-                ttm_bo_put(&vmw_bo->tbo);
+                vmw_user_bo_unref(vmw_bo);
         }
 
-        drm_gem_object_put(&vmw_bo->tbo.base);
         return ret;
 }
 
@@ -540,8 +539,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
                         return ret;
 
                 ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
-                vmw_bo_unreference(&vbo);
-                drm_gem_object_put(&vbo->tbo.base);
+                vmw_user_bo_unref(vbo);
                 if (unlikely(ret != 0)) {
                         if (ret == -ERESTARTSYS || ret == -EBUSY)
                                 return -EBUSY;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 50a836e70994..1d433fceed3d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -195,6 +195,14 @@ static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
         return buf;
 }
 
+static inline void vmw_user_bo_unref(struct vmw_bo *vbo)
+{
+        if (vbo) {
+                ttm_bo_put(&vbo->tbo);
+                drm_gem_object_put(&vbo->tbo.base);
+        }
+}
+
 static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
 {
         return container_of((gobj), struct vmw_bo, tbo.base);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index d30c0e3d3ab7..98e0723ca6f5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1164,8 +1164,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
         }
         vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
         ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
-        ttm_bo_put(&vmw_bo->tbo);
-        drm_gem_object_put(&vmw_bo->tbo.base);
+        vmw_user_bo_unref(vmw_bo);
         if (unlikely(ret != 0))
                 return ret;
 
@@ -1221,8 +1220,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
         vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
                              VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
         ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
-        ttm_bo_put(&vmw_bo->tbo);
-        drm_gem_object_put(&vmw_bo->tbo.base);
+        vmw_user_bo_unref(vmw_bo);
         if (unlikely(ret != 0))
                 return ret;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b62207be3363..1489ad73c103 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1665,10 +1665,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 
 err_out:
         /* vmw_user_lookup_handle takes one ref so does new_fb */
-        if (bo) {
-                vmw_bo_unreference(&bo);
-                drm_gem_object_put(&bo->tbo.base);
-        }
+        if (bo)
+                vmw_user_bo_unref(bo);
         if (surface)
                 vmw_surface_unreference(&surface);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 7e112319a23c..fb85f244c3d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -451,8 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 
         ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
 
-        vmw_bo_unreference(&buf);
-        drm_gem_object_put(&buf->tbo.base);
+        vmw_user_bo_unref(buf);
 
 out_unlock:
         mutex_unlock(&overlay->mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index e7226db8b242..1e81ff2422cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -809,8 +809,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
                                  shader_type, num_input_sig,
                                  num_output_sig, tfile, shader_handle);
 out_bad_arg:
-        vmw_bo_unreference(&buffer);
-        drm_gem_object_put(&buffer->tbo.base);
+        vmw_user_bo_unref(buffer);
         return ret;
 }
-- cgit v1.2.3-59-g8ed1b

From a94e7ccfc400c024976f3c2f31689ed843498b7c Mon Sep 17 00:00:00 2001
From: Imre Deak
Date: Tue, 22 Aug 2023 14:30:14 +0300
Subject: drm: Add an HPD poll helper to reschedule the poll work
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add a helper to reschedule drm_mode_config::output_poll_work after
polling has been enabled for a connector (and needing a reschedule,
since previously polling was disabled for all connectors and hence
output_poll_work was not running).

This is needed by the next patch fixing HPD polling on i915.

CC: stable@vger.kernel.org # 6.4+
Cc: Dmitry Baryshkov
Cc: dri-devel@lists.freedesktop.org
Reviewed-by: Jouni Högander
Reviewed-by: Dmitry Baryshkov
Signed-off-by: Imre Deak
Link: https://patchwork.freedesktop.org/patch/msgid/20230822113015.41224-1-imre.deak@intel.com
(cherry picked from commit fe2352fd64029918174de4b460dfe6df0c6911cd)
Signed-off-by: Rodrigo Vivi
---
 drivers/gpu/drm/drm_probe_helper.c | 68 ++++++++++++++++++++++++++------------
 include/drm/drm_probe_helper.h     |  1 +
 2 files changed, 47 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 2fb9bf901a2c..3f479483d7d8 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -262,6 +262,26 @@ static bool drm_kms_helper_enable_hpd(struct drm_device *dev)
 }
 
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
+static void reschedule_output_poll_work(struct drm_device *dev)
+{
+        unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
+
+        if (dev->mode_config.delayed_event)
+                /*
+                 * FIXME:
+                 *
+                 * Use short (1s) delay to handle the initial delayed event.
+                 * This delay should not be needed, but Optimus/nouveau will
+                 * fail in a mysterious way if the delayed event is handled as
+                 * soon as possible like it is done in
+                 * drm_helper_probe_single_connector_modes() in case the poll
+                 * was enabled before.
+                 */
+                delay = HZ;
+
+        schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
+}
+
 /**
  * drm_kms_helper_poll_enable - re-enable output polling.
 * @dev: drm_device
@@ -279,37 +299,41 @@ static bool drm_kms_helper_enable_hpd(struct drm_device *dev)
  */
 void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
-        bool poll = false;
-        unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
-
         if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll ||
             dev->mode_config.poll_running)
                 return;
 
-        poll = drm_kms_helper_enable_hpd(dev);
-
-        if (dev->mode_config.delayed_event) {
-                /*
-                 * FIXME:
-                 *
-                 * Use short (1s) delay to handle the initial delayed event.
-                 * This delay should not be needed, but Optimus/nouveau will
-                 * fail in a mysterious way if the delayed event is handled as
-                 * soon as possible like it is done in
-                 * drm_helper_probe_single_connector_modes() in case the poll
-                 * was enabled before.
-                 */
-                poll = true;
-                delay = HZ;
-        }
-
-        if (poll)
-                schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
+        if (drm_kms_helper_enable_hpd(dev) ||
+            dev->mode_config.delayed_event)
+                reschedule_output_poll_work(dev);
 
         dev->mode_config.poll_running = true;
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
+/**
+ * drm_kms_helper_poll_reschedule - reschedule the output polling work
+ * @dev: drm_device
+ *
+ * This function reschedules the output polling work, after polling for a
+ * connector has been enabled.
+ *
+ * Drivers must call this helper after enabling polling for a connector by
+ * setting %DRM_CONNECTOR_POLL_CONNECT / %DRM_CONNECTOR_POLL_DISCONNECT flags
+ * in drm_connector::polled. Note that after disabling polling by clearing these
+ * flags for a connector will stop the output polling work automatically if
+ * the polling is disabled for all other connectors as well.
+ *
+ * The function can be called only after polling has been enabled by calling
+ * drm_kms_helper_poll_init() / drm_kms_helper_poll_enable().
+ */
+void drm_kms_helper_poll_reschedule(struct drm_device *dev)
+{
+        if (dev->mode_config.poll_running)
+                reschedule_output_poll_work(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_reschedule);
+
 static enum drm_connector_status
 drm_helper_probe_detect_ctx(struct drm_connector *connector, bool force)
 {
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
index 4977e0ab72db..fad3c4003b2b 100644
--- a/include/drm/drm_probe_helper.h
+++ b/include/drm/drm_probe_helper.h
@@ -25,6 +25,7 @@ void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector);
 
 void drm_kms_helper_poll_disable(struct drm_device *dev);
 void drm_kms_helper_poll_enable(struct drm_device *dev);
+void drm_kms_helper_poll_reschedule(struct drm_device *dev);
 bool drm_kms_helper_is_poll_worker(void);
 
 enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc,
-- cgit v1.2.3-59-g8ed1b

From 1dcc437427bbcebc8381226352f7ade08a271191 Mon Sep 17 00:00:00 2001
From: Imre Deak
Date: Tue, 22 Aug 2023 14:30:15 +0300
Subject: drm/i915: Fix HPD polling, reenabling the output poll work as needed
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

After the commit in the Fixes: line below, HPD polling stopped working
on i915, since after that change calling drm_kms_helper_poll_enable()
doesn't restart drm_mode_config::output_poll_work if the work was
stopped (no connectors needing polling) and enabling polling for a
connector (during runtime suspend or detecting an HPD IRQ storm).

After the above change calling drm_kms_helper_poll_enable() is a nop
after it's been called already and polling for some connectors was
disabled/re-enabled.
Fix this by calling drm_kms_helper_poll_reschedule(), added in the
previous patch, instead; it reschedules the work whenever expected.

Fixes: d33a54e3991d ("drm/probe_helper: sort out poll_running vs poll_enabled")
CC: stable@vger.kernel.org # 6.4+
Cc: Dmitry Baryshkov
Cc: dri-devel@lists.freedesktop.org
Reviewed-by: Jouni Högander
Signed-off-by: Imre Deak
Link: https://patchwork.freedesktop.org/patch/msgid/20230822113015.41224-2-imre.deak@intel.com
(cherry picked from commit 50452f2f76852322620b63e62922b85e955abe94)
Signed-off-by: Rodrigo Vivi
---
 drivers/gpu/drm/i915/display/intel_hotplug.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 1160fa20433b..5eac7032bb5a 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -211,7 +211,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
 
         /* Enable polling and queue hotplug re-enabling. */
         if (hpd_disabled) {
-                drm_kms_helper_poll_enable(&dev_priv->drm);
+                drm_kms_helper_poll_reschedule(&dev_priv->drm);
                 mod_delayed_work(dev_priv->unordered_wq,
                                  &dev_priv->display.hotplug.reenable_work,
                                  msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
@@ -649,7 +649,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
         drm_connector_list_iter_end(&conn_iter);
 
         if (enabled)
-                drm_kms_helper_poll_enable(&dev_priv->drm);
+                drm_kms_helper_poll_reschedule(&dev_priv->drm);
 
         mutex_unlock(&dev_priv->drm.mode_config.mutex);
 
-- cgit v1.2.3-59-g8ed1b
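To make the interaction in the last two patches easier to follow, here is a rough,
self-contained C model (plain user-space code with invented names, not the DRM helpers
themselves) of why a dedicated reschedule entry point is needed once poll_enable()
becomes a one-shot guarded by poll_running: switching a single connector back to
polling later on must requeue the work even though polling is nominally already
"running".

#include <stdbool.h>
#include <stdio.h>

struct toy_poller {
        bool poll_running;              /* set once poll_enable() has run */
        bool work_scheduled;            /* stands in for output_poll_work */
};

static void toy_schedule_work(struct toy_poller *p)
{
        p->work_scheduled = true;
        printf("output poll work scheduled\n");
}

/* Models the post-d33a54e3991d poll_enable(): a nop once poll_running is set. */
static void toy_poll_enable(struct toy_poller *p, bool any_connector_polled)
{
        if (p->poll_running)
                return;

        if (any_connector_polled)
                toy_schedule_work(p);

        p->poll_running = true;
}

/* Models the new reschedule helper: requeue the work for an already
 * "running" poller, e.g. after an HPD IRQ storm switched a connector
 * over to polling. */
static void toy_poll_reschedule(struct toy_poller *p)
{
        if (p->poll_running)
                toy_schedule_work(p);
}

int main(void)
{
        struct toy_poller p = { 0 };

        toy_poll_enable(&p, false);     /* nothing polled yet: no work queued */

        toy_poll_enable(&p, true);      /* too late: early return, still no work */
        if (!p.work_scheduled)
                printf("poll_enable() alone cannot restart the work\n");

        toy_poll_reschedule(&p);        /* what the i915 fix calls instead */

        return p.work_scheduled ? 0 : 1;
}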