Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ObjectID.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_iommu.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_module.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 84
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 148
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 41
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table.c | 61
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_transform.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/os_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/include/gpio_service_interface.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 5
-rw-r--r--  drivers/gpu/drm/amd/include/navi10_enum.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 29
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 8
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_plane.c | 10
-rw-r--r--  drivers/gpu/drm/arm/malidp_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c | 9
-rw-r--r--  drivers/gpu/drm/aspeed/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 31
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c | 14
-rw-r--r--  drivers/gpu/drm/bridge/cdns-dsi.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 42
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c | 10
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c | 1
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_auth.c | 3
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 3
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper_internal.h | 10
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/drm_debugfs_crc.c | 4
-rw-r--r--  drivers/gpu/drm/drm_dp_aux_dev.c | 2
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 1
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 19
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 36
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 25
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c | 15
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 12
-rw-r--r--  drivers/gpu/drm/drm_kms_helper_common.c | 25
-rw-r--r--  drivers/gpu/drm/drm_panel_orientation_quirks.c | 85
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 14
-rw-r--r--  drivers/gpu/drm/drm_plane_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 19
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 9
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 43
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu.c | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 8
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 7
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dma.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c | 22
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 7
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.c | 34
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_acpi.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 28
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.c | 28
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 99
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 212
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 40
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 27
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c | 10
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 2
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 4
-rw-r--r--  drivers/gpu/drm/lima/lima_device.c | 1
-rw-r--r--  drivers/gpu/drm/mcde/mcde_drv.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_cec.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi_phy.c | 5
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 12
-rw-r--r--  drivers/gpu/drm/meson/meson_registers.h | 5
-rw-r--r--  drivers/gpu/drm/meson/meson_viu.c | 7
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_power.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 5
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 10
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 8
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 8
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 21
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c | 8
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c | 18
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 16
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 6
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c | 15
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c | 15
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 23
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c | 17
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 25
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 4
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c | 8
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_ctrl.c | 3
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 17
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 15
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c | 2
-rw-r--r--  drivers/gpu/drm/mxsfb/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_drv.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 1
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/panel/panel-innolux-p079zca.c | 10
-rw-r--r--  drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c | 8
-rw-r--r--  drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c | 14
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 6
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_device.c | 3
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.c | 4
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.h | 2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c | 14
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gpu.c | 14
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gpu.h | 2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_mmu.c | 44
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_regs.h | 6
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 5
-rw-r--r--  drivers/gpu/drm/qxl/qxl_dumb.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 43
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 3
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-core.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c | 118
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 14
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 2
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c | 5
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 3
-rw-r--r--  drivers/gpu/drm/sun4i/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_frontend.c | 36
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_frontend.h | 6
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c | 46
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.h | 7
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c | 7
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c | 26
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_mixer.h | 8
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 20
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/dsi.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/sor.c | 24
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_external.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 2
-rw-r--r--  drivers/gpu/drm/tve200/tve200_drv.c | 4
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 2
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c | 4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_bo.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c | 1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_dsi.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 18
-rw-r--r--  drivers/gpu/drm/vc4/vc4_txp.c | 8
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c | 3
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c | 23
-rw-r--r--  drivers/gpu/drm/vkms/vkms_composer.c | 2
-rw-r--r--  drivers/gpu/drm/vkms/vkms_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 33
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2
-rw-r--r--  drivers/gpu/drm/zte/Kconfig | 1
-rw-r--r--  drivers/gpu/ipu-v3/ipu-cpmem.c | 30
-rw-r--r--  drivers/gpu/ipu-v3/ipu-di.c | 5
315 files changed, 2547 insertions, 1023 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e67c194c2aca..649f17dfcf45 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -206,6 +206,7 @@ source "drivers/gpu/drm/arm/Kconfig"
config DRM_RADEON
tristate "ATI Radeon"
depends on DRM && PCI && MMU
+ depends on AGP || !AGP
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index 5b393622f592..a0f0a17e224f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -119,6 +119,7 @@
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
+#define CONNECTOR_OBJECT_ID_USBC 0x17
/* deleted */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d1e278e999ee..1b2fa8379830 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -762,7 +762,7 @@ enum amd_hw_ip_block_type {
MAX_HWIP
};
-#define HWIP_MAX_INSTANCE 8
+#define HWIP_MAX_INSTANCE 10
struct amd_powerplay {
void *pp_handle;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 82155ac3288a..64ee44c2fdd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -163,17 +163,28 @@ static int acp_poweron(struct generic_pm_domain *genpd)
return 0;
}
-static struct device *get_mfd_cell_dev(const char *device_name, int r)
+static int acp_genpd_add_device(struct device *dev, void *data)
{
- char auto_dev_name[25];
- struct device *dev;
+ struct generic_pm_domain *gpd = data;
+ int ret;
- snprintf(auto_dev_name, sizeof(auto_dev_name),
- "%s.%d.auto", device_name, r);
- dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
- dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
+ ret = pm_genpd_add_device(gpd, dev);
+ if (ret)
+ dev_err(dev, "Failed to add dev to genpd %d\n", ret);
- return dev;
+ return ret;
+}
+
+static int acp_genpd_remove_device(struct device *dev, void *data)
+{
+ int ret;
+
+ ret = pm_genpd_remove_device(dev);
+ if (ret)
+ dev_err(dev, "Failed to remove dev from genpd %d\n", ret);
+
+ /* Continue to remove */
+ return 0;
}
/**
@@ -184,11 +195,10 @@ static struct device *get_mfd_cell_dev(const char *device_name, int r)
*/
static int acp_hw_init(void *handle)
{
- int r, i;
+ int r;
uint64_t acp_base;
u32 val = 0;
u32 count = 0;
- struct device *dev;
struct i2s_platform_data *i2s_pdata = NULL;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -344,15 +354,10 @@ static int acp_hw_init(void *handle)
if (r)
goto failure;
- for (i = 0; i < ACP_DEVS ; i++) {
- dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
- r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
- if (r) {
- dev_err(dev, "Failed to add dev to genpd\n");
- goto failure;
- }
- }
-
+ r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
+ acp_genpd_add_device);
+ if (r)
+ goto failure;
/* Assert Soft reset of ACP */
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
@@ -413,10 +418,8 @@ failure:
*/
static int acp_hw_fini(void *handle)
{
- int i, ret;
u32 val = 0;
u32 count = 0;
- struct device *dev;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* return early if no ACP */
@@ -461,13 +464,8 @@ static int acp_hw_fini(void *handle)
udelay(100);
}
- for (i = 0; i < ACP_DEVS ; i++) {
- dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
- ret = pm_genpd_remove_device(dev);
- /* If removal fails, dont giveup and try rest */
- if (ret)
- dev_err(dev, "remove dev from genpd failed\n");
- }
+ device_for_each_child(adev->acp.parent, NULL,
+ acp_genpd_remove_device);
mfd_remove_devices(adev->acp.parent);
kfree(adev->acp.acp_res);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index ce30d4e8bf25..f7c4337c1ffe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -165,7 +165,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_tile_config = amdgpu_amdkfd_get_tile_config,
};
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions()
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void)
{
return (struct kfd2kgd_calls *)&kfd2kgd;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index f3fa271e3394..49b52ac3e473 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -55,12 +55,6 @@ static struct {
spinlock_t mem_limit_lock;
} kfd_mem_limit;
-/* Struct used for amdgpu_amdkfd_bo_validate */
-struct amdgpu_vm_parser {
- uint32_t domain;
- bool wait;
-};
-
static const char * const domain_bit_to_string[] = {
"CPU",
"GTT",
@@ -293,11 +287,9 @@ validate_fail:
return ret;
}
-static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
+static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{
- struct amdgpu_vm_parser *p = param;
-
- return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
+ return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
}
/* vm_validate_pt_pd_bos - Validate page table and directory BOs
@@ -311,20 +303,15 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
struct amdgpu_bo *pd = vm->root.base.bo;
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
- struct amdgpu_vm_parser param;
int ret;
- param.domain = AMDGPU_GEM_DOMAIN_VRAM;
- param.wait = false;
-
- ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
- &param);
+ ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
if (ret) {
pr_err("amdgpu: failed to validate PT BOs\n");
return ret;
}
- ret = amdgpu_amdkfd_validate(&param, pd);
+ ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
if (ret) {
pr_err("amdgpu: failed to validate PD\n");
return ret;
@@ -964,11 +951,15 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
struct dma_fence **ef)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct drm_file *drm_priv = filp->private_data;
- struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
- struct amdgpu_vm *avm = &drv_priv->vm;
+ struct amdgpu_fpriv *drv_priv;
+ struct amdgpu_vm *avm;
int ret;
+ ret = amdgpu_file_to_fpriv(filp, &drv_priv);
+ if (ret)
+ return ret;
+ avm = &drv_priv->vm;
+
/* Already a compute VM? */
if (avm->process_info)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 85b0515c0fdc..e0d2f79571ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -61,7 +61,7 @@ static void amdgpu_bo_list_free(struct kref *ref)
int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries, struct amdgpu_bo_list **result)
+ size_t num_entries, struct amdgpu_bo_list **result)
{
unsigned last_entry = 0, first_userptr = num_entries;
struct amdgpu_bo_list_entry *array;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index a130e766cbdb..529d52a204cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -60,7 +60,7 @@ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
int amdgpu_bo_list_create(struct amdgpu_device *adev,
struct drm_file *filp,
struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries,
+ size_t num_entries,
struct amdgpu_bo_list **list);
static inline struct amdgpu_bo_list_entry *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index cda0a76a733d..0e1cacf73169 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -389,6 +389,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder)
native_mode->vdisplay != 0 &&
native_mode->clock != 0) {
mode = drm_mode_duplicate(dev, native_mode);
+ if (!mode)
+ return NULL;
+
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
@@ -403,6 +406,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder)
* simpler.
*/
mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+ if (!mode)
+ return NULL;
+
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
}
@@ -829,6 +835,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
amdgpu_connector_get_edid(connector);
ret = amdgpu_connector_ddc_get_modes(connector);
+ amdgpu_get_native_mode(connector);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 82823d9a8ba8..7eeb98fe50ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -114,7 +114,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
int ret;
if (cs->in.num_chunks == 0)
- return 0;
+ return -EINVAL;
chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (!chunk_array)
@@ -1542,6 +1542,7 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
return 0;
default:
+ dma_fence_put(fence);
return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 2cdaf3b2a721..39ca0718ced0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -351,7 +351,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
{
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr;
- unsigned long ras_counter;
if (!fpriv)
return -EINVAL;
@@ -376,21 +375,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
if (atomic_read(&ctx->guilty))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
- /*query ue count*/
- ras_counter = amdgpu_ras_query_error_count(adev, false);
- /*ras counter is monotonic increasing*/
- if (ras_counter != ctx->ras_counter_ue) {
- out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
- ctx->ras_counter_ue = ras_counter;
- }
-
- /*query ce count*/
- ras_counter = amdgpu_ras_query_error_count(adev, true);
- if (ras_counter != ctx->ras_counter_ce) {
- out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
- ctx->ras_counter_ce = ras_counter;
- }
-
mutex_unlock(&mgr->lock);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 700e26b69abc..a9a81e55777b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -240,7 +240,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
while (size) {
uint32_t value;
- value = RREG32_PCIE(*pos >> 2);
+ value = RREG32_PCIE(*pos);
r = put_user(value, (uint32_t *)buf);
if (r)
return r;
@@ -283,7 +283,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
if (r)
return r;
- WREG32_PCIE(*pos >> 2, value);
+ WREG32_PCIE(*pos, value);
result += 4;
buf += 4;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4105fbf57167..d0e1fd011de5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2057,11 +2057,11 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_remove_device(adev);
- amdgpu_amdkfd_device_fini(adev);
-
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+ amdgpu_amdkfd_device_fini(adev);
+
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.hw)
@@ -2291,7 +2291,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_IH,
};
- for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
+ for (i = 0; i < adev->num_ip_blocks; i++) {
int j;
struct amdgpu_ip_block *block;
@@ -3704,7 +3704,6 @@ out:
r = amdgpu_ib_ring_tests(tmp_adev);
if (r) {
dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
- r = amdgpu_device_ip_suspend(tmp_adev);
need_full_reset = true;
r = -EAGAIN;
goto end;
@@ -3890,7 +3889,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
amdgpu_device_lock_adev(tmp_adev, false);
r = amdgpu_device_pre_asic_reset(tmp_adev,
- NULL,
+ (tmp_adev == adev) ? job : NULL,
&need_full_reset);
/*TODO Should we stop ?*/
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index fa2c0f29ad4d..ffd754713522 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -633,7 +633,7 @@ MODULE_PARM_DESC(sched_policy,
* Maximum number of processes that HWS can schedule concurrently. The maximum is the
* number of VMIDs assigned to the HWS, which is also the default.
*/
-int hws_max_conc_proc = 8;
+int hws_max_conc_proc = -1;
module_param(hws_max_conc_proc, int, 0444);
MODULE_PARM_DESC(hws_max_conc_proc,
"Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");
@@ -1011,6 +1011,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+ {0x1002, 0x731E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
/* Navi14 */
{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index eaa5e7b7c19d..46522804c7d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -146,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags,
- ttm_bo_type_kernel, NULL, &gobj);
+ ttm_bo_type_device, NULL, &gobj);
if (ret) {
pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
return -ENOMEM;
@@ -289,10 +289,13 @@ out:
static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
{
struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
+ int i;
drm_fb_helper_unregister_fbi(&rfbdev->helper);
if (rfb->base.obj[0]) {
+ for (i = 0; i < rfb->base.format->num_planes; i++)
+ drm_gem_object_put(rfb->base.obj[0]);
amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
rfb->base.obj[0] = NULL;
drm_framebuffer_unregister_private(&rfb->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 5fa5158d18ee..9d61d1b7a569 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -561,6 +561,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
uint64_t va_flags;
+ uint64_t vm_size;
int r = 0;
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
@@ -581,6 +582,15 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->va_address &= AMDGPU_GMC_HOLE_MASK;
+ vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
+ vm_size -= AMDGPU_VA_RESERVED_SIZE;
+ if (args->va_address + args->map_size > vm_size) {
+ dev_dbg(&dev->pdev->dev,
+ "va_address 0x%llx is in top reserved area 0x%llx\n",
+ args->va_address + args->map_size, vm_size);
+ return -EINVAL;
+ }
+
if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
args->flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index f9bef3154b99..2659202f2026 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -263,7 +263,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
* adev->gfx.mec.num_pipe_per_mec
* adev->gfx.mec.num_queue_per_pipe;
- while (queue_bit-- >= 0) {
+ while (--queue_bit >= 0) {
if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 70dbe343f51d..89cecdba81ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -339,7 +339,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
void
amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
{
- u8 val;
+ u8 val = 0;
if (!amdgpu_connector->router.ddc_valid)
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 2a3f5ec298db..76429932035e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -469,7 +469,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
- if (!src)
+ if (!src || !src->funcs || !src->funcs->set)
continue;
for (k = 0; k < src->num_types; k++)
amdgpu_irq_update(adev, src, k);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 28361a9c5add..4cc0dd494a42 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -200,7 +200,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
c++;
}
- BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
+ BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);
placement->num_placement = c;
placement->placement = places;
@@ -1305,7 +1305,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
return;
- dma_resv_lock(bo->base.resv, NULL);
+ if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
+ return;
r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
if (!WARN_ON(r)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 3f744e72912f..bcb7ab5c602d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -870,7 +870,7 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
int ret;
- long level;
+ unsigned long level;
char *sub_str = NULL;
char *tmp;
char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
@@ -886,8 +886,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
while (tmp[0]) {
sub_str = strsep(&tmp, delimiter);
if (strlen(sub_str)) {
- ret = kstrtol(sub_str, 0, &level);
- if (ret)
+ ret = kstrtoul(sub_str, 0, &level);
+ if (ret || level > 31)
return -EINVAL;
*mask |= 1 << level;
} else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 8a32b5c93778..bd7ae3e130b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -138,7 +138,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
return ret;
}
- __decode_table_header_from_buff(hdr, &buff[2]);
+ __decode_table_header_from_buff(hdr, buff);
if (hdr->header == EEPROM_TABLE_HDR_VAL) {
control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) /
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 91899d28fa72..e8132210c244 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -21,7 +21,7 @@
*
*/
-#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _AMDGPU_TRACE_H_
#include <linux/stringify.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index f15ded1ce905..870dd78d5a21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -967,6 +967,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
release_sg:
kfree(ttm->sg);
+ ttm->sg = NULL;
return r;
}
@@ -983,7 +984,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
/* double check that we don't free the table twice */
- if (!ttm->sg->sgl)
+ if (!ttm->sg || !ttm->sg->sgl)
return;
/* unmap the pages mapped to the device */
@@ -1299,6 +1300,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
if (gtt && gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg);
+ ttm->sg = NULL;
ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
return;
}
@@ -1974,7 +1976,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
unsigned i;
int r;
- if (direct_submit && !ring->sched.ready) {
+ if (!direct_submit && !ring->sched.ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 3a6115ad0196..f3250db7f9c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -568,8 +568,7 @@ int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
void amdgpu_ucode_free_bo(struct amdgpu_device *adev)
{
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT)
- amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
+ amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
&adev->firmware.fw_buf_mc,
&adev->firmware.fw_buf_ptr);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b2c364b8695f..cfa8324b9f51 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -231,7 +231,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
if ((adev->asic_type == CHIP_POLARIS10 ||
adev->asic_type == CHIP_POLARIS11) &&
(adev->uvd.fw_version < FW_1_66_16))
- DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
+ DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
version_major, version_minor);
} else {
unsigned int enc_major, enc_minor, dec_minor;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6335bd4ae374..fb47ddc6f7f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2123,8 +2123,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
uint64_t eaddr;
/* validate the parameters */
- if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
- size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+ size == 0 || size & ~PAGE_MASK)
return -EINVAL;
/* make sure object fit at this offset */
@@ -2188,8 +2188,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
int r;
/* validate the parameters */
- if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
- size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+ size == 0 || size & ~PAGE_MASK)
return -EINVAL;
/* make sure object fit at this offset */
@@ -2333,7 +2333,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
after->start = eaddr + 1;
after->last = tmp->last;
after->offset = tmp->offset;
- after->offset += after->start - tmp->start;
+ after->offset += (after->start - tmp->start) << PAGE_SHIFT;
after->flags = tmp->flags;
after->bo_va = tmp->bo_va;
list_add(&after->list, &tmp->bo_va->invalids);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 2eda3a8c330d..4a64825b53cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -105,8 +105,8 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_MMHUB_0 1
#define AMDGPU_MMHUB_1 2
-/* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE (1ULL << 20)
+/* Reserve 2MB at top/bottom of address space for kernel use */
+#define AMDGPU_VA_RESERVED_SIZE (2ULL << 20)
/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID 1
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 65aae75f80fd..ce1048bad158 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -311,15 +311,22 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
}
+/*
+ * NOTE psp_xgmi_node_info.num_hops layout is as follows:
+ * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
+ * num_hops[5:3] = reserved
+ * num_hops[2:0] = number of hops
+ */
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev)
{
struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ uint8_t num_hops_mask = 0x7;
int i;
for (i = 0 ; i < top->num_nodes; ++i)
if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
- return top->nodes[i].num_hops;
+ return top->nodes[i].num_hops & num_hops_mask;
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 4af9acc2dc4f..450ad7d5e21a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1071,22 +1071,19 @@ static int cik_sdma_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 tmp = RREG32(mmSRBM_STATUS2);
+ u32 tmp;
- if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
- /* sdma0 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
- tmp |= SDMA0_F32_CNTL__HALT_MASK;
- WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
- }
- if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
- /* sdma1 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
- tmp |= SDMA0_F32_CNTL__HALT_MASK;
- WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
- }
+ /* sdma0 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+
+ /* sdma1 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
if (srbm_soft_reset) {
tmp = RREG32(mmSRBM_SOFT_RESET);
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 1dca0cabc326..13520d173296 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -193,19 +193,30 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
- if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
- /* When a ring buffer overflow happen start parsing interrupt
- * from the last not overwritten vector (wptr + 16). Hopefully
- * this should allow us to catchup.
- */
- dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
- ih->rptr = (wptr + 16) & ih->ptr_mask;
- tmp = RREG32(mmIH_RB_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32(mmIH_RB_CNTL, tmp);
- }
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ /* Double check that the overflow wasn't already cleared. */
+ wptr = RREG32(mmIH_RB_WPTR);
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+ /* When a ring buffer overflow happen start parsing interrupt
+ * from the last not overwritten vector (wptr + 16). Hopefully
+ * this should allow us to catchup.
+ */
+ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
+ tmp = RREG32(mmIH_RB_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32(mmIH_RB_CNTL, tmp);
+
+
+out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index d17edc850427..1d8739a4fbca 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -190,9 +190,10 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
@@ -210,12 +211,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
};
static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 90dcc7afc9c4..5906a8951a6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2906,8 +2906,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GDS |
AMD_PG_SUPPORT_RLC_SMU_HS)) {
- WREG32(mmRLC_JUMP_TABLE_RESTORE,
- adev->gfx.rlc.cp_table_gpu_addr >> 8);
+ WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE,
+ adev->gfx.rlc.cp_table_gpu_addr >> 8);
gfx_v9_0_init_gfx_power_gating(adev);
}
}
@@ -4661,7 +4661,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
amdgpu_gfx_rlc_enter_safe_mode(adev);
/* Enable 3D CGCG/CGLS */
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+ if (enable) {
/* write cmd to clear cgcg/cgls ov */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
/* unset CGCG override */
@@ -4673,8 +4673,12 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
/* enable 3Dcgcg FSM(0x0000363f) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
- data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
- RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
+ data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+ RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+ else
+ data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
+
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index f642e066e67a..85ee0e849647 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -903,6 +903,8 @@ static int gmc_v10_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ gmc_v10_0_gart_disable(adev);
+
if (amdgpu_sriov_vf(adev)) {
/* full access mode, so don't touch any GMC register */
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
@@ -910,7 +912,6 @@ static int gmc_v10_0_hw_fini(void *handle)
}
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
- gmc_v10_0_gart_disable(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 9fb1765e92d1..e9f5de35f795 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -863,12 +863,12 @@ static int gmc_v6_0_sw_init(void *handle)
adev->gmc.mc_mask = 0xffffffffffULL;
- r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
+ r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
if (r) {
dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
return r;
}
- adev->need_swiotlb = drm_need_swiotlb(44);
+ adev->need_swiotlb = drm_need_swiotlb(40);
r = gmc_v6_0_init_microcode(adev);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index ea764dd9245d..2975331a7b86 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -524,10 +524,10 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
int r;
+ u32 tmp;
adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
if (!adev->gmc.vram_width) {
- u32 tmp;
int chansize, numchan;
/* Get VRAM informations */
@@ -571,8 +571,15 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
adev->gmc.vram_width = numchan * chansize;
}
/* size in MB on si */
- adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
- adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ tmp = RREG32(mmCONFIG_MEMSIZE);
+ /* some boards may have garbage in the upper 16 bits */
+ if (tmp & 0xffff0000) {
+ DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
+ if (tmp & 0xffff)
+ tmp &= 0xffff;
+ }
+ adev->gmc.mc_vram_size = tmp * 1024ULL * 1024ULL;
+ adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
if (!(adev->flags & AMD_IS_APU)) {
r = amdgpu_device_resize_fb_bar(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 688111ef814d..63205de4a565 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1526,6 +1526,8 @@ static int gmc_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ gmc_v9_0_gart_disable(adev);
+
if (amdgpu_sriov_vf(adev)) {
/* full access mode, so don't touch any GMC register */
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
@@ -1534,7 +1536,6 @@ static int gmc_v9_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
- gmc_v9_0_gart_disable(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index a13dd9a51149..7d165f024f07 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -193,19 +193,29 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
- if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
- /* When a ring buffer overflow happen start parsing interrupt
- * from the last not overwritten vector (wptr + 16). Hopefully
- * this should allow us to catchup.
- */
- dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
- ih->rptr = (wptr + 16) & ih->ptr_mask;
- tmp = RREG32(mmIH_RB_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32(mmIH_RB_CNTL, tmp);
- }
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ /* Double check that the overflow wasn't already cleared. */
+ wptr = RREG32(mmIH_RB_WPTR);
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ /* When a ring buffer overflow happen start parsing interrupt
+ * from the last not overwritten vector (wptr + 16). Hopefully
+ * this should allow us to catchup.
+ */
+ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
+ tmp = RREG32(mmIH_RB_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32(mmIH_RB_CNTL, tmp);
+
+
+out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 4b3faaccecb9..c8a5a5698edd 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -1609,19 +1609,7 @@ static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
{
- u8 i;
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
-
- for (i = 0; i < table->count; i++) {
- if (table->entries[i].clk >= 0) /* XXX */
- break;
- }
-
- if (i >= table->count)
- i = table->count - 1;
-
- return i;
+ return 0;
}
static void kv_update_acp_boot_level(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 74a9fe8e0cfb..8c54f0be51ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -44,7 +44,7 @@ enum psp_gfx_crtl_cmd_id
GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
GFX_CTRL_CMD_ID_GBR_IH_SET = 0x00080000, /* set Gbr IH_RB_CNTL registers */
- GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to psp for updating write pointer of vf */
+ GFX_CTRL_CMD_ID_CONSUME_CMD = 0x00090000, /* send interrupt to psp for updating write pointer of vf */
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 465351184bc3..60180eca84a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -100,6 +100,10 @@ static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 4cb4c891120b..9931d5c17cfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -7250,17 +7250,15 @@ static int si_parse_power_table(struct amdgpu_device *adev)
if (!adev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
- for (i = 0; i < state_array->ucNumEntries; i++) {
+ for (adev->pm.dpm.num_ps = 0, i = 0; i < state_array->ucNumEntries; i++) {
u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL);
- if (ps == NULL) {
- kfree(adev->pm.dpm.ps);
+ if (ps == NULL)
return -ENOMEM;
- }
adev->pm.dpm.ps[i].ps_priv = ps;
si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
non_clock_info,
@@ -7282,8 +7280,8 @@ static int si_parse_power_table(struct amdgpu_device *adev)
k++;
}
power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ adev->pm.dpm.num_ps++;
}
- adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
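The hunk above increments adev->pm.dpm.num_ps per successfully parsed state instead of setting it to the raw ATOM table size after the loop, and drops the in-loop kfree of the whole ps array; with an accurate count, an -ENOMEM in the middle of the loop leaves the table describing only what was actually built, presumably so the normal teardown path can free it. A simplified, compile-only sketch of the pattern with hypothetical names (not the driver's types):

#include <stdlib.h>

struct entry { void *priv; };
struct table { struct entry *entries; int count; };

/* Count only fully-initialized entries so an early error leaves a consistent table. */
static int parse_entries(struct table *t, int n)
{
	int i;

	for (t->count = 0, i = 0; i < n; i++) {
		void *priv = malloc(64);

		if (!priv)
			return -1;	/* t->count still matches what was built */
		t->entries[i].priv = priv;
		t->count++;
	}
	return 0;
}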
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index c086262cc181..b368496ed685 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -276,6 +276,8 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
u32 reference_clock = adev->clock.spll.reference_freq;
+ if (adev->asic_type == CHIP_RENOIR)
+ return 10000;
if (adev->asic_type == CHIP_RAVEN)
return reference_clock / 4;
@@ -1130,7 +1132,6 @@ static int soc15_common_early_init(void *handle)
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
- AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
@@ -1142,16 +1143,17 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS;
+ /*
+ * MMHUB PG needs to be disabled for Picasso for
+ * stability reasons.
+ */
adev->pg_flags = AMD_PG_SUPPORT_SDMA |
- AMD_PG_SUPPORT_MMHUB |
- AMD_PG_SUPPORT_VCN |
- AMD_PG_SUPPORT_VCN_DPG;
+ AMD_PG_SUPPORT_VCN;
} else {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_RLC_LS |
AMD_CG_SUPPORT_GFX_CP_LS |
- AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index e40140bf6699..db0a3bda13fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -195,19 +195,30 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
- if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
- /* When a ring buffer overflow happen start parsing interrupt
- * from the last not overwritten vector (wptr + 16). Hopefully
- * this should allow us to catchup.
- */
- dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
- ih->rptr = (wptr + 16) & ih->ptr_mask;
- tmp = RREG32(mmIH_RB_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32(mmIH_RB_CNTL, tmp);
- }
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ /* Double check that the overflow wasn't already cleared. */
+ wptr = RREG32(mmIH_RB_WPTR);
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last vector that was not overwritten (wptr + 16).
+ * Hopefully this should allow us to catch up.
+ */
+
+ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
+ tmp = RREG32(mmIH_RB_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32(mmIH_RB_CNTL, tmp);
+
+out:
return (wptr & ih->ptr_mask);
}
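The cz/iceland and tonga get_wptr hunks apply the same overflow recovery: bail out early when no overflow is flagged, re-read the WPTR register to confirm the overflow is still pending, clear the flag, advance rptr one 16-byte IH vector past wptr (per the comment above, the last vector that was not overwritten), and acknowledge the condition via WPTR_OVERFLOW_CLEAR in IH_RB_CNTL. A compile-only sketch of that control flow with stand-in helpers (the drivers use REG_GET_FIELD/REG_SET_FIELD and RREG32/WREG32):

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the driver's register field and MMIO helpers. */
extern uint32_t read_ih_rb_wptr(void);
extern bool rb_overflow(uint32_t wptr);
extern uint32_t clear_rb_overflow(uint32_t wptr);
extern void set_wptr_overflow_clear(void);

static uint32_t ih_get_wptr(uint32_t *rptr, uint32_t shadow_wptr, uint32_t ptr_mask)
{
	uint32_t wptr = shadow_wptr;

	if (!rb_overflow(wptr))
		goto out;

	/* Double check against the register; the overflow may already be cleared. */
	wptr = read_ih_rb_wptr();
	if (!rb_overflow(wptr))
		goto out;

	wptr = clear_rb_overflow(wptr);
	/* Resume reading one 16-byte vector past wptr so processing can catch up. */
	*rptr = (wptr + 16) & ptr_mask;
	set_wptr_overflow_clear();	/* WPTR_OVERFLOW_CLEAR in IH_RB_CNTL */
out:
	return wptr & ptr_mask;
}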
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 217084d56ab8..9deef20a0269 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -354,6 +354,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
+ amdgpu_bo_unpin(bo);
amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 4f0f0de83293..1bb0f3c0978a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -233,9 +233,13 @@ static int vcn_v1_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+ (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ }
ring->sched.ready = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index cd2cbe760e88..82327ee96f95 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -293,6 +293,8 @@ static int vcn_v2_0_hw_fini(void *handle)
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
int i;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 9d778a0b2c5e..4c9a1633b02a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -302,6 +302,8 @@ static int vcn_v2_5_hw_fini(void *handle)
struct amdgpu_ring *ring;
int i;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 1d3cd5c50d5f..4a0ef9268918 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1664,6 +1664,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
}
mutex_unlock(&p->mutex);
+ dma_buf_put(dmabuf);
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
@@ -1673,6 +1674,7 @@ err_free:
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
err_unlock:
mutex_unlock(&p->mutex);
+ dma_buf_put(dmabuf);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 66387caf966e..3685e89415d5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -758,7 +758,7 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
/* Fetch the CRAT table from ACPI */
status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
if (status == AE_NOT_FOUND) {
- pr_warn("CRAT table not found\n");
+ pr_info("CRAT table not found\n");
return -ENODATA;
} else if (ACPI_FAILURE(status)) {
const char *err = acpi_format_exception(status);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index 511712c2e382..673d5e34f213 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -33,6 +33,11 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
return single_open(file, show, NULL);
}
+static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data)
+{
+ seq_printf(m, "echo gpu_id > hang_hws\n");
+ return 0;
+}
static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
const char __user *user_buf, size_t size, loff_t *ppos)
@@ -94,7 +99,7 @@ void kfd_debugfs_init(void)
debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
- NULL, &kfd_debugfs_hang_hws_fops);
+ kfd_debugfs_hang_hws_read, &kfd_debugfs_hang_hws_fops);
}
void kfd_debugfs_fini(void)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index ad9483b9eea3..60ee1a832112 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -609,15 +609,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
- kfd->vm_info.first_vmid_kfd + 1;
/* Verify module parameters regarding mapped process number*/
- if ((hws_max_conc_proc < 0)
- || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
- dev_err(kfd_device,
- "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
- hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
- kfd->vm_info.vmid_num_kfd);
+ if (hws_max_conc_proc >= 0)
+ kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd);
+ else
kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
- } else
- kfd->max_proc_per_quantum = hws_max_conc_proc;
/* Allocate global GWS that is shared by all KFD processes */
if (hws_gws_support && amdgpu_amdkfd_alloc_gws(kfd->kgd,
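The rewritten check treats a negative hws_max_conc_proc as "no limit beyond the available KFD VMIDs" and silently clamps larger values, instead of logging an error and ignoring the parameter. The resulting rule is roughly this (simplified types, not the driver's exact code):

/* Sketch of the clamping rule for max_proc_per_quantum. */
static unsigned int max_proc_per_quantum(int hws_max_conc_proc, unsigned int vmid_num_kfd)
{
	if (hws_max_conc_proc < 0)
		return vmid_num_kfd;
	return (unsigned int)hws_max_conc_proc < vmid_num_kfd ?
		(unsigned int)hws_max_conc_proc : vmid_num_kfd;
}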
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e9a278440079..723ec6c2830d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1011,6 +1011,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
static int initialize_cpsch(struct device_queue_manager *dqm)
{
+ uint64_t num_sdma_queues;
+ uint64_t num_xgmi_sdma_queues;
+
pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
mutex_init(&dqm->lock_hidden);
@@ -1019,8 +1022,18 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->sdma_queue_count = 0;
dqm->xgmi_sdma_queue_count = 0;
dqm->active_runlist = false;
- dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
- dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
+
+ num_sdma_queues = get_num_sdma_queues(dqm);
+ if (num_sdma_queues >= BITS_PER_TYPE(dqm->sdma_bitmap))
+ dqm->sdma_bitmap = ULLONG_MAX;
+ else
+ dqm->sdma_bitmap = (BIT_ULL(num_sdma_queues) - 1);
+
+ num_xgmi_sdma_queues = get_num_xgmi_sdma_queues(dqm);
+ if (num_xgmi_sdma_queues >= BITS_PER_TYPE(dqm->xgmi_sdma_bitmap))
+ dqm->xgmi_sdma_bitmap = ULLONG_MAX;
+ else
+ dqm->xgmi_sdma_bitmap = (BIT_ULL(num_xgmi_sdma_queues) - 1);
INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
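The bitmap change above special-cases counts of 64 or more, presumably to stay clear of undefined shifts at the boundaries: the old ~0ULL >> (64 - n) expression shifts by 64 when the queue count is 0, and BIT_ULL(64) would likewise be undefined, while BIT_ULL(n) - 1 is well defined for n in 0..63. A standalone equivalent of the new construction:

#include <stdint.h>

/* Mask with the low n bits set, avoiding undefined shifts for n == 0 or n >= 64. */
static uint64_t low_bits_mask(unsigned int n)
{
	if (n >= 64)
		return UINT64_MAX;
	return (UINT64_C(1) << n) - 1;	/* well defined for 0 <= n <= 63 */
}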
@@ -1571,7 +1584,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
int retval;
- struct queue *q, *next;
+ struct queue *q;
struct kernel_queue *kq, *kq_next;
struct mqd_manager *mqd_mgr;
struct device_process_node *cur, *next_dpn;
@@ -1626,24 +1639,26 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
qpd->reset_wavefronts = false;
}
- dqm_unlock(dqm);
-
- /* Outside the DQM lock because under the DQM lock we can't do
- * reclaim or take other locks that others hold while reclaiming.
- */
- if (found)
- kfd_dec_compute_active(dqm->dev);
-
/* Lastly, free mqd resources.
* Do free_mqd() after dqm_unlock to avoid circular locking.
*/
- list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
+ while (!list_empty(&qpd->queues_list)) {
+ q = list_first_entry(&qpd->queues_list, struct queue, list);
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
list_del(&q->list);
qpd->queue_count--;
+ dqm_unlock(dqm);
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+ dqm_lock(dqm);
}
+ dqm_unlock(dqm);
+
+ /* Outside the DQM lock because under the DQM lock we can't do
+ * reclaim or take other locks that others hold while reclaiming.
+ */
+ if (found)
+ kfd_dec_compute_active(dqm->dev);
return retval;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
index 72e4d61ac752..ad0593342333 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
@@ -58,8 +58,9 @@ static int update_qpd_v10(struct device_queue_manager *dqm,
/* check if sh_mem_config register already configured */
if (qpd->sh_mem_config == 0) {
qpd->sh_mem_config =
- SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
+ (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
+ (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
#if 0
/* TODO:
* This shouldn't be an issue with Navi10. Verify.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index d674d4b3340f..adbb2fec2e0f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -532,6 +532,8 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
event_waiters = kmalloc_array(num_events,
sizeof(struct kfd_event_waiter),
GFP_KERNEL);
+ if (!event_waiters)
+ return NULL;
for (i = 0; (event_waiters) && (i < num_events) ; i++) {
init_wait(&event_waiters[i].wait);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 5f35df23fb18..9266c8e76be7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -20,6 +20,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/kconfig.h>
+
+#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
+
#include <linux/printk.h>
#include <linux/device.h>
#include <linux/slab.h>
@@ -358,3 +362,5 @@ int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
return 0;
}
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
index dd23d9fdf6a8..afd420b01a0c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
@@ -23,7 +23,9 @@
#ifndef __KFD_IOMMU_H__
#define __KFD_IOMMU_H__
-#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
+#include <linux/kconfig.h>
+
+#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
#define KFD_SUPPORT_IOMMU_V2
@@ -46,6 +48,9 @@ static inline int kfd_iommu_check_device(struct kfd_dev *kfd)
}
static inline int kfd_iommu_device_init(struct kfd_dev *kfd)
{
+#if IS_MODULE(CONFIG_AMD_IOMMU_V2)
+ WARN_ONCE(1, "iommu_v2 module is not usable by built-in KFD");
+#endif
return 0;
}
@@ -73,6 +78,6 @@ static inline int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
return 0;
}
-#endif /* defined(CONFIG_AMD_IOMMU_V2) */
+#endif /* IS_REACHABLE(CONFIG_AMD_IOMMU_V2) */
#endif /* __KFD_IOMMU_H__ */
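The switch from defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2) to IS_REACHABLE(CONFIG_AMD_IOMMU_V2) matters when KFD is built in but amd_iommu_v2 is a module: the option is enabled, yet built-in code cannot call into a module, so the old test selected a path that could not actually be used. IS_REACHABLE() is true only when the symbols are callable from the current code (the dependency is built in, or both sides are modular), and the new stub warns once in the unusable combination. Illustration only, not additional driver code:

/* IS_ENABLED(CONFIG_FOO)   -> true for FOO=y or FOO=m
 * IS_REACHABLE(CONFIG_FOO) -> true for FOO=y, or FOO=m when this code is itself modular
 */
#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
/* safe to call into amd_iommu_v2 symbols */
#else
/* fall back to the inline stubs; built-in KFD cannot use a modular iommu_v2 */
#endif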
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 986ff52d5750..f4b7f7e6c40e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -82,7 +82,7 @@ static void kfd_exit(void)
kfd_chardev_exit();
}
-int kgd2kfd_init()
+int kgd2kfd_init(void)
{
return kfd_init();
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 88813dad731f..c021519af810 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -98,36 +98,78 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
uint32_t *se_mask)
{
struct kfd_cu_info cu_info;
- uint32_t cu_per_se[KFD_MAX_NUM_SE] = {0};
- int i, se, sh, cu = 0;
-
+ uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
+ int i, se, sh, cu;
amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);
if (cu_mask_count > cu_info.cu_active_number)
cu_mask_count = cu_info.cu_active_number;
+ /* Exceeding these bounds corrupts the stack and indicates a coding error.
+ * Returning with no CUs enabled will hang the queue, which should be
+ * attention-grabbing.
+ */
+ if (cu_info.num_shader_engines > KFD_MAX_NUM_SE) {
+ pr_err("Exceeded KFD_MAX_NUM_SE, chip reports %d\n", cu_info.num_shader_engines);
+ return;
+ }
+ if (cu_info.num_shader_arrays_per_engine > KFD_MAX_NUM_SH_PER_SE) {
+ pr_err("Exceeded KFD_MAX_NUM_SH, chip reports %d\n",
+ cu_info.num_shader_arrays_per_engine * cu_info.num_shader_engines);
+ return;
+ }
+ /* Count active CUs per SH.
+ *
+ * Some CUs in an SH may be disabled. HW expects disabled CUs to be
+ * represented in the high bits of each SH's enable mask (the upper and lower
+ * 16 bits of se_mask) and will take care of the actual distribution of
+ * disabled CUs within each SH automatically.
+ * Each half of se_mask must be filled only on bits 0-cu_per_sh[se][sh]-1.
+ *
+ * See note on Arcturus cu_bitmap layout in gfx_v9_0_get_cu_info.
+ */
for (se = 0; se < cu_info.num_shader_engines; se++)
for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
- cu_per_se[se] += hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
-
- /* Symmetrically map cu_mask to all SEs:
- * cu_mask[0] bit0 -> se_mask[0] bit0;
- * cu_mask[0] bit1 -> se_mask[1] bit0;
- * ... (if # SE is 4)
- * cu_mask[0] bit4 -> se_mask[0] bit1;
+ cu_per_sh[se][sh] = hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
+
+ /* Symmetrically map cu_mask to all SEs & SHs:
+ * se_mask programs up to 2 SH in the upper and lower 16 bits.
+ *
+ * Examples
+ * Assuming 1 SH/SE, 4 SEs:
+ * cu_mask[0] bit0 -> se_mask[0] bit0
+ * cu_mask[0] bit1 -> se_mask[1] bit0
+ * ...
+ * cu_mask[0] bit4 -> se_mask[0] bit1
+ * ...
+ *
+ * Assuming 2 SH/SE, 4 SEs
+ * cu_mask[0] bit0 -> se_mask[0] bit0 (SE0,SH0,CU0)
+ * cu_mask[0] bit1 -> se_mask[1] bit0 (SE1,SH0,CU0)
+ * ...
+ * cu_mask[0] bit4 -> se_mask[0] bit16 (SE0,SH1,CU0)
+ * cu_mask[0] bit5 -> se_mask[1] bit16 (SE1,SH1,CU0)
+ * ...
+ * cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
* ...
+ *
+ * First ensure all CUs are disabled, then enable user specified CUs.
*/
- se = 0;
- for (i = 0; i < cu_mask_count; i++) {
- if (cu_mask[i / 32] & (1 << (i % 32)))
- se_mask[se] |= 1 << cu;
-
- do {
- se++;
- if (se == cu_info.num_shader_engines) {
- se = 0;
- cu++;
+ for (i = 0; i < cu_info.num_shader_engines; i++)
+ se_mask[i] = 0;
+
+ i = 0;
+ for (cu = 0; cu < 16; cu++) {
+ for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
+ for (se = 0; se < cu_info.num_shader_engines; se++) {
+ if (cu_per_sh[se][sh] > cu) {
+ if (cu_mask[i / 32] & (1 << (i % 32)))
+ se_mask[se] |= 1 << (cu + sh * 16);
+ i++;
+ if (i == cu_mask_count)
+ return;
+ }
}
- } while (cu >= cu_per_se[se] && cu < 32);
+ }
}
}
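The rewritten mapping iterates CU index, then shader array, then shader engine, so consecutive cu_mask bits are spread across engines first, then across the second array of each engine (the upper 16 bits of that engine's se_mask word), and only then move on to the next CU within an array. A standalone sketch with a fixed 4-SE / 2-SH-per-SE layout (sizes are illustrative, not read from the hardware as the driver does):

#include <stdint.h>

#define NUM_SE 4
#define NUM_SH 2

/* Spread cu_mask bits symmetrically across SEs and SHs; SH1 uses bits 16..31 of se_mask[se]. */
static void map_cu_mask(const uint32_t *cu_mask, unsigned int cu_mask_count,
			const int cu_per_sh[NUM_SE][NUM_SH], uint32_t se_mask[NUM_SE])
{
	unsigned int i = 0;
	int cu, sh, se;

	for (se = 0; se < NUM_SE; se++)
		se_mask[se] = 0;

	for (cu = 0; cu < 16; cu++) {
		for (sh = 0; sh < NUM_SH; sh++) {
			for (se = 0; se < NUM_SE; se++) {
				if (cu_per_sh[se][sh] <= cu)
					continue;	/* this array has no CU at this index */
				if (cu_mask[i / 32] & (1u << (i % 32)))
					se_mask[se] |= 1u << (cu + sh * 16);
				if (++i == cu_mask_count)
					return;
			}
		}
	}
}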
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index fbdb16418847..4edc012e3138 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -27,6 +27,7 @@
#include "kfd_priv.h"
#define KFD_MAX_NUM_SE 8
+#define KFD_MAX_NUM_SH_PER_SE 2
/**
* struct mqd_manager
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2384aa018993..de33864af70b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -664,6 +664,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.asic_id.pci_revision_id = adev->rev_id;
init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+ init_data.asic_id.chip_id = adev->pdev->device;
init_data.asic_id.vram_width = adev->gmc.vram_width;
/* TODO: initialize init_data.asic_id.vram_type here!!!! */
@@ -1093,8 +1094,8 @@ static void emulated_link_detect(struct dc_link *link)
link->type = dc_connection_none;
prev_sink = link->local_sink;
- if (prev_sink != NULL)
- dc_sink_retain(prev_sink);
+ if (prev_sink)
+ dc_sink_release(prev_sink);
switch (link->connector_signal) {
case SIGNAL_TYPE_HDMI_TYPE_A: {
@@ -1209,7 +1210,8 @@ static int dm_resume(void *handle)
* this is the case when traversing through already created
* MST connectors, should be skipped
*/
- if (aconnector->mst_port)
+ if (aconnector->dc_link &&
+ aconnector->dc_link->type == dc_connection_mst_branch)
continue;
mutex_lock(&aconnector->hpd_lock);
@@ -1417,8 +1419,10 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
* TODO: check if we still need the S3 mode update workaround.
* If yes, put it here.
*/
- if (aconnector->dc_sink)
+ if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(connector, NULL);
+ dc_sink_release(aconnector->dc_sink);
+ }
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
@@ -1434,8 +1438,6 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
drm_connector_update_edid_property(connector,
aconnector->edid);
- drm_add_edid_modes(connector, aconnector->edid);
-
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
aconnector->edid);
@@ -2632,6 +2634,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
scaling_info->src_rect.x = state->src_x >> 16;
scaling_info->src_rect.y = state->src_y >> 16;
+ /*
+ * For reasons we don't (yet) fully understand, a non-zero
+ * src_y coordinate into an NV12 buffer can cause a
+ * system hang. To avoid hangs (and maybe be overly cautious)
+ * let's reject both non-zero src_x and src_y.
+ *
+ * We currently know of only one use-case to reproduce a
+ * scenario with non-zero src_x and src_y for NV12, which
+ * is to gesture the YouTube Android app into full screen
+ * on ChromeOS.
+ */
+ if (state->fb &&
+ state->fb->format->format == DRM_FORMAT_NV12 &&
+ (scaling_info->src_rect.x != 0 ||
+ scaling_info->src_rect.y != 0))
+ return -EINVAL;
+
scaling_info->src_rect.width = state->src_w >> 16;
if (scaling_info->src_rect.width == 0)
return -EINVAL;
@@ -3956,6 +3992,13 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
struct amdgpu_device *adev = connector->dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
+ /*
+ * Call only if mst_mgr was initialized before, since it's not done
+ * for all connector types.
+ */
+ if (aconnector->mst_mgr.dev)
+ drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
+
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
@@ -4971,6 +5014,9 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
mode = amdgpu_dm_create_common_mode(encoder,
common_modes[i].name, common_modes[i].w,
common_modes[i].h);
+ if (!mode)
+ continue;
+
drm_mode_probed_add(connector, mode);
amdgpu_dm_connector->num_modes++;
}
@@ -5365,10 +5411,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
int x, y;
int xorigin = 0, yorigin = 0;
- position->enable = false;
- position->x = 0;
- position->y = 0;
-
if (!crtc || !plane->state->fb)
return 0;
@@ -5420,7 +5462,7 @@ static void handle_cursor_update(struct drm_plane *plane,
struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
uint64_t address = afb ? afb->address : 0;
- struct dc_cursor_position position;
+ struct dc_cursor_position position = {0};
struct dc_cursor_attributes attributes;
int ret;
@@ -6458,14 +6500,14 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
ret = PTR_ERR_OR_ZERO(conn_state);
if (ret)
- goto err;
+ goto out;
/* Attach crtc to drm_atomic_state*/
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
ret = PTR_ERR_OR_ZERO(crtc_state);
if (ret)
- goto err;
+ goto out;
/* force a restore */
crtc_state->mode_changed = true;
@@ -6475,17 +6517,15 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
ret = PTR_ERR_OR_ZERO(plane_state);
if (ret)
- goto err;
-
+ goto out;
/* Call commit internally with the state we just constructed */
ret = drm_atomic_commit(state);
- if (!ret)
- return 0;
-err:
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+out:
drm_atomic_state_put(state);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
return ret;
}
@@ -6814,7 +6854,8 @@ skip_modeset:
BUG_ON(dm_new_crtc_state->stream == NULL);
/* Scaling or underscan settings */
- if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
+ if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))
update_stream_scaling_settings(
&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
@@ -6984,8 +7025,7 @@ static int dm_update_plane_state(struct dc *dc,
dm_old_plane_state->dc_state,
dm_state->context)) {
- ret = EINVAL;
- return ret;
+ return -EINVAL;
}
@@ -7229,10 +7269,13 @@ cleanup:
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
struct drm_connector *connector;
- struct drm_connector_state *conn_state;
+ struct drm_connector_state *conn_state, *old_conn_state;
struct amdgpu_dm_connector *aconnector = NULL;
int i;
- for_each_new_connector_in_state(state, connector, conn_state, i) {
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
+ if (!conn_state->crtc)
+ conn_state = old_conn_state;
+
if (conn_state->crtc != crtc)
continue;
@@ -7250,6 +7293,53 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
}
#endif
+static int validate_overlay(struct drm_atomic_state *state)
+{
+ int i;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane_state *primary_state, *overlay_state = NULL;
+
+ /* Check if primary plane is contained inside overlay */
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+ if (drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ overlay_state = new_plane_state;
+ continue;
+ }
+ }
+
+ /* check if we're making changes to the overlay plane */
+ if (!overlay_state)
+ return 0;
+
+ /* check if overlay plane is enabled */
+ if (!overlay_state->crtc)
+ return 0;
+
+ /* find the primary plane for the CRTC that the overlay is enabled on */
+ primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
+ if (IS_ERR(primary_state))
+ return PTR_ERR(primary_state);
+
+ /* check if primary plane is enabled */
+ if (!primary_state->crtc)
+ return 0;
+
+ /* Perform the bounds check to ensure the overlay plane covers the primary */
+ if (primary_state->crtc_x < overlay_state->crtc_x ||
+ primary_state->crtc_y < overlay_state->crtc_y ||
+ primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
+ primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
+ DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
* @dev: The DRM device
@@ -7326,7 +7416,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (adev->asic_type >= CHIP_NAVI10) {
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
ret = add_affected_mst_dsc_crtcs(state, crtc);
@@ -7342,6 +7432,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
continue;
+ ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
+ if (ret)
+ goto fail;
+
if (!new_crtc_state->enable)
continue;
@@ -7423,6 +7517,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
+ ret = validate_overlay(state);
+ if (ret)
+ goto fail;
+
/* Add new/modified planes */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index c8c525a2b505..54163c970e7a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -387,6 +387,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256
void amdgpu_dm_init_color_mod(void);
+int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct dc_plane_state *dc_plane_state);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index 2233d293a707..6acc460a3e98 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -278,6 +278,37 @@ static int __set_input_tf(struct dc_transfer_func *func,
}
/**
+ * Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of
+ * the expected size.
+ * Returns 0 on success.
+ */
+int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
+{
+ const struct drm_color_lut *lut = NULL;
+ uint32_t size = 0;
+
+ lut = __extract_blob_lut(crtc_state->degamma_lut, &size);
+ if (lut && size != MAX_COLOR_LUT_ENTRIES) {
+ DRM_DEBUG_DRIVER(
+ "Invalid Degamma LUT size. Should be %u but got %u.\n",
+ MAX_COLOR_LUT_ENTRIES, size);
+ return -EINVAL;
+ }
+
+ lut = __extract_blob_lut(crtc_state->gamma_lut, &size);
+ if (lut && size != MAX_COLOR_LUT_ENTRIES &&
+ size != MAX_COLOR_LEGACY_LUT_ENTRIES) {
+ DRM_DEBUG_DRIVER(
+ "Invalid Gamma LUT size. Should be %u (or %u for legacy) but got %u.\n",
+ MAX_COLOR_LUT_ENTRIES, MAX_COLOR_LEGACY_LUT_ENTRIES,
+ size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
* @crtc: amdgpu_dm crtc state
*
@@ -311,14 +342,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
bool is_legacy;
int r;
- degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
- if (degamma_lut && degamma_size != MAX_COLOR_LUT_ENTRIES)
- return -EINVAL;
+ r = amdgpu_dm_verify_lut_sizes(&crtc->base);
+ if (r)
+ return r;
+ degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
regamma_lut = __extract_blob_lut(crtc->base.gamma_lut, &regamma_size);
- if (regamma_lut && regamma_size != MAX_COLOR_LUT_ENTRIES &&
- regamma_size != MAX_COLOR_LEGACY_LUT_ENTRIES)
- return -EINVAL;
has_degamma =
degamma_lut && !__is_lut_linear(degamma_lut, degamma_size);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index a549c7c717dd..883ee517673b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -113,7 +113,7 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
mutex_lock(&adev->dm.dc_lock);
/* Enable CRTC CRC generation if necessary. */
- if (dm_is_crc_source_crtc(source)) {
+ if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
if (!dc_stream_configure_crc(stream_state->ctx->dc,
stream_state, enable, enable)) {
ret = -EINVAL;
@@ -221,6 +221,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
ret = -EINVAL;
goto cleanup;
}
+
+ if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
+ (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
+ DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
}
if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index f3dfb2887ae0..2cdcefab2d7d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -95,29 +95,29 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
rd_buf_ptr = rd_buf;
- str_len = strlen("Current: %d %d %d ");
- snprintf(rd_buf_ptr, str_len, "Current: %d %d %d ",
+ str_len = strlen("Current: %d 0x%x %d ");
+ snprintf(rd_buf_ptr, str_len, "Current: %d 0x%x %d ",
link->cur_link_settings.lane_count,
link->cur_link_settings.link_rate,
link->cur_link_settings.link_spread);
rd_buf_ptr += str_len;
- str_len = strlen("Verified: %d %d %d ");
- snprintf(rd_buf_ptr, str_len, "Verified: %d %d %d ",
+ str_len = strlen("Verified: %d 0x%x %d ");
+ snprintf(rd_buf_ptr, str_len, "Verified: %d 0x%x %d ",
link->verified_link_cap.lane_count,
link->verified_link_cap.link_rate,
link->verified_link_cap.link_spread);
rd_buf_ptr += str_len;
- str_len = strlen("Reported: %d %d %d ");
- snprintf(rd_buf_ptr, str_len, "Reported: %d %d %d ",
+ str_len = strlen("Reported: %d 0x%x %d ");
+ snprintf(rd_buf_ptr, str_len, "Reported: %d 0x%x %d ",
link->reported_link_cap.lane_count,
link->reported_link_cap.link_rate,
link->reported_link_cap.link_spread);
rd_buf_ptr += str_len;
- str_len = strlen("Preferred: %d %d %d ");
- snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d\n",
+ str_len = strlen("Preferred: %d 0x%x %d ");
+ snprintf(rd_buf_ptr, str_len, "Preferred: %d 0x%x %d\n",
link->preferred_link_setting.lane_count,
link->preferred_link_setting.link_rate,
link->preferred_link_setting.link_spread);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 5815983caaf8..0d2e13627c64 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -245,6 +245,23 @@ static enum bp_result encoder_control_digx_v3(
cntl->enable_dp_audio);
params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_888:
+ params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_161616:
+ params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+ break;
+ default:
+ break;
+ }
+
if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
result = BP_RESULT_OK;
@@ -274,6 +291,23 @@ static enum bp_result encoder_control_digx_v4(
cntl->enable_dp_audio));
params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_888:
+ params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_161616:
+ params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+ break;
+ default:
+ break;
+ }
+
if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
result = BP_RESULT_OK;
@@ -1057,6 +1091,19 @@ static enum bp_result set_pixel_clock_v5(
* driver choose program it itself, i.e. here we program it
* to 888 by default.
*/
+ if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
+ switch (bp_params->color_depth) {
+ case TRANSMITTER_COLOR_DEPTH_30:
+ /* yes this is correct, the atom define is wrong */
+ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_36:
+ /* yes this is correct, the atom define is wrong */
+ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+ break;
+ default:
+ break;
+ }
if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
result = BP_RESULT_OK;
@@ -1135,6 +1182,20 @@ static enum bp_result set_pixel_clock_v6(
* driver choose program it itself, i.e. here we pass required
* target rate that includes deep color.
*/
+ if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
+ switch (bp_params->color_depth) {
+ case TRANSMITTER_COLOR_DEPTH_30:
+ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_36:
+ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_48:
+ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+ break;
+ default:
+ break;
+ }
if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
result = BP_RESULT_OK;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index dd92f9c295b4..9f301f8575a5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -97,8 +97,17 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
new_clocks->dppclk_khz = 100000;
}
- if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
- if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+ /*
+ * Temporarily ignore the 0 cases for disp and dpp clks.
+ * We may have a new feature that requires 0 clks in the future.
+ */
+ if (new_clocks->dppclk_khz == 0 || new_clocks->dispclk_khz == 0) {
+ new_clocks->dppclk_khz = clk_mgr_base->clks.dppclk_khz;
+ new_clocks->dispclk_khz = clk_mgr_base->clks.dispclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
+ if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
dpp_clock_lowered = true;
clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
update_dppclk = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 68d56a91d44b..14dc1b8719a9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1961,7 +1961,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
- dc->hwss.optimize_bandwidth(dc, dc->current_state);
+ dc->optimized_required = true;
+
} else {
if (!dc->optimize_seamless_boot)
dc->hwss.prepare_bandwidth(dc, dc->current_state);
@@ -2049,6 +2050,10 @@ static void commit_planes_for_stream(struct dc *dc,
plane_state->triplebuffer_flips = true;
}
}
+ if (update_type == UPDATE_TYPE_FULL) {
+ /* force vsync flip when reconfiguring pipes to prevent underflow */
+ plane_state->flip_immediate = false;
+ }
}
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 3efee7b3378a..6b03267021ea 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -936,6 +936,24 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
dc_is_dvi_signal(link->connector_signal)) {
if (prev_sink != NULL)
dc_sink_release(prev_sink);
+ link_disconnect_sink(link);
+
+ return false;
+ }
+ /*
+ * Abort detection for DP connectors if we have
+ * no EDID and the connector is an active converter,
+ * as there is no display downstream.
+ *
+ */
+ if (dc_is_dp_sst_signal(link->connector_signal) &&
+ (link->dpcd_caps.dongle_type ==
+ DISPLAY_DONGLE_DP_VGA_CONVERTER ||
+ link->dpcd_caps.dongle_type ==
+ DISPLAY_DONGLE_DP_DVI_CONVERTER)) {
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ link_disconnect_sink(link);
return false;
}
@@ -1303,6 +1321,11 @@ static bool construct(
goto ddc_create_fail;
}
+ if (!link->ddc->ddc_pin) {
+ DC_ERROR("Failed to get I2C info for connector!\n");
+ goto ddc_create_fail;
+ }
+
link->ddc_hw_inst =
dal_ddc_get_line(
dal_ddc_service_get_ddc_pin(link->ddc));
@@ -2268,7 +2291,7 @@ enum dc_status dc_link_validate_mode_timing(
/* A hack to avoid failing any modes for EDID override feature on
* topology change such as lower quality cable for DP or different dongle
*/
- if (link->remote_sinks[0])
+ if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL)
return DC_OK;
/* Passive Dongle */
@@ -2909,8 +2932,12 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (timing->flags.DSC) {
- kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);
- kbps = kbps / 160 + ((kbps % 160) ? 1 : 0);
+ struct fixed31_32 link_bw_kbps;
+
+ link_bw_kbps = dc_fixpt_from_int(timing->pix_clk_100hz);
+ link_bw_kbps = dc_fixpt_div_int(link_bw_kbps, 160);
+ link_bw_kbps = dc_fixpt_mul_int(link_bw_kbps, timing->dsc_cfg.bits_per_pixel);
+ kbps = dc_fixpt_ceil(link_bw_kbps);
return kbps;
}
#endif
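dsc_cfg.bits_per_pixel here is DSC's fixed-point bpp (1/16-bpp units), which is where the divide-by-160 comes from: kbps = pix_clk_100hz * 100 Hz * (bpp_x16 / 16) / 1000 = pix_clk_100hz * bpp_x16 / 160, rounded up. The hunk moves the math to the dc_fixpt helpers, presumably to avoid overflow/precision loss in the open-coded 32-bit version; the same result can be sketched with a 64-bit intermediate (illustrative, not the dc_fixpt API):

#include <stdint.h>

/* DSC stream bandwidth in kbps: pixel clock in 100 Hz units, bpp in 1/16-bpp units. */
static uint32_t dsc_bandwidth_kbps(uint32_t pix_clk_100hz, uint32_t bpp_x16)
{
	uint64_t num = (uint64_t)pix_clk_100hz * bpp_x16;

	return (uint32_t)((num + 159) / 160);	/* ceil(num / 160) */
}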
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 6dd2334dd5e6..4bc95e9075e9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1284,6 +1284,8 @@ static void set_dp_mst_mode(struct dc_link *link, bool mst_enable)
link->type = dc_connection_single;
link->local_sink = link->remote_sinks[0];
link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT;
+ dc_sink_retain(link->local_sink);
+ dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
} else if (mst_enable == true &&
link->type == dc_connection_single &&
link->remote_sinks[0] != NULL) {
@@ -1914,6 +1916,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
initial_link_setting;
uint32_t link_bw;
+ if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+ return false;
+
/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing
@@ -3378,7 +3383,7 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode)
if (edp_config_set.bits.PANEL_MODE_EDP
!= panel_mode_edp) {
- enum ddc_result result = DDC_RESULT_UNKNOWN;
+ enum dc_status result = DC_ERROR_UNEXPECTED;
edp_config_set.bits.PANEL_MODE_EDP =
panel_mode_edp;
@@ -3388,7 +3393,7 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode)
&edp_config_set.raw,
sizeof(edp_config_set.raw));
- ASSERT(result == DDC_RESULT_SUCESSFULL);
+ ASSERT(result == DC_OK);
}
}
DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index f25ac17f47fa..de246e183d6b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1546,6 +1546,10 @@ bool dc_is_stream_unchanged(
if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
return false;
+ /*compare audio info*/
+ if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0)
+ return false;
+
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index f787a6b94781..eca67d5d5b10 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -871,6 +871,20 @@ static bool dce110_program_pix_clk(
bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC =
pll_settings->use_external_clk;
+ switch (pix_clk_params->color_depth) {
+ case COLOR_DEPTH_101010:
+ bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_30;
+ break;
+ case COLOR_DEPTH_121212:
+ bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_36;
+ break;
+ case COLOR_DEPTH_161616:
+ bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_48;
+ break;
+ default:
+ break;
+ }
+
if (clk_src->bios->funcs->set_pixel_clock(
clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 6ed922a3c1cd..c25840a774f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -563,6 +563,7 @@ static void dce110_stream_encoder_hdmi_set_stream_attribute(
cntl.enable_dp_audio = enable_audio;
cntl.pixel_clock = actual_pix_clk_khz;
cntl.lanes_number = LANE_COUNT_FOUR;
+ cntl.color_depth = crtc_timing->display_color_depth;
if (enc110->base.bp->funcs->encoder_control(
enc110->base.bp, &cntl) != BP_RESULT_OK)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index ab63d0d0304c..6fd57cfb112f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -429,12 +429,12 @@ static void set_clamp(
clamp_max = 0x3FC0;
break;
case COLOR_DEPTH_101010:
- /* 10bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
- clamp_max = 0x3FFC;
+ /* 10bit MSB aligned on 14 bit bus '11 1111 1111 0000' */
+ clamp_max = 0x3FF0;
break;
case COLOR_DEPTH_121212:
- /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1111' */
- clamp_max = 0x3FFF;
+ /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
+ clamp_max = 0x3FFC;
break;
default:
clamp_max = 0x3FC0;
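The corrected clamp values MSB-align an N-bit full-scale code on the 14-bit internal bus, i.e. clamp_max = ((1 << N) - 1) << (14 - N): 0x3FC0 for 8-bit, 0x3FF0 for 10-bit and 0x3FFC for 12-bit, which is what the updated comments now describe. As a quick check:

#include <stdint.h>

/* MSB-align an n-bit maximum code on a 14-bit bus: 8 -> 0x3FC0, 10 -> 0x3FF0, 12 -> 0x3FFC. */
static uint32_t clamp_max_for_depth(unsigned int n)
{
	return ((1u << n) - 1u) << (14 - n);
}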
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index d67e0abeee93..11a89d873384 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -484,10 +484,13 @@ static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *d
int vtaps_c = scl_data->taps.v_taps_c;
int ceil_vratio = dc_fixpt_ceil(scl_data->ratios.vert);
int ceil_vratio_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
- enum lb_memory_config mem_cfg = LB_MEMORY_CONFIG_0;
- if (dpp->base.ctx->dc->debug.use_max_lb)
- return mem_cfg;
+ if (dpp->base.ctx->dc->debug.use_max_lb) {
+ if (scl_data->format == PIXEL_FORMAT_420BPP8
+ || scl_data->format == PIXEL_FORMAT_420BPP10)
+ return LB_MEMORY_CONFIG_3;
+ return LB_MEMORY_CONFIG_0;
+ }
dpp->base.caps->dscl_calc_lb_num_partitions(
scl_data, LB_MEMORY_CONFIG_1, &num_part_y, &num_part_c);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 60123db7ba02..fa3acf60e7bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2202,14 +2202,18 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
&blnd_cfg.black_color);
}
- if (per_pixel_alpha)
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
- else
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
+ if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else if (per_pixel_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else
@@ -3264,13 +3268,12 @@ static enum dc_status dcn10_set_clock(struct dc *dc,
struct dc_clock_config clock_cfg = {0};
struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
- if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
- dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
- context, clock_type, &clock_cfg);
-
- if (!dc->clk_mgr->funcs->get_clock)
+ if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
return DC_FAIL_UNSUPPORTED_1;
+ dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
+ context, clock_type, &clock_cfg);
+
if (clk_khz > clock_cfg.max_clock_khz)
return DC_FAIL_CLK_EXCEED_MAX;
@@ -3288,7 +3291,7 @@ static enum dc_status dcn10_set_clock(struct dc *dc,
else
return DC_ERROR_UNEXPECTED;
- if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
+ if (dc->clk_mgr->funcs->update_clocks)
dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
context, true);
return DC_OK;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index ddf66046616d..6718777c826d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -898,10 +898,10 @@ void enc1_stream_encoder_dp_blank(
*/
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
/* Larger delay to wait until VBLANK - use max retry of
- * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode +
+ * 10us*10200=102ms. This covers 100.0ms of minimum 10 Hz mode +
* a little more because we may not trust delay accuracy.
*/
- max_retries = DP_BLANK_MAX_RETRY * 250;
+ max_retries = DP_BLANK_MAX_RETRY * 501;
/* disable DP stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 69e2aae42394..b250ef75c163 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-17 Advanced Micro Devices, Inc.
+ * Copyright 2012-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -179,11 +179,14 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
else
Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
*/
- if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
- + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
- value = 1;
- } else
- value = 0;
+ if (pipe_dest->htotal != 0) {
+ if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
+ + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
+ value = 1;
+ } else
+ value = 0;
+ }
+
REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 083c42e521f5..f7965a5d2444 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -126,7 +126,7 @@ void dcn20_dccg_init(struct dce_hwseq *hws)
REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0);
/* This value is dependent on the hardware pipeline delay so set once per SOC */
- REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c);
+ REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0xe01003c);
}
void dcn20_display_init(struct dc *dc)
{
@@ -1740,14 +1740,18 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx, &blnd_cfg.black_color);
}
- if (per_pixel_alpha)
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
- else
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
+ if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else if (per_pixel_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 8d5cfd5357c7..03e207333953 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -362,7 +362,7 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc)
REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
MASTER_UPDATE_LOCK_DB_X,
- h_blank_start - 200 - 1,
+ (h_blank_start - 200 - 1) / optc1->opp_count,
MASTER_UPDATE_LOCK_DB_Y,
v_blank_start - 1);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 08062de3fbeb..d2ea4c003d44 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2232,7 +2232,7 @@ void dcn20_set_mcif_arb_params(
wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
}
- wb_arb_params->time_per_pixel = 16.0 / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* 4 bit fraction, ms */
+ wb_arb_params->time_per_pixel = 16.0 * 1000 / (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk / 1000); /* 4 bit fraction, ms */
wb_arb_params->slice_lines = 32;
wb_arb_params->arbitration_slice = 2;
wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel,
@@ -2917,7 +2917,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
- if (voltage_supported && dummy_pstate_supported) {
+ if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
goto restore_dml_state;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index bb7add5ea227..acc2c307c871 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -826,6 +826,8 @@ enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
DCN20_CLK_SRC_PLL2,
+ DCN20_CLK_SRC_PLL3,
+ DCN20_CLK_SRC_PLL4,
DCN20_CLK_SRC_TOTAL_DCN21
};
@@ -1138,6 +1140,7 @@ static struct clock_source *dcn21_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
@@ -1498,6 +1501,14 @@ static bool construct(
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 6c6c486b774a..945d23ca3677 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -3435,6 +3435,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DCCEnabledInAnyPlane = true;
}
}
+ mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 0fafd693ffb4..5b5ed1be19ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -3467,6 +3467,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.DCCEnabledInAnyPlane = true;
}
}
+ mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
index f8f85490e77e..8a6fb58d0199 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
@@ -63,13 +63,13 @@ enum gpio_result dal_gpio_open_ex(
enum gpio_mode mode)
{
if (gpio->pin) {
- ASSERT_CRITICAL(false);
+ BREAK_TO_DEBUGGER();
return GPIO_RESULT_ALREADY_OPENED;
}
// No action if allocation failed during gpio construct
if (!gpio->hw_container.ddc) {
- ASSERT_CRITICAL(false);
+ BREAK_TO_DEBUGGER();
return GPIO_RESULT_NON_SPECIFIC_ERROR;
}
gpio->mode = mode;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index d03165e71dc6..0be817f8cae6 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -53,8 +53,8 @@
*/
struct gpio_service *dal_gpio_service_create(
- enum dce_version dce_version_major,
- enum dce_version dce_version_minor,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment,
struct dc_context *ctx)
{
struct gpio_service *service;
@@ -67,14 +67,14 @@ struct gpio_service *dal_gpio_service_create(
return NULL;
}
- if (!dal_hw_translate_init(&service->translate, dce_version_major,
- dce_version_minor)) {
+ if (!dal_hw_translate_init(&service->translate, dce_version,
+ dce_environment)) {
BREAK_TO_DEBUGGER();
goto failure_1;
}
- if (!dal_hw_factory_init(&service->factory, dce_version_major,
- dce_version_minor)) {
+ if (!dal_hw_factory_init(&service->factory, dce_version,
+ dce_environment)) {
BREAK_TO_DEBUGGER();
goto failure_1;
}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index 5db29bf582d3..70370b282f91 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -299,8 +299,8 @@ irq_source_info_dcn20[DAL_IRQ_SOURCES_NUMBER] = {
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
- [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
- [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
+ pflip_int_entry(4),
+ pflip_int_entry(5),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index cbe7818529bb..d6f988403941 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -168,6 +168,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#undef BASE_INNER
#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
@@ -222,12 +227,15 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.funcs = &pflip_irq_info_funcs\
}
-#define vupdate_int_entry(reg_num)\
+/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
 * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
+ */
+#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
- OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
- OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
+ .funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
@@ -332,12 +340,12 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = {
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
- vupdate_int_entry(0),
- vupdate_int_entry(1),
- vupdate_int_entry(2),
- vupdate_int_entry(3),
- vupdate_int_entry(4),
- vupdate_int_entry(5),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
+ vupdate_no_lock_int_entry(4),
+ vupdate_no_lock_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
index d0ccd81ad5b4..ad3e5621a174 100644
--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -163,7 +163,7 @@ enum irq_type
};
#define DAL_VALID_IRQ_SRC_NUM(src) \
- ((src) <= DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
+ ((src) < DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
/* Number of Page Flip IRQ Sources. */
#define DAL_PFLIP_IRQ_SRC_NUM \
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 30ec80ac6fc8..8da322582b68 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -57,7 +57,7 @@
* general debug capabilities
*
*/
-#if defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB)
+#if defined(CONFIG_DEBUG_KERNEL_DC) && (defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB))
#define ASSERT_CRITICAL(expr) do { \
if (WARN_ON(!(expr))) { \
kgdb_breakpoint(); \
diff --git a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
index 9c55d247227e..7e3240e73c1f 100644
--- a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
+++ b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
@@ -42,8 +42,8 @@ void dal_gpio_destroy(
struct gpio **ptr);
struct gpio_service *dal_gpio_service_create(
- enum dce_version dce_version_major,
- enum dce_version dce_version_minor,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment,
struct dc_context *ctx);
struct gpio *dal_gpio_service_create_irq(
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 51d07a4561ce..e042d8ce05b4 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1576,7 +1576,7 @@ static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma
struct pwl_float_data_ex *rgb = rgb_regamma;
const struct hw_x_point *coord_x = coordinates_x;
- build_coefficients(&coeff, true);
+ build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB);
i = 0;
while (i != hw_points_num + 1) {
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index d885d642ed7f..537736713598 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -85,7 +85,8 @@
//PB7 = MD0
#define MASK_VTEM_MD0__VRR_EN 0x01
#define MASK_VTEM_MD0__M_CONST 0x02
-#define MASK_VTEM_MD0__RESERVED2 0x0C
+#define MASK_VTEM_MD0__QMS_EN 0x04
+#define MASK_VTEM_MD0__RESERVED2 0x08
#define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0
//MD1
@@ -94,7 +95,7 @@
//MD2
#define MASK_VTEM_MD2__BASE_REFRESH_RATE_98 0x03
#define MASK_VTEM_MD2__RB 0x04
-#define MASK_VTEM_MD2__RESERVED3 0xF8
+#define MASK_VTEM_MD2__NEXT_TFR 0xF8
//MD3
#define MASK_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF
diff --git a/drivers/gpu/drm/amd/include/navi10_enum.h b/drivers/gpu/drm/amd/include/navi10_enum.h
index d5ead9680c6e..84bcb96f76ea 100644
--- a/drivers/gpu/drm/amd/include/navi10_enum.h
+++ b/drivers/gpu/drm/amd/include/navi10_enum.h
@@ -430,7 +430,7 @@ ARRAY_2D_DEPTH = 0x00000001,
*/
typedef enum ENUM_NUM_SIMD_PER_CU {
-NUM_SIMD_PER_CU = 0x00000004,
+NUM_SIMD_PER_CU = 0x00000002,
} ENUM_NUM_SIMD_PER_CU;
/*
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 1b55f037ba4a..7cde55854b65 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1533,6 +1533,10 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to reset to default!", result = tmp_result);
+ tmp_result = smum_stop_smc(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to stop smc!", result = tmp_result);
+
tmp_result = smu7_force_switch_to_arbf0(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to force to switch arbf0!", result = tmp_result);
@@ -2864,7 +2868,7 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
if (hwmgr->is_kicker)
switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
else
- switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+ switch_limit_us = data->is_memory_gddr5 ? 200 : 150;
break;
case CHIP_VEGAM:
switch_limit_us = 30;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 7bf9a14bfa0b..f6490a128438 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -229,6 +229,7 @@ struct pp_smumgr_func {
bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr);
int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting);
int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /*rw: true for read, false for write */
+ int (*stop_smc)(struct pp_hwmgr *hwmgr);
};
struct pp_hwmgr_func {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index c5288831aa15..05a55e850b5e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -114,4 +114,6 @@ extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_settin
extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw);
+extern int smum_stop_smc(struct pp_hwmgr *hwmgr);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 09a3d8ae4449..42c8f8731a50 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -2725,10 +2725,7 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS,
- VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return ci_is_smc_ram_running(hwmgr);
}
static int ci_smu_init(struct pp_hwmgr *hwmgr)
@@ -2936,6 +2933,29 @@ static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
return 0;
}
+static void ci_reset_smc(struct pp_hwmgr *hwmgr)
+{
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL,
+ rst_reg, 1);
+}
+
+
+static void ci_stop_smc_clock(struct pp_hwmgr *hwmgr)
+{
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_CLOCK_CNTL_0,
+ ck_disable, 1);
+}
+
+static int ci_stop_smc(struct pp_hwmgr *hwmgr)
+{
+ ci_reset_smc(hwmgr);
+ ci_stop_smc_clock(hwmgr);
+
+ return 0;
+}
+
const struct pp_smumgr_func ci_smu_funcs = {
.name = "ci_smu",
.smu_init = ci_smu_init,
@@ -2960,4 +2980,5 @@ const struct pp_smumgr_func ci_smu_funcs = {
.is_dpm_running = ci_is_dpm_running,
.update_dpm_settings = ci_update_dpm_settings,
.update_smc_table = ci_update_smc_table,
+ .stop_smc = ci_stop_smc,
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 4240aeec9000..83d06f8e99ec 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -217,3 +217,11 @@ int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t tabl
return -EINVAL;
}
+
+int smum_stop_smc(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr->smumgr_funcs->stop_smc)
+ return hwmgr->smumgr_funcs->stop_smc(hwmgr);
+
+ return 0;
+}
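The powerplay hunks above add an optional stop_smc hook: a new function pointer in pp_smumgr_func, a smum_stop_smc() wrapper that treats a missing hook as success, and a CI backend implementation that resets the SMC and gates its clock. A minimal sketch of that optional-callback pattern, using hypothetical names (chip, chip_ops, chip_stop) rather than the real powerplay structs:

#include <stddef.h>

struct chip;

/* Ops table: each hook is optional; backends fill in what they support. */
struct chip_ops {
	int (*stop)(struct chip *c);
};

struct chip {
	const struct chip_ops *ops;
};

/* Backend implementation: a real one would reset the block and gate its clock. */
static int example_backend_stop(struct chip *c)
{
	(void)c;
	return 0;
}

static const struct chip_ops example_backend_ops = {
	.stop = example_backend_stop,
};

/* Generic wrapper: an absent hook is a no-op, not an error. */
static int chip_stop(struct chip *c)
{
	if (c->ops && c->ops->stop)
		return c->ops->stop(c);

	return 0;
}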
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index 98e915e325dd..bc3f42e915e9 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -264,6 +264,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms,
formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
layer->layer_type, &n_formats);
+ if (!formats) {
+ kfree(kplane);
+ return -ENOMEM;
+ }
err = drm_universal_plane_init(&kms->base, plane,
get_possible_crtcs(kms, c->pipeline),
@@ -274,8 +278,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms,
komeda_put_fourcc_list(formats);
- if (err)
- goto cleanup;
+ if (err) {
+ kfree(kplane);
+ return err;
+ }
drm_plane_helper_add(plane, &komeda_plane_helper_funcs);
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 587d94798f5c..af729094260c 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -483,7 +483,10 @@ static void malidp_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
malidp_crtc_destroy_state(crtc, crtc->state);
- __drm_atomic_helper_crtc_reset(crtc, &state->base);
+ if (state)
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
}
static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 0b2bb485d9be..7bf348d28fbf 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -922,6 +922,11 @@ static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
.atomic_disable = malidp_de_plane_disable,
};
+static const uint64_t linear_only_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
int malidp_de_planes_init(struct drm_device *drm)
{
struct malidp_drm *malidp = drm->dev_private;
@@ -985,8 +990,8 @@ int malidp_de_planes_init(struct drm_device *drm)
*/
ret = drm_universal_plane_init(drm, &plane->base, crtcs,
&malidp_de_plane_funcs, formats, n,
- (id == DE_SMART) ? NULL : modifiers, plane_type,
- NULL);
+ (id == DE_SMART) ? linear_only_modifiers : modifiers,
+ plane_type, NULL);
if (ret < 0)
goto cleanup;
diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig
index 018383cfcfa7..5e95bcea43e9 100644
--- a/drivers/gpu/drm/aspeed/Kconfig
+++ b/drivers/gpu/drm/aspeed/Kconfig
@@ -3,6 +3,7 @@ config DRM_ASPEED_GFX
tristate "ASPEED BMC Display Controller"
depends on DRM && OF
depends on (COMPILE_TEST || ARCH_ASPEED)
+ depends on MMU
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DMA_CMA if HAVE_DMA_CONTIGUOUS
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 9e13e466e72c..e7bf32f234d7 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1225,6 +1225,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
return 0;
err_unregister_cec:
+ cec_unregister_adapter(adv7511->cec_adap);
i2c_unregister_device(adv7511->i2c_cec);
if (adv7511->cec_clk)
clk_disable_unprepare(adv7511->cec_clk);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 1f26890a8da6..c6a51d1c7ec9 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1630,8 +1630,19 @@ static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
struct analogix_dp_device *dp = to_dp(aux);
+ int ret;
+
+ pm_runtime_get_sync(dp->dev);
+
+ ret = analogix_dp_detect_hpd(dp);
+ if (ret)
+ goto out;
- return analogix_dp_transfer(dp, msg);
+ ret = analogix_dp_transfer(dp, msg);
+out:
+ pm_runtime_put(dp->dev);
+
+ return ret;
}
struct analogix_dp_device *
@@ -1696,8 +1707,10 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dp->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dp->reg_base))
- return ERR_CAST(dp->reg_base);
+ if (IS_ERR(dp->reg_base)) {
+ ret = PTR_ERR(dp->reg_base);
+ goto err_disable_clk;
+ }
dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd");
@@ -1709,7 +1722,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
if (IS_ERR(dp->hpd_gpiod)) {
dev_err(dev, "error getting HDP GPIO: %ld\n",
PTR_ERR(dp->hpd_gpiod));
- return ERR_CAST(dp->hpd_gpiod);
+ ret = PTR_ERR(dp->hpd_gpiod);
+ goto err_disable_clk;
}
if (dp->hpd_gpiod) {
@@ -1729,7 +1743,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
if (dp->irq == -ENXIO) {
dev_err(&pdev->dev, "failed to get irq\n");
- return ERR_PTR(-ENODEV);
+ ret = -ENODEV;
+ goto err_disable_clk;
}
ret = devm_request_threaded_irq(&pdev->dev, dp->irq,
@@ -1738,11 +1753,15 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
irq_flags, "analogix-dp", dp);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
- return ERR_PTR(ret);
+ goto err_disable_clk;
}
disable_irq(dp->irq);
return dp;
+
+err_disable_clk:
+ clk_disable_unprepare(dp->clock);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(analogix_dp_probe);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 914c569ab8c1..cab3f5c4e2fc 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -1086,11 +1086,21 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
if (!blocking)
return 0;
+ /*
+ * db[1]!=0: entering PSR, wait for fully active remote frame buffer.
+ * db[1]==0: exiting PSR, wait for either
+ * (a) ACTIVE_RESYNC - the sink "must display the
+ * incoming active frames from the Source device with no visible
+ * glitches and/or artifacts", even though timings may still be
+ * re-synchronizing; or
+ * (b) INACTIVE - the transition is fully complete.
+ */
ret = readx_poll_timeout(analogix_dp_get_psr_status, dp, psr_status,
psr_status >= 0 &&
((vsc->db[1] && psr_status == DP_PSR_SINK_ACTIVE_RFB) ||
- (!vsc->db[1] && psr_status == DP_PSR_SINK_INACTIVE)), 1500,
- DP_TIMEOUT_PSR_LOOP_MS * 1000);
+ (!vsc->db[1] && (psr_status == DP_PSR_SINK_ACTIVE_RESYNC ||
+ psr_status == DP_PSR_SINK_INACTIVE))),
+ 1500, DP_TIMEOUT_PSR_LOOP_MS * 1000);
if (ret) {
dev_warn(dp->dev, "Failed to apply PSR %d\n", ret);
return ret;
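The PSR hunk above only changes the condition handed to readx_poll_timeout(): on PSR exit, either ACTIVE_RESYNC or INACTIVE now ends the wait. As a rough illustration of how readx_poll_timeout() from <linux/iopoll.h> re-reads a value until the condition holds or the timeout expires (the status codes and read helper here are hypothetical, not the analogix ones):

#include <linux/iopoll.h>

#define STATUS_INACTIVE		0	/* hypothetical status codes */
#define STATUS_ACTIVE_RESYNC	2

struct dev_ctx;				/* stand-in for the driver context */
int read_status(struct dev_ctx *ctx);	/* assumed register-read helper */

static int wait_for_psr_exit(struct dev_ctx *ctx)
{
	int status;

	/*
	 * Re-reads the status every 1500 us for at most 100 ms; returns 0 as
	 * soon as the condition is true, -ETIMEDOUT otherwise.
	 */
	return readx_poll_timeout(read_status, ctx, status,
				  status >= 0 &&
				  (status == STATUS_ACTIVE_RESYNC ||
				   status == STATUS_INACTIVE),
				  1500, 100 * 1000);
}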
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index 6166dca6be81..9c3f9110895e 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -1026,7 +1026,7 @@ static ssize_t cdns_dsi_transfer(struct mipi_dsi_host *host,
struct mipi_dsi_packet packet;
int ret, i, tx_len, rx_len;
- ret = pm_runtime_get_sync(host->dev);
+ ret = pm_runtime_resume_and_get(host->dev);
if (ret < 0)
return ret;
@@ -1284,6 +1284,7 @@ static const struct of_device_id cdns_dsi_of_match[] = {
{ .compatible = "cdns,dsi" },
{ },
};
+MODULE_DEVICE_TABLE(of, cdns_dsi_of_match);
static struct platform_driver cdns_dsi_platform_driver = {
.probe = cdns_dsi_drm_probe,
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 6e81e5db57f2..5302dd90a7a5 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -291,16 +291,11 @@ out:
mutex_unlock(&ge_b850v3_lvds_dev_mutex);
}
-static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
- const struct i2c_device_id *id)
+static int ge_b850v3_register(void)
{
+ struct i2c_client *stdp4028_i2c = ge_b850v3_lvds_ptr->stdp4028_i2c;
struct device *dev = &stdp4028_i2c->dev;
- ge_b850v3_lvds_init(dev);
-
- ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c;
- i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr);
-
/* drm bridge initialization */
ge_b850v3_lvds_ptr->bridge.funcs = &ge_b850v3_lvds_funcs;
ge_b850v3_lvds_ptr->bridge.of_node = dev->of_node;
@@ -321,6 +316,27 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
"ge-b850v3-lvds-dp", ge_b850v3_lvds_ptr);
}
+static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &stdp4028_i2c->dev;
+ int ret;
+
+ ret = ge_b850v3_lvds_init(dev);
+
+ if (ret)
+ return ret;
+
+ ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c;
+ i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr);
+
+ /* Only register after both bridges are probed */
+ if (!ge_b850v3_lvds_ptr->stdp2690_i2c)
+ return 0;
+
+ return ge_b850v3_register();
+}
+
static int stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c)
{
ge_b850v3_lvds_remove();
@@ -354,13 +370,21 @@ static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c,
const struct i2c_device_id *id)
{
struct device *dev = &stdp2690_i2c->dev;
+ int ret;
+
+ ret = ge_b850v3_lvds_init(dev);
- ge_b850v3_lvds_init(dev);
+ if (ret)
+ return ret;
ge_b850v3_lvds_ptr->stdp2690_i2c = stdp2690_i2c;
i2c_set_clientdata(stdp2690_i2c, ge_b850v3_lvds_ptr);
- return 0;
+ /* Only register after both bridges are probed */
+ if (!ge_b850v3_lvds_ptr->stdp4028_i2c)
+ return 0;
+
+ return ge_b850v3_register();
}
static int stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c)
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 04431dbac4a4..fb0b64c965b7 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2118,7 +2118,7 @@ static void sii8620_init_rcp_input_dev(struct sii8620 *ctx)
if (ret) {
dev_err(ctx->dev, "Failed to register RC device\n");
ctx->error = ret;
- rc_free_device(ctx->rc_dev);
+ rc_free_device(rc_dev);
return;
}
ctx->rc_dev = rc_dev;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 675442bfc1bd..2fc27931d3b5 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -365,7 +365,6 @@ static void dw_mipi_message_config(struct dw_mipi_dsi *dsi,
if (lpm)
val |= CMD_MODE_ALL_LP;
- dsi_write(dsi, DSI_LPCLK_CTRL, lpm ? 0 : PHY_TXREQUESTCLKHS);
dsi_write(dsi, DSI_CMD_MODE_CFG, val);
}
@@ -541,16 +540,22 @@ static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi,
unsigned long mode_flags)
{
+ u32 val;
+
dsi_write(dsi, DSI_PWR_UP, RESET);
if (mode_flags & MIPI_DSI_MODE_VIDEO) {
dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE);
dw_mipi_dsi_video_mode_config(dsi);
- dsi_write(dsi, DSI_LPCLK_CTRL, PHY_TXREQUESTCLKHS);
} else {
dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
}
+ val = PHY_TXREQUESTCLKHS;
+ if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+ val |= AUTO_CLKLANE_CTRL;
+ dsi_write(dsi, DSI_LPCLK_CTRL, val);
+
dsi_write(dsi, DSI_PWR_UP, POWERUP);
}
@@ -1052,6 +1057,7 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
ret = mipi_dsi_host_register(&dsi->dsi_host);
if (ret) {
dev_err(dev, "Failed to register MIPI host: %d\n", ret);
+ pm_runtime_disable(dev);
dw_mipi_dsi_debugfs_remove(dsi);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index f1de4bb6558c..dbb4a374cb64 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -115,6 +115,7 @@ static const struct regmap_config ti_sn_bridge_regmap_config = {
.val_bits = 8,
.volatile_table = &ti_sn_bridge_volatile_table,
.cache_type = REGCACHE_NONE,
+ .max_register = 0xFF,
};
static void ti_sn_bridge_write_u16(struct ti_sn_bridge *pdata,
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 2dd2cd87cdbb..5e906ea6df67 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2948,7 +2948,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set,
ret = handle_conflicting_encoders(state, true);
if (ret)
- return ret;
+ goto fail;
ret = drm_atomic_commit(state);
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index cc9acd986c68..4f785a783e01 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -268,9 +268,10 @@ int drm_master_open(struct drm_file *file_priv)
void drm_master_release(struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
- struct drm_master *master = file_priv->master;
+ struct drm_master *master;
mutex_lock(&dev->master_mutex);
+ master = file_priv->master;
if (file_priv->magic)
idr_remove(&file_priv->master->magic_map, file_priv->magic);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 2337b3827e6a..11a81e8ba963 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1984,6 +1984,9 @@ EXPORT_SYMBOL(drm_connector_attach_max_bpc_property);
void drm_connector_set_vrr_capable_property(
struct drm_connector *connector, bool capable)
{
+ if (!connector->vrr_capable_property)
+ return;
+
drm_object_property_set_value(&connector->base,
connector->vrr_capable_property,
capable);
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
index b5ac1581e623..d595697d3d7e 100644
--- a/drivers/gpu/drm/drm_crtc_helper_internal.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -32,16 +32,6 @@
#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
-/* drm_fb_helper.c */
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-int drm_fb_helper_modinit(void);
-#else
-static inline int drm_fb_helper_modinit(void)
-{
- return 0;
-}
-#endif
-
/* drm_dp_aux_dev.c */
#ifdef CONFIG_DRM_DP_AUX_CHARDEV
int drm_dp_aux_dev_init(void);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 00debd02c322..0ba92428ef56 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -91,6 +91,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
mutex_lock(&dev->filelist_mutex);
list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
struct task_struct *task;
+ bool is_current_master = drm_is_current_master(priv);
rcu_read_lock(); /* locks pid_task()->comm */
task = pid_task(priv->pid, PIDTYPE_PID);
@@ -99,7 +100,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
task ? task->comm : "<unknown>",
pid_vnr(priv->pid),
priv->minor->index,
- drm_is_current_master(priv) ? 'y' : 'n',
+ is_current_master ? 'y' : 'n',
priv->authenticated ? 'y' : 'n',
from_kuid_munged(seq_user_ns(m), uid),
priv->magic);
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index 6a626c82e264..f6598c5a9a87 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -144,8 +144,10 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
source[len - 1] = '\0';
ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
- if (ret)
+ if (ret) {
+ kfree(source);
return ret;
+ }
spin_lock_irq(&crc->lock);
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 0cfb386754c3..0d7f90c00f04 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -63,7 +63,7 @@ static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index)
mutex_lock(&aux_idr_mutex);
aux_dev = idr_find(&aux_idr, index);
- if (!kref_get_unless_zero(&aux_dev->refcount))
+ if (aux_dev && !kref_get_unless_zero(&aux_dev->refcount))
aux_dev = NULL;
mutex_unlock(&aux_idr_mutex);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 2de1eebe591f..1ff13af13324 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3657,6 +3657,7 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
drm_edid_get_monitor_name(mst_edid, name, namelen);
+ kfree(mst_edid);
}
/**
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 9b69e55ad701..2dc6dd6230d7 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1702,9 +1702,6 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
connector_bad_edid(connector, edid, edid[0x7e] + 1);
- edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
- edid[0x7e] = valid_extensions;
-
new = kmalloc_array(valid_extensions + 1, EDID_LENGTH,
GFP_KERNEL);
if (!new)
@@ -1721,6 +1718,9 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
base += EDID_LENGTH;
}
+ new[EDID_LENGTH - 1] += new[0x7e] - valid_extensions;
+ new[0x7e] = valid_extensions;
+
kfree(edid);
edid = new;
}
@@ -4380,7 +4380,8 @@ bool drm_detect_monitor_audio(struct edid *edid)
if (!edid_ext)
goto end;
- has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
+ has_audio = (edid_ext[0] == CEA_EXT &&
+ (edid_ext[3] & EDID_BASIC_AUDIO) != 0);
if (has_audio) {
DRM_DEBUG_KMS("Monitor has basic audio support\n");
@@ -4533,16 +4534,8 @@ static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
connector->name, dc_bpc);
info->bpc = dc_bpc;
- /*
- * Deep color support mandates RGB444 support for all video
- * modes and forbids YCRCB422 support for all video modes per
- * HDMI 1.3 spec.
- */
- info->color_formats = DRM_COLOR_FORMAT_RGB444;
-
/* YCRCB444 is optional according to spec. */
if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
- info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
connector->name);
}
@@ -4659,6 +4652,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
return quirks;
+ info->color_formats |= DRM_COLOR_FORMAT_RGB444;
drm_parse_cea_ext(connector, edid);
/*
@@ -4707,7 +4701,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
connector->name, info->bpc);
- info->color_formats |= DRM_COLOR_FORMAT_RGB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6b8502bcf0fd..4ae68bf04892 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -965,11 +965,15 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
drm_modeset_lock_all(fb_helper->dev);
drm_client_for_each_modeset(modeset, &fb_helper->client) {
crtc = modeset->crtc;
- if (!crtc->funcs->gamma_set || !crtc->gamma_size)
- return -EINVAL;
+ if (!crtc->funcs->gamma_set || !crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (cmap->start + cmap->len > crtc->gamma_size)
- return -EINVAL;
+ if (cmap->start + cmap->len > crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
r = crtc->gamma_store;
g = r + crtc->gamma_size;
@@ -982,8 +986,9 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
ret = crtc->funcs->gamma_set(crtc, r, g, b,
crtc->gamma_size, NULL);
if (ret)
- return ret;
+ goto out;
}
+out:
drm_modeset_unlock_all(fb_helper->dev);
return ret;
@@ -2410,24 +2415,3 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
return 0;
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);
-
-/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
- * but the module doesn't depend on any fb console symbols. At least
- * attempt to load fbcon to avoid leaving the system without a usable console.
- */
-int __init drm_fb_helper_modinit(void)
-{
-#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
- const char name[] = "fbcon";
- struct module *fbcon;
-
- mutex_lock(&module_mutex);
- fbcon = find_module(name);
- mutex_unlock(&module_mutex);
-
- if (!fbcon)
- request_module_nowait(name);
-#endif
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_modinit);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index f5918707672f..d88f4230c221 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -474,14 +474,28 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
loff_t num_pages = obj->size >> PAGE_SHIFT;
+ vm_fault_t ret;
struct page *page;
+ pgoff_t page_offset;
- if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
- return VM_FAULT_SIGBUS;
+ /* We don't use vmf->pgoff since that has the fake offset */
+ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- page = shmem->pages[vmf->pgoff];
+ mutex_lock(&shmem->pages_lock);
+
+ if (page_offset >= num_pages ||
+ WARN_ON_ONCE(!shmem->pages) ||
+ shmem->madv < 0) {
+ ret = VM_FAULT_SIGBUS;
+ } else {
+ page = shmem->pages[page_offset];
+
+ ret = vmf_insert_page(vma, vmf->address, page);
+ }
- return vmf_insert_page(vma, vmf->address, page);
+ mutex_unlock(&shmem->pages_lock);
+
+ return ret;
}
static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
@@ -549,9 +563,6 @@ int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
- /* Remove the fake offset */
- vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
-
return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
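The shmem fault handler above stops using vmf->pgoff, which still carries the DRM "fake" mmap offset now that drm_gem_shmem_mmap() no longer subtracts it, and instead derives the page index from the faulting address, bounds-checks it, and does the lookup under pages_lock. A stripped-down sketch of just that index calculation (types simplified, names hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SHIFT 12	/* 4 KiB pages assumed */

/* Map a faulting address to a page index inside the mapping; false if out of range. */
static bool addr_to_page_index(uintptr_t fault_addr, uintptr_t vma_start,
			       size_t num_pages, size_t *index)
{
	size_t page_offset = (fault_addr - vma_start) >> EXAMPLE_PAGE_SHIFT;

	if (page_offset >= num_pages)
		return false;

	*index = page_offset;
	return true;
}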
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 22c7fd7196c8..1c691bdb8914 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -99,6 +99,8 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
return -EFAULT;
+ memset(&v, 0, sizeof(v));
+
v = (struct drm_version) {
.name_len = v32.name_len,
.name = compat_ptr(v32.name),
@@ -137,6 +139,9 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
return -EFAULT;
+
+ memset(&uq, 0, sizeof(uq));
+
uq = (struct drm_unique){
.unique_len = uq32.unique_len,
.unique = compat_ptr(uq32.unique),
@@ -265,6 +270,8 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
if (copy_from_user(&c32, argp, sizeof(c32)))
return -EFAULT;
+ memset(&client, 0, sizeof(client));
+
client.idx = c32.idx;
err = drm_ioctl_kernel(file, drm_getclient, &client, 0);
@@ -850,12 +857,12 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
+ memset(&req, 0, sizeof(req));
+
req.request.type = req32.request.type;
req.request.sequence = req32.request.sequence;
req.request.signal = req32.request.signal;
err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
- if (err)
- return err;
req32.reply.type = req.reply.type;
req32.reply.sequence = req.reply.sequence;
@@ -864,7 +871,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT;
- return 0;
+ return err;
}
#if defined(CONFIG_X86)
@@ -887,6 +894,8 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
struct drm_mode_fb_cmd2 req64;
int err;
+ memset(&req64, 0, sizeof(req64));
+
if (copy_from_user(&req64, argp,
offsetof(drm_mode_fb_cmd232_t, modifier)))
return -EFAULT;
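The compat ioctl hunks above all follow the same rule: memset() the native structure before copying the fields the 32-bit layout actually provides, so that padding and fields the handler never touches cannot leak stale kernel stack back to user space on the later copy-out. A small generic sketch of that rule (both struct layouts are hypothetical):

#include <string.h>

struct req32 {			/* hypothetical 32-bit ioctl layout */
	unsigned int type;
	unsigned int sequence;
};

struct req {			/* hypothetical native layout with extra state */
	unsigned int type;
	unsigned int sequence;
	unsigned long long signal;	/* not present in the 32-bit variant */
};

static void req_from_req32(struct req *out, const struct req32 *in)
{
	/* Zero everything first so unset fields and padding are deterministic. */
	memset(out, 0, sizeof(*out));
	out->type = in->type;
	out->sequence = in->sequence;
}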
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index fcd728d7cf72..76b6676b0106 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -118,17 +118,18 @@ int drm_getunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
- struct drm_master *master = file_priv->master;
+ struct drm_master *master;
- mutex_lock(&master->dev->master_mutex);
+ mutex_lock(&dev->master_mutex);
+ master = file_priv->master;
if (u->unique_len >= master->unique_len) {
if (copy_to_user(u->unique, master->unique, master->unique_len)) {
- mutex_unlock(&master->dev->master_mutex);
+ mutex_unlock(&dev->master_mutex);
return -EFAULT;
}
}
u->unique_len = master->unique_len;
- mutex_unlock(&master->dev->master_mutex);
+ mutex_unlock(&dev->master_mutex);
return 0;
}
@@ -825,6 +826,9 @@ long drm_ioctl(struct file *filp,
if (drm_dev_is_unplugged(dev))
return -ENODEV;
+ if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
+ return -ENOTTY;
+
is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
if (is_driver_ioctl) {
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 221a8528c993..f933da1656eb 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -64,19 +64,18 @@ MODULE_PARM_DESC(edid_firmware,
static int __init drm_kms_helper_init(void)
{
- int ret;
-
- /* Call init functions from specific kms helpers here */
- ret = drm_fb_helper_modinit();
- if (ret < 0)
- goto out;
-
- ret = drm_dp_aux_dev_init();
- if (ret < 0)
- goto out;
-
-out:
- return ret;
+ /*
+ * The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
+ * but the module doesn't depend on any fb console symbols. At least
+ * attempt to load fbcon to avoid leaving the system without a usable
+ * console.
+ */
+ if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) &&
+ IS_MODULE(CONFIG_FRAMEBUFFER_CONSOLE) &&
+ !IS_ENABLED(CONFIG_EXPERT))
+ request_module_nowait("fbcon");
+
+ return drm_dp_aux_dev_init();
}
static void __exit drm_kms_helper_exit(void)
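The rewritten drm_kms_helper_init() above swaps an #ifdef-guarded helper for IS_ENABLED()/IS_MODULE() checks: both branches are always compiled and type-checked, and the constant condition lets the compiler drop the dead one. A minimal sketch of that style (CONFIG_EXAMPLE_FEATURE is a made-up option; CONFIG_FRAMEBUFFER_CONSOLE is the real one the hunk tests):

#include <linux/init.h>
#include <linux/kconfig.h>
#include <linux/kmod.h>
#include <linux/module.h>

static int __init example_init(void)
{
	/* Compile-time constants: no #ifdef block to keep in sync. */
	if (IS_ENABLED(CONFIG_EXAMPLE_FEATURE) &&
	    IS_MODULE(CONFIG_FRAMEBUFFER_CONSOLE))
		request_module_nowait("fbcon");

	return 0;
}
module_init(example_init);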
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 58f5dc2f6dd5..f5ab891731d0 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -84,6 +84,13 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data onegx1_pro = {
+ .width = 1200,
+ .height = 1920,
+ .bios_dates = (const char * const []){ "12/17/2020", NULL },
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
.width = 720,
.height = 1280,
@@ -102,6 +109,18 @@ static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1280x1920_rightside_up = {
+ .width = 1280,
+ .height = 1920,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
+static const struct drm_dmi_panel_orientation_data lcd1600x2560_leftside_up = {
+ .width = 1600,
+ .height = 2560,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+};
+
static const struct dmi_system_id orientation_data[] = {
{ /* Acer One 10 (S1003) */
.matches = {
@@ -127,6 +146,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO 2021 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
@@ -141,6 +166,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"),
},
.driver_data = (void *)&lcd720x1280_rightside_up,
+ }, { /* GPD Win Max */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1619-01"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /*
* GPD Pocket, note that the DMI data is less generic than it seems,
* it seems, devices with a board-vendor of "AMI Corporation"
@@ -178,6 +209,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
},
.driver_data = (void *)&gpd_win2,
+ }, { /* GPD Win 3 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1618-03")
+ },
+ .driver_data = (void *)&lcd720x1280_rightside_up,
}, { /* I.T.Works TW891 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
@@ -186,6 +223,13 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "TW891"),
},
.driver_data = (void *)&itworks_tw891,
+ }, { /* KD Kurio Smart C15200 2-in-1 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "KD Interactive"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Kurio Smart"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "KDM960BCP"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /*
* Lenovo Ideapad Miix 310 laptop, only some production batches
* have a portrait screen, the resolution checks makes the quirk
@@ -204,13 +248,50 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
- }, { /* Lenovo Ideapad D330 */
+ }, { /* Lenovo Ideapad D330-10IGM (HD) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Lenovo Ideapad D330-10IGM (FHD) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81H3"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Lenovo Yoga Book X90F / X91F / X91L */
+ .matches = {
+ /* Non exact match to match all versions */
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* OneGX1 Pro */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SYSTEM_PRODUCT_NAME"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
+ },
+ .driver_data = (void *)&onegx1_pro,
+ }, { /* OneXPlayer */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONE-NETBOOK TECHNOLOGY CO., LTD."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
+ },
+ .driver_data = (void *)&lcd1600x2560_leftside_up,
+ }, { /* Samsung GalaxyBook 10.6 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galaxy Book 10.6"),
+ },
+ .driver_data = (void *)&lcd1280x1920_rightside_up,
+ }, { /* Valve Steam Deck */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* VIOS LTH17 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index d6ad60ab0d38..6bdebcca5690 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -186,6 +186,13 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (WARN_ON(config->num_total_plane >= 32))
return -EINVAL;
+ /*
+ * First driver to need more than 64 formats needs to fix this. Each
+ * format is encoded as a bit and the current code only supports a u64.
+ */
+ if (WARN_ON(format_count > 64))
+ return -EINVAL;
+
WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
(!funcs->atomic_destroy_state ||
!funcs->atomic_duplicate_state));
@@ -207,13 +214,6 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
return -ENOMEM;
}
- /*
- * First driver to need more than 64 formats needs to fix this. Each
- * format is encoded as a bit and the current code only supports a u64.
- */
- if (WARN_ON(format_count > 64))
- return -EINVAL;
-
if (format_modifiers) {
const uint64_t *temp_modifiers = format_modifiers;
while (*temp_modifiers++ != DRM_FORMAT_MOD_INVALID)
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 3aae7ea522f2..c3f2292dc93d 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -123,7 +123,6 @@ static int drm_plane_helper_check_update(struct drm_plane *plane,
.crtc_w = drm_rect_width(dst),
.crtc_h = drm_rect_height(dst),
.rotation = rotation,
- .visible = *visible,
};
struct drm_crtc_state crtc_state = {
.crtc = crtc,
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 4b5c7b0ed714..e558381c4c96 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -326,19 +326,27 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
return -ENOENT;
*fence = drm_syncobj_fence_get(syncobj);
- drm_syncobj_put(syncobj);
if (*fence) {
ret = dma_fence_chain_find_seqno(fence, point);
- if (!ret)
- return 0;
+ if (!ret) {
+ /* If the requested seqno is already signaled
+ * drm_syncobj_find_fence may return a NULL
+ * fence. To make sure the recipient gets
+ * signalled, use a new fence instead.
+ */
+ if (!*fence)
+ *fence = dma_fence_get_stub();
+
+ goto out;
+ }
dma_fence_put(*fence);
} else {
ret = -EINVAL;
}
if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
- return ret;
+ goto out;
memset(&wait, 0, sizeof(wait));
wait.task = current;
@@ -370,6 +378,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
if (wait.node.next)
drm_syncobj_remove_wait(syncobj, &wait);
+out:
+ drm_syncobj_put(syncobj);
+
return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 0c9c40720ca9..35225ff8792d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
if (switch_mmu_context) {
struct etnaviv_iommu_context *old_context = gpu->mmu_context;
- etnaviv_iommu_context_get(mmu_context);
- gpu->mmu_context = mmu_context;
+ gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
etnaviv_iommu_context_put(old_context);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index cb1faaac380a..519948637186 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -304,8 +304,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
list_del(&mapping->obj_node);
}
- etnaviv_iommu_context_get(mmu_context);
- mapping->context = mmu_context;
+ mapping->context = etnaviv_iommu_context_get(mmu_context);
mapping->use = 1;
ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 1ba83a90cdef..9baf5af919e1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -471,6 +471,12 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
+ args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
+ DRM_ERROR("submit arguments out of size limits\n");
+ return -EINVAL;
+ }
+
/*
* Copy the command submission and bo array to kernel space in
* one go, and do this outside of any locks.
@@ -534,8 +540,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
goto err_submit_objects;
submit->ctx = file->driver_priv;
- etnaviv_iommu_context_get(submit->ctx->mmu);
- submit->mmu_context = submit->ctx->mmu;
+ submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
submit->exec_state = args->exec_state;
submit->flags = args->flags;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 85de8551ce86..db35736d47af 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -545,6 +545,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
/* We rely on the GPU running, so program the clock */
etnaviv_gpu_update_clock(gpu);
+ gpu->fe_running = false;
+ gpu->exec_state = -1;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = NULL;
+
return 0;
}
@@ -607,19 +613,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
}
+
+ gpu->fe_running = true;
}
-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
+static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
- u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
- &gpu->mmu_context->cmdbuf_mapping);
u16 prefetch;
+ u32 address;
/* setup the MMU */
- etnaviv_iommu_restore(gpu, gpu->mmu_context);
+ etnaviv_iommu_restore(gpu, context);
/* Start command processor */
prefetch = etnaviv_buffer_init(gpu);
+ address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+ &gpu->mmu_context->cmdbuf_mapping);
etnaviv_gpu_start_fe(gpu, address, prefetch);
}
@@ -790,7 +800,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
/* Now program the hardware */
mutex_lock(&gpu->lock);
etnaviv_gpu_hw_init(gpu);
- gpu->exec_state = -1;
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
@@ -994,8 +1003,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
spin_unlock(&gpu->event_spinlock);
etnaviv_gpu_hw_init(gpu);
- gpu->exec_state = -1;
- gpu->mmu_context = NULL;
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
@@ -1306,14 +1313,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
goto out_unlock;
}
- if (!gpu->mmu_context) {
- etnaviv_iommu_context_get(submit->mmu_context);
- gpu->mmu_context = submit->mmu_context;
- etnaviv_gpu_start_fe_idleloop(gpu);
- } else {
- etnaviv_iommu_context_get(gpu->mmu_context);
- submit->prev_mmu_context = gpu->mmu_context;
- }
+ if (!gpu->fe_running)
+ etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
+
+ if (submit->prev_mmu_context)
+ etnaviv_iommu_context_put(submit->prev_mmu_context);
+ submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
if (submit->nr_pmrs) {
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
@@ -1530,7 +1535,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
- if (gpu->initialized && gpu->mmu_context) {
+ if (gpu->initialized && gpu->fe_running) {
/* Replace the last WAIT with END */
mutex_lock(&gpu->lock);
etnaviv_buffer_end(gpu);
@@ -1543,8 +1548,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
*/
etnaviv_gpu_wait_idle(gpu, 100);
- etnaviv_iommu_context_put(gpu->mmu_context);
- gpu->mmu_context = NULL;
+ gpu->fe_running = false;
}
gpu->exec_state = -1;
@@ -1692,6 +1696,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
etnaviv_gpu_hw_suspend(gpu);
#endif
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+
if (gpu->initialized) {
etnaviv_cmdbuf_free(&gpu->buffer);
etnaviv_iommu_global_fini(gpu);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 8f9bd4edc96a..02478c75f896 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -101,6 +101,7 @@ struct etnaviv_gpu {
struct workqueue_struct *wq;
struct drm_gpu_scheduler sched;
bool initialized;
+ bool fe_running;
/* 'ring'-buffer: */
struct etnaviv_cmdbuf buffer;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 1a7c89a67bea..afe5dd6a9925 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
u32 pgtable;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
/* set base addresses */
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index f8bf488e9d71..d664ae29ae20 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
return;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
prefetch = etnaviv_buffer_config_mmuv2(gpu,
(u32)v2_context->mtlb_dma,
(u32)context->global->bad_page_dma);
@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
return;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
lower_32_bits(context->global->v2.pta_dma));
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 3607d348c298..790cbb20aaeb 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -204,6 +204,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
*/
list_for_each_entry_safe(m, n, &list, scan_node) {
etnaviv_iommu_remove_mapping(context, m);
+ etnaviv_iommu_context_put(m->context);
m->context = NULL;
list_del_init(&m->mmu_node);
list_del_init(&m->scan_node);
@@ -288,6 +289,12 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
mutex_lock(&context->lock);
+ /* Bail if the mapping has been reaped by another thread */
+ if (!mapping->context) {
+ mutex_unlock(&context->lock);
+ return;
+ }
+
/* If the vram node is on the mm, unmap and remove the node */
if (mapping->vram_node.mm == &context->mm)
etnaviv_iommu_remove_mapping(context, mapping);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index d1d6902fd13b..e4a0b7d09c2e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
struct etnaviv_cmdbuf_suballoc *suballoc);
-static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+static inline struct etnaviv_iommu_context *
+etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
{
kref_get(&ctx->refcount);
+ return ctx;
}
void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
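etnaviv_iommu_context_get() now returns the pointer it just referenced, so callers collapse "take a reference, then assign" into one statement, e.g. gpu->mmu_context = etnaviv_iommu_context_get(context); and the reference take is visible at every assignment site. A generic sketch of that accessor shape built on <linux/kref.h> (the ctx struct is hypothetical):

#include <linux/kref.h>

struct ctx {
	struct kref refcount;
};

/* Take a reference and hand the pointer back so callers can assign directly. */
static inline struct ctx *ctx_get(struct ctx *c)
{
	kref_get(&c->refcount);
	return c;
}

Callers then write dst->ctx = ctx_get(src); instead of a separate get followed by an assignment, which is the shape the etnaviv hunks switch to.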
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index 58b89ec11b0e..a3c9d8b9e1a1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -140,6 +140,8 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
else if (IS_ENABLED(CONFIG_IOMMU_DMA))
mapping = iommu_get_domain_for_dev(priv->dma_dev);
+ else
+ mapping = ERR_PTR(-ENODEV);
if (IS_ERR(mapping))
return PTR_ERR(mapping);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 570b59520fd1..87738650dd90 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -2120,12 +2120,12 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
intel_dp->dpcd,
sizeof(intel_dp->dpcd));
cdv_intel_edp_panel_vdd_off(gma_encoder);
- if (ret == 0) {
+ if (ret <= 0) {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
cdv_intel_dp_encoder_destroy(encoder);
cdv_intel_dp_destroy(connector);
- goto err_priv;
+ goto err_connector;
} else {
DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
intel_dp->dpcd[0], intel_dp->dpcd[1],
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index e28107061148..fc9a34ed58bd 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -279,11 +279,8 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
hdmi_dev = pci_get_drvdata(dev);
i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
- if (i2c_dev == NULL) {
- DRM_ERROR("Can't allocate interface\n");
- ret = -ENOMEM;
- goto exit;
- }
+ if (!i2c_dev)
+ return -ENOMEM;
i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
i2c_dev->status = I2C_STAT_INIT;
@@ -300,16 +297,23 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
if (ret) {
DRM_ERROR("Failed to request IRQ for I2C controller\n");
- goto err;
+ goto free_dev;
}
/* Adapter registration */
ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
- return ret;
+ if (ret) {
+ DRM_ERROR("Failed to add I2C adapter\n");
+ goto free_irq;
+ }
-err:
+ return 0;
+
+free_irq:
+ free_irq(dev->irq, hdmi_dev);
+free_dev:
kfree(i2c_dev);
-exit:
+
return ret;
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 7005f8f69c68..d414525eccf6 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -313,6 +313,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_err;
+ ret = -ENOMEM;
+
dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0);
if (!dev_priv->mmu)
goto out_err;
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 4256410535f0..65e67e12a0a1 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -532,14 +532,15 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
{
- struct drm_crtc *crtc = NULL;
+ struct drm_crtc *crtc;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+
if (gma_crtc->pipe == pipe)
- break;
+ return crtc;
}
- return crtc;
+ return NULL;
}
int gma_connector_clones(struct drm_device *dev, int type_mask)
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index e6265fb85626..56bb34d04332 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -337,6 +337,7 @@ int psb_irq_postinstall(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
+ unsigned int i;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
@@ -349,20 +350,12 @@ int psb_irq_postinstall(struct drm_device *dev)
PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- if (dev->vblank[0].enabled)
- psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
- else
- psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
-
- if (dev->vblank[1].enabled)
- psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
- else
- psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
-
- if (dev->vblank[2].enabled)
- psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
- else
- psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+ for (i = 0; i < dev->num_crtcs; ++i) {
+ if (dev->vblank[i].enabled)
+ psb_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ else
+ psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ }
if (dev_priv->ops->hotplug_enable)
dev_priv->ops->hotplug_enable(dev, true);
@@ -375,6 +368,7 @@ void psb_irq_uninstall(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
+ unsigned int i;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
@@ -383,14 +377,10 @@ void psb_irq_uninstall(struct drm_device *dev)
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- if (dev->vblank[0].enabled)
- psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
-
- if (dev->vblank[1].enabled)
- psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
-
- if (dev->vblank[2].enabled)
- psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+ for (i = 0; i < dev->num_crtcs; ++i) {
+ if (dev->vblank[i].enabled)
+ psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ }
dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
_PSB_IRQ_MSVDX_FLAG |
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index c103005b0a33..a34ef5ec7d42 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -376,7 +376,6 @@ static void hibmc_pci_remove(struct pci_dev *pdev)
drm_dev_unregister(dev);
hibmc_unload(dev);
- drm_dev_put(dev);
}
static struct pci_device_id hibmc_pci_table[] = {
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index 3456d33feb46..ce8182bd0b55 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -83,13 +83,31 @@ static void intel_dsm_platform_mux_info(acpi_handle dhandle)
return;
}
+ if (!pkg->package.count) {
+ DRM_DEBUG_DRIVER("no connection in _DSM\n");
+ return;
+ }
+
connector_count = &pkg->package.elements[0];
DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
(unsigned long long)connector_count->integer.value);
for (i = 1; i < pkg->package.count; i++) {
union acpi_object *obj = &pkg->package.elements[i];
- union acpi_object *connector_id = &obj->package.elements[0];
- union acpi_object *info = &obj->package.elements[1];
+ union acpi_object *connector_id;
+ union acpi_object *info;
+
+ if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) {
+ DRM_DEBUG_DRIVER("Invalid object for MUX #%d\n", i);
+ continue;
+ }
+
+ connector_id = &obj->package.elements[0];
+ info = &obj->package.elements[1];
+ if (info->type != ACPI_TYPE_BUFFER || info->buffer.length < 4) {
+ DRM_DEBUG_DRIVER("Invalid info for MUX obj #%d\n", i);
+ continue;
+ }
+
DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
(unsigned long long)connector_id->integer.value);
DRM_DEBUG_DRIVER(" port id: %s\n",
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index ae31836aa4ee..4e1c228b9e99 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -11893,10 +11893,11 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
case 10 ... 11:
bpp = 10 * 3;
break;
- case 12:
+ case 12 ... 16:
bpp = 12 * 3;
break;
default:
+ MISSING_CASE(conn_state->max_bpc);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 2efc317c90df..85583f914630 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -166,6 +166,12 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
+static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
+{
+ intel_dp->sink_rates[0] = 162000;
+ intel_dp->num_sink_rates = 1;
+}
+
/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
@@ -567,7 +573,7 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return 0;
}
/* Also take into account max slice width */
- min_slice_count = min_t(u8, min_slice_count,
+ min_slice_count = max_t(u8, min_slice_count,
DIV_ROUND_UP(mode_hdisplay,
max_slice_width));
@@ -3634,7 +3640,7 @@ static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
* link status information
*/
bool
-intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
+intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status)
{
return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
@@ -4261,6 +4267,9 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
*/
intel_psr_init_dpcd(intel_dp);
+ /* Clear the default sink rates */
+ intel_dp->num_sink_rates = 0;
+
/* Read the eDP 1.4+ supported link rates. */
if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
@@ -4706,7 +4715,18 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
bool bret;
if (intel_dp->is_mst) {
- u8 esi[DP_DPRX_ESI_LEN] = { 0 };
+ /*
+ * The +2 is because DP_DPRX_ESI_LEN is 14, but we then
+ * pass in "esi+10" to drm_dp_channel_eq_ok(), which
+ * takes a 6-byte array. So we actually need 16 bytes
+ * here.
+ *
+ * Somebody who knows what the limits actually are
+ * should check this, but for now this is at least
+ * harmless and avoids a valid compiler warning about
+ * using more of the array than we have allocated.
+ */
+ u8 esi[DP_DPRX_ESI_LEN+2] = {};
int ret = 0;
int retry;
bool handled;
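	/*
	 * Illustrative check, not in the patch: drm_dp_channel_eq_ok() reads
	 * DP_LINK_STATUS_SIZE (6) bytes starting at &esi[10], so the array
	 * must hold at least 10 + 6 = 16 bytes -- hence DP_DPRX_ESI_LEN (14)
	 * plus 2. A compile-time assertion along these lines would document
	 * the sizing:
	 */
	BUILD_BUG_ON(sizeof(esi) < 10 + DP_LINK_STATUS_SIZE);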
@@ -7156,6 +7176,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
return false;
intel_dp_set_source_rates(intel_dp);
+ intel_dp_set_default_sink_rates(intel_dp);
+ intel_dp_set_common_rates(intel_dp);
intel_dp->reset_link_params = true;
intel_dp->pps_pipe = INVALID_PIPE;
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index b030f7ae3302..4cf95e8031e3 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -2129,7 +2129,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits, force_dvi))
return MODE_CLOCK_HIGH;
- /* BXT DPLL can't generate 223-240 MHz */
+ /* GLK DPLL can't generate 446-480 MHz */
+ if (IS_GEMINILAKE(dev_priv) && clock > 446666 && clock < 480000)
+ return MODE_CLOCK_RANGE;
+
+ /* BXT/GLK DPLL can't generate 223-240 MHz */
if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000)
return MODE_CLOCK_RANGE;
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 29edfc343716..aeb7cb651fa4 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -913,6 +913,9 @@ static int check_overlay_dst(struct intel_overlay *overlay,
const struct intel_crtc_state *pipe_config =
overlay->crtc->config;
+ if (rec->dst_height == 0 || rec->dst_width == 0)
+ return -EINVAL;
+
if (rec->dst_x < pipe_config->pipe_src_w &&
rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w &&
rec->dst_y < pipe_config->pipe_src_h &&
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index bc14e9c0285a..23edc1b8e43f 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -1603,20 +1603,21 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
val = pch_get_backlight(connector);
else
val = lpt_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
if (cpu_mode) {
DRM_DEBUG_KMS("CPU backlight register was enabled, switching to PCH override\n");
/* Write converted CPU PWM value to PCH override register */
- lpt_set_backlight(connector->base.state, panel->backlight.level);
+ lpt_set_backlight(connector->base.state, val);
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 & ~BLM_PWM_ENABLE);
}
+ val = intel_panel_compute_brightness(connector, val);
+ panel->backlight.level = clamp(val, panel->backlight.min,
+ panel->backlight.max);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index a71b22bdd95b..7f329d8118a4 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -797,10 +797,20 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
if (intel_dsi->gpio_panel)
gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
- intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
- /* Deassert reset */
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ /*
+ * Give the panel time to power on and then deassert its reset.
+ * Depending on the VBT MIPI sequences version, the deassert-seq
+ * may already contain the necessary delay; intel_dsi_msleep() will
+ * skip the delay in that case. If there is no deassert-seq, an
+ * unconditional msleep is used to give the panel time to power on.
+ */
+ if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
+ intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ } else {
+ msleep(intel_dsi->panel_on_delay);
+ }
if (IS_GEMINILAKE(dev_priv)) {
glk_cold_boot = glk_dsi_enable_io(encoder);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 9c58e8fac1d9..a4b48c9abeac 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -606,21 +606,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
/*
- * Already in the desired write domain? Nothing for us to do!
- *
- * We apply a little bit of cunning here to catch a broader set of
- * no-ops. If obj->write_domain is set, we must be in the same
- * obj->read_domains, and only that domain. Therefore, if that
- * obj->write_domain matches the request read_domains, we are
- * already in the same read/write domain and can skip the operation,
- * without having to further check the requested write_domain.
- */
- if (READ_ONCE(obj->write_domain) == read_domains) {
- err = 0;
- goto out;
- }
-
- /*
* Try to flush the object off the GPU without holding the lock.
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
@@ -657,6 +642,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (err)
goto out;
+ /*
+ * Already in the desired write domain? Nothing for us to do!
+ *
+ * We apply a little bit of cunning here to catch a broader set of
+ * no-ops. If obj->write_domain is set, we must be in the same
+ * obj->read_domains, and only that domain. Therefore, if that
+ * obj->write_domain matches the request read_domains, we are
+ * already in the same read/write domain and can skip the operation,
+ * without having to further check the requested write_domain.
+ */
+ if (READ_ONCE(obj->write_domain) == read_domains)
+ goto out_unpin;
+
err = i915_gem_object_lock_interruptible(obj);
if (err)
goto out_unpin;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 3d8dff2d894a..5c21c06189c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -367,7 +367,7 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
return true;
if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
- (vma->node.start + vma->node.size - 1) >> 32)
+ (vma->node.start + vma->node.size + 4095) >> 32)
return true;
if (flags & __EXEC_OBJECT_NEEDS_MAP &&
@@ -936,7 +936,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
- __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(cache->rq->engine->gt);
@@ -1163,6 +1163,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto out_pool;
}
+ memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
+
batch = i915_vma_instance(pool->obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
@@ -1450,7 +1452,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
urelocs = u64_to_user_ptr(entry->relocs_ptr);
remain = entry->relocation_count;
- if (unlikely(remain > N_RELOC(ULONG_MAX)))
+ if (unlikely((unsigned long)remain > N_RELOC(ULONG_MAX)))
return -EINVAL;
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 05289edbafe3..876f59098f7e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -181,7 +181,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
struct i915_ggtt_view view;
if (i915_gem_object_is_tiled(obj))
- chunk = roundup(chunk, tile_row_pages(obj));
+ chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
view.type = I915_GGTT_VIEW_PARTIAL;
view.partial.offset = rounddown(page_offset, chunk);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 08b35587bc6d..352c102f3459 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -118,6 +118,9 @@ struct drm_i915_gem_object {
I915_SELFTEST_DECLARE(struct list_head st_link);
+ unsigned long flags;
+#define I915_BO_WAS_BOUND_BIT 0
+
/*
* Is the object to be mapped as read-only to the GPU
* Only honoured if hardware has relevant pte bit
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 18f0ce0135c1..aa63fa0ab575 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -8,6 +8,8 @@
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
+#include "gt/intel_gt.h"
+
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
unsigned int sg_page_sizes)
@@ -176,6 +178,14 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_reset_page_iter(obj);
obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+ if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm_if_in_use(&i915->runtime_pm, wakeref)
+ intel_gt_invalidate_tlbs(&i915->gt);
+ }
+
return pages;
}
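
/*
 * Descriptive note, not part of the patch: this test_and_clear_bit() is the
 * consumer side of I915_BO_WAS_BOUND_BIT. The producer side is the
 * i915_vma_bind() hunk further down, which sets the bit whenever a VMA of the
 * object is bound, so a TLB invalidation is issued at most once per
 * bind/unbind-pages cycle and only while a runtime-PM reference is held.
 */
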
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 4ce8626b140e..8073758d1036 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -354,7 +354,8 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
* instances.
*/
if ((INTEL_GEN(i915) >= 11 &&
- RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) ||
+ (RUNTIME_INFO(i915)->vdbox_sfc_access &
+ BIT(engine->instance))) ||
(INTEL_GEN(i915) >= 9 && engine->instance == 0))
engine->uabi_capabilities |=
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index d48ec9a76ed1..c8c070375d29 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -15,6 +15,8 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
spin_lock_init(&gt->irq_lock);
+ mutex_init(&gt->tlb_invalidate_lock);
+
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
@@ -266,3 +268,100 @@ void intel_gt_driver_late_release(struct intel_gt *gt)
intel_uc_driver_late_release(&gt->uc);
intel_gt_fini_reset(gt);
}
+
+struct reg_and_bit {
+ i915_reg_t reg;
+ u32 bit;
+};
+
+static struct reg_and_bit
+get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
+ const i915_reg_t *regs, const unsigned int num)
+{
+ const unsigned int class = engine->class;
+ struct reg_and_bit rb = { };
+
+ if (WARN_ON_ONCE(class >= num || !regs[class].reg))
+ return rb;
+
+ rb.reg = regs[class];
+ if (gen8 && class == VIDEO_DECODE_CLASS)
+ rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
+ else
+ rb.bit = engine->instance;
+
+ rb.bit = BIT(rb.bit);
+
+ return rb;
+}
+
+void intel_gt_invalidate_tlbs(struct intel_gt *gt)
+{
+ static const i915_reg_t gen8_regs[] = {
+ [RENDER_CLASS] = GEN8_RTCR,
+ [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */
+ [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR,
+ [COPY_ENGINE_CLASS] = GEN8_BTCR,
+ };
+ static const i915_reg_t gen12_regs[] = {
+ [RENDER_CLASS] = GEN12_GFX_TLB_INV_CR,
+ [VIDEO_DECODE_CLASS] = GEN12_VD_TLB_INV_CR,
+ [VIDEO_ENHANCEMENT_CLASS] = GEN12_VE_TLB_INV_CR,
+ [COPY_ENGINE_CLASS] = GEN12_BLT_TLB_INV_CR,
+ };
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ const i915_reg_t *regs;
+ unsigned int num = 0;
+
+ if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+ return;
+
+ if (INTEL_GEN(i915) == 12) {
+ regs = gen12_regs;
+ num = ARRAY_SIZE(gen12_regs);
+ } else if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) <= 11) {
+ regs = gen8_regs;
+ num = ARRAY_SIZE(gen8_regs);
+ } else if (INTEL_GEN(i915) < 8) {
+ return;
+ }
+
+ if (WARN_ONCE(!num, "Platform does not implement TLB invalidation!"))
+ return;
+
+ GEM_TRACE("\n");
+
+ assert_rpm_wakelock_held(&i915->runtime_pm);
+
+ mutex_lock(&gt->tlb_invalidate_lock);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ for_each_engine(engine, gt, id) {
+ /*
+ * The HW architecture suggests a typical invalidation time of 40us,
+ * with pessimistic cases up to 100us and a recommendation to
+ * cap at 1ms. We go a bit higher just in case.
+ */
+ const unsigned int timeout_us = 100;
+ const unsigned int timeout_ms = 4;
+ struct reg_and_bit rb;
+
+ rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
+ if (!i915_mmio_reg_offset(rb.reg))
+ continue;
+
+ intel_uncore_write_fw(uncore, rb.reg, rb.bit);
+ if (__intel_wait_for_register_fw(uncore,
+ rb.reg, rb.bit, 0,
+ timeout_us, timeout_ms,
+ NULL))
+ DRM_ERROR_RATELIMITED("%s TLB invalidation did not complete in %ums!\n",
+ engine->name, timeout_ms);
+ }
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+ mutex_unlock(&gt->tlb_invalidate_lock);
+}
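
/*
 * Worked example of get_reg_and_bit() above, derived from this code and the
 * register definitions added to i915_reg.h later in this diff:
 *   gen8,  VCS0 (VIDEO_DECODE_CLASS, instance 0): reg = GEN8_M1TCR, bit = BIT(0)
 *   gen8,  VCS1 (instance 1): reg = GEN8_M1TCR + 4 = GEN8_M2TCR,    bit = BIT(0)
 *   gen12, VCS1: reg = GEN12_VD_TLB_INV_CR,                         bit = BIT(1)
 * On gen8 each video engine has its own register, while on gen12 the engines
 * of a class share one register and are selected by their instance bit.
 */
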
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 4920cb351f10..4eab15bdcd97 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -57,4 +57,6 @@ static inline bool intel_gt_is_wedged(struct intel_gt *gt)
void intel_gt_queue_hangcheck(struct intel_gt *gt);
+void intel_gt_invalidate_tlbs(struct intel_gt *gt);
+
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index dc295c196d11..82a78719b32d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -40,6 +40,8 @@ struct intel_gt {
struct intel_uc uc;
+ struct mutex tlb_invalidate_lock;
+
struct intel_gt_timelines {
spinlock_t lock; /* protects active_list */
struct list_head active_list;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index c169f0f70f3a..398068b17389 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1574,6 +1574,9 @@ static void process_csb(struct intel_engine_cs *engine)
if (!inject_preempt_hang(execlists))
ring_set_paused(engine, 0);
+ /* XXX Magic delay for tgl */
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
WRITE_ONCE(execlists->pending[0], NULL);
break;
@@ -2242,6 +2245,9 @@ err:
static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
{
i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+
+ /* Called on error unwind, clear all flags to prevent further use */
+ memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx));
}
typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index cea184a7dde9..e97a2aa31485 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -130,7 +130,19 @@ static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
GEN9_MOCS_ENTRIES,
MOCS_ENTRY(I915_MOCS_CACHED,
LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
- L3_3_WB)
+ L3_3_WB),
+
+ /*
+ * mocs:63
+ * - used by the L3 for all of its evictions.
+ * Thus it is expected to allow LLC cacheability to enable coherent
+ * flows to be maintained.
+ * - used to force L3 uncacheable cycles.
+ * Thus it is expected to make the surface L3 uncacheable.
+ */
+ MOCS_ENTRY(63,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC)
};
/* NOTE: the LE_TGT_CACHE is not used on Broxton */
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 9cb01d9828f1..c970e3deb008 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -289,6 +289,14 @@ void intel_timeline_fini(struct intel_timeline *timeline)
i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
i915_vma_put(timeline->hwsp_ggtt);
+
+ /*
+ * A small race exists between intel_gt_retire_requests_timeout and
+ * intel_timeline_exit which could result in the syncmap not getting
+ * freed. Rather than work too hard to seal this race, simply clean up
+ * the syncmap on fini.
+ */
+ i915_syncmap_free(&timeline->sync);
}
struct intel_timeline *
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 59aa5e64acb0..21a562c2b1f5 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -172,21 +172,176 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
int pipe;
if (IS_BROXTON(dev_priv)) {
+ enum transcoder trans;
+ enum port port;
+
+ /* Clear PIPE, DDI, PHY, HPD before setting new */
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA |
BXT_DE_PORT_HP_DDIB |
BXT_DE_PORT_HP_DDIC);
+ for_each_pipe(dev_priv, pipe) {
+ vgpu_vreg_t(vgpu, PIPECONF(pipe)) &=
+ ~(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE);
+ vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
+ vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
+ vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE;
+ vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE;
+ }
+
+ for (trans = TRANSCODER_A; trans <= TRANSCODER_EDP; trans++) {
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(trans)) &=
+ ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
+ TRANS_DDI_PORT_MASK | TRANS_DDI_FUNC_ENABLE);
+ }
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+ ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
+ TRANS_DDI_PORT_MASK);
+
+ for (port = PORT_A; port <= PORT_C; port++) {
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) &=
+ ~BXT_PHY_LANE_ENABLED;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) |=
+ (BXT_PHY_CMNLANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_POWERDOWN_ACK);
+
+ vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port)) &=
+ ~(PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE |
+ PORT_PLL_REF_SEL | PORT_PLL_LOCK |
+ PORT_PLL_ENABLE);
+
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) &=
+ ~(DDI_INIT_DISPLAY_DETECTED |
+ DDI_BUF_CTL_ENABLE);
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE;
+ }
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~(PORTA_HOTPLUG_ENABLE | PORTA_HOTPLUG_STATUS_MASK);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~(PORTB_HOTPLUG_ENABLE | PORTB_HOTPLUG_STATUS_MASK);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~(PORTC_HOTPLUG_ENABLE | PORTC_HOTPLUG_STATUS_MASK);
+ /* No hpd_invert set in vgpu vbt, need to clear invert mask */
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~BXT_DDI_HPD_INVERT_MASK;
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~BXT_DE_PORT_HOTPLUG_MASK;
+
+ vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1));
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
+ ~PHY_POWER_GOOD;
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
+ ~PHY_POWER_GOOD;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &= ~BIT(30);
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &= ~BIT(30);
+
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIB_DETECTED;
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIC_DETECTED;
+
+ /*
+ * Only one pipe is enabled in the current vGPU display, and PIPE_A
+ * is tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A and
+ * TRANSCODER_A can be enabled. Which PORT_x is used depends on the
+ * input of setup_virtual_dp_monitor.
+ */
+ vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
+ vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= I965_PIPECONF_ACTIVE;
+
+ /*
+ * Golden M/N are calculated based on:
+ * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID),
+ * DP link clk 1620 MHz and non-constant_n.
+ * TODO: calculate DP link symbol clk and stream clk m/n.
+ */
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT;
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e;
+ vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000;
+ vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e;
+ vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000;
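+		/*
+		 * Illustrative arithmetic, not part of the patch: using the
+		 * usual i915 convention of expressing the 1.62 GHz link rate
+		 * as 162000, the data M/N ratio implied by the comment above is
+		 *   (bpp * pixel_clock) / (8 * lanes * link_clock)
+		 *     = (24 * 154000) / (8 * 4 * 162000) ~= 0.713,
+		 * which matches 0x5b425e / 0x800000 ~= 0.713 programmed into
+		 * PIPE_DATA_M1/N1. The link M/N values are taken from the
+		 * golden state as-is and are not re-derived here.
+		 */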
+
+ /* Enable per-DDI/PORT vreg */
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(1);
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
+ PHY_POWER_GOOD;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) |=
+ BIT(30);
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |=
+ BXT_PHY_LANE_ENABLED;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &=
+ ~(BXT_PHY_CMNLANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_POWERDOWN_ACK);
+ vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_A)) |=
+ (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE |
+ PORT_PLL_REF_SEL | PORT_PLL_LOCK |
+ PORT_PLL_ENABLE);
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |=
+ (DDI_BUF_CTL_ENABLE | DDI_INIT_DISPLAY_DETECTED);
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) &=
+ ~DDI_BUF_IS_IDLE;
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |=
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTA_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
BXT_DE_PORT_HP_DDIA;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+ vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0);
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
+ PHY_POWER_GOOD;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |=
+ BIT(30);
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |=
+ BXT_PHY_LANE_ENABLED;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &=
+ ~(BXT_PHY_CMNLANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_POWERDOWN_ACK);
+ vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_B)) |=
+ (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE |
+ PORT_PLL_REF_SEL | PORT_PLL_LOCK |
+ PORT_PLL_ENABLE);
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |=
+ DDI_BUF_CTL_ENABLE;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &=
+ ~DDI_BUF_IS_IDLE;
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (PORT_B << TRANS_DDI_PORT_SHIFT) |
+ TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTB_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
BXT_DE_PORT_HP_DDIB;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
+ vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0);
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
+ PHY_POWER_GOOD;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |=
+ BIT(30);
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
+ BXT_PHY_LANE_ENABLED;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &=
+ ~(BXT_PHY_CMNLANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_POWERDOWN_ACK);
+ vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_C)) |=
+ (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE |
+ PORT_PLL_REF_SEL | PORT_PLL_LOCK |
+ PORT_PLL_ENABLE);
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |=
+ DDI_BUF_CTL_ENABLE;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &=
+ ~DDI_BUF_IS_IDLE;
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (PORT_B << TRANS_DDI_PORT_SHIFT) |
+ TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTC_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
BXT_DE_PORT_HP_DDIC;
}
@@ -511,6 +666,63 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
PORTD_HOTPLUG_STATUS_MASK;
intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
+ } else if (IS_BROXTON(dev_priv)) {
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ if (connected) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ BXT_DE_PORT_HP_DDIA;
+ } else {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
+ ~BXT_DE_PORT_HP_DDIA;
+ }
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+ BXT_DE_PORT_HP_DDIA;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~PORTA_HOTPLUG_STATUS_MASK;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTA_HOTPLUG_LONG_DETECT;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_A_HOTPLUG);
+ }
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ if (connected) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ BXT_DE_PORT_HP_DDIB;
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+ SFUSE_STRAP_DDIB_DETECTED;
+ } else {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
+ ~BXT_DE_PORT_HP_DDIB;
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
+ ~SFUSE_STRAP_DDIB_DETECTED;
+ }
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+ BXT_DE_PORT_HP_DDIB;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~PORTB_HOTPLUG_STATUS_MASK;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTB_HOTPLUG_LONG_DETECT;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
+ }
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+ if (connected) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ BXT_DE_PORT_HP_DDIC;
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+ SFUSE_STRAP_DDIC_DETECTED;
+ } else {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
+ ~BXT_DE_PORT_HP_DDIC;
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
+ ~SFUSE_STRAP_DDIC_DETECTED;
+ }
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+ BXT_DE_PORT_HP_DDIC;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~PORTC_HOTPLUG_STATUS_MASK;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTC_HOTPLUG_LONG_DETECT;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_C_HOTPLUG);
+ }
}
}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 8f37eefa0a02..a738c7e456dd 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -128,7 +128,7 @@ static bool intel_get_gvt_attrs(struct attribute ***type_attrs,
return true;
}
-static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
{
int i, j;
struct intel_vgpu_type *type;
@@ -146,7 +146,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
gvt_vgpu_type_groups[i] = group;
}
- return true;
+ return 0;
unwind:
for (j = 0; j < i; j++) {
@@ -154,7 +154,7 @@ unwind:
kfree(group);
}
- return false;
+ return -ENOMEM;
}
static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
@@ -362,7 +362,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
goto out_clean_thread;
ret = intel_gvt_init_vgpu_type_groups(gvt);
- if (ret == false) {
+ if (ret) {
gvt_err("failed to init vgpu type groups: %d\n", ret);
goto out_clean_types;
}
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 689b07bc91c4..245c20d36f1b 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1632,6 +1632,34 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
return 0;
}
+/**
+ * FixMe:
+ * If a guest fills a non-privileged batch buffer on ApolloLake/Broxton the
+ * way Mesa i965 does since commit 717e7539124d ("i965: Use a WC map and
+ * memcpy for the batch instead of pwrite."), the host GPU hangs while
+ * executing those MI_BATCH_BUFFERs because the buffer filled by the VM vCPU
+ * is never flushed.
+ * Temporarily work around this by setting the SNOOP bit for PAT3, which is
+ * used by the PPGTT PML4 PTE: PAT(0) PCD(1) PWT(1).
+ * Performance is still expected to be low and will need further improvement.
+ */
+static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u64 pat =
+ GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(1, 0) |
+ GEN8_PPAT(2, 0) |
+ GEN8_PPAT(3, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+ vgpu_vreg(vgpu, offset) = lower_32_bits(pat);
+
+ return 0;
+}
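
/*
 * Resulting value, for illustration: assuming GEN8_PPAT(i, x) shifts x into
 * byte i and CHV_PPAT_SNOOP is bit 6, entries 0 and 3 of the low dword carry
 * the snoop bit, so lower_32_bits(pat) == 0x40000040 is what lands in the
 * vreg backing GEN8_PRIVATE_PAT_LO.
 */
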
+
static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
@@ -2778,7 +2806,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
- MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
+ MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT);
MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
MMIO_D(GAMTARBMODE, D_BDW_PLUS);
@@ -3104,7 +3132,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
NULL, NULL);
MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL);
- MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
+ MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT);
return 0;
}
@@ -3278,9 +3306,17 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
MMIO_D(GEN6_GFXPAUSE, D_BXT);
MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
+ MMIO_F(HSW_CS_GPR(0), 0x40, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL);
+ MMIO_F(_MMIO(0x12600), 0x40, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL);
+ MMIO_F(BCS_GPR(0), 0x40, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL);
+ MMIO_F(_MMIO(0x1a600), 0x40, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL);
MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index a55178884d67..e0e7adc545a5 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -271,6 +271,11 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK;
+ vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
+ SKL_FUSE_DOWNLOAD_STATUS |
+ SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
+ SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
+ SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
}
} else {
#define GVT_GEN8_MMIO_RESET_OFFSET (0x44200)
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 32e57635709a..4deb7fec5eb5 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -432,8 +432,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- /*TODO: add more platforms support */
- if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+ if (IS_BROADWELL(gvt->dev_priv) || IS_BROXTON(gvt->dev_priv))
+ ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
+ else
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
if (ret)
goto out_clean_sched_policy;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 89b6112bd66b..00335a1c02b0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -33,6 +33,8 @@
#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>
+#include <asm/hypervisor.h>
+
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -1892,7 +1894,7 @@ intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
const unsigned int pi = __platform_mask_index(info, p);
- return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS;
+ return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1);
}
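
/*
 * Why the mask change matters (illustrative; the actual value of
 * INTEL_SUBPLATFORM_BITS is not shown in this diff): masking with the bit
 * count itself only keeps whichever bits happen to be set in that number.
 * If INTEL_SUBPLATFORM_BITS were 2, the old code masked with 0b10 and
 * silently dropped bit 0, whereas the intended "low n bits" mask is
 * (1 << 2) - 1 == 0b11.
 */
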
static __always_inline bool
@@ -2197,7 +2199,9 @@ static inline bool intel_vtd_active(void)
if (intel_iommu_gfx_mapped)
return true;
#endif
- return false;
+
+ /* Running as a guest, we assume the host is enforcing VT-d */
+ return !hypervisor_is_type(X86_HYPER_NATIVE);
}
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index fe9edbba997c..b9af329eb3a3 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -307,6 +307,8 @@ static int compress_page(struct compress *c,
if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
return -EIO;
+
+ cond_resched();
} while (zstream->avail_in);
/* Fallback to uncompressed if we increase size? */
@@ -392,6 +394,7 @@ static int compress_page(struct compress *c,
if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
memcpy(ptr, src, PAGE_SIZE);
dst->pages[dst->page_count++] = ptr;
+ cond_resched();
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7b6e68f082f8..1386d0f5eac6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2519,6 +2519,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28)
#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24)
+#define GEN8_RTCR _MMIO(0x4260)
+#define GEN8_M1TCR _MMIO(0x4264)
+#define GEN8_M2TCR _MMIO(0x4268)
+#define GEN8_BTCR _MMIO(0x426c)
+#define GEN8_VTCR _MMIO(0x4270)
+
#if 0
#define PRB0_TAIL _MMIO(0x2030)
#define PRB0_HEAD _MMIO(0x2034)
@@ -2602,6 +2608,11 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define FAULT_VA_HIGH_BITS (0xf << 0)
#define FAULT_GTT_SEL (1 << 4)
+#define GEN12_GFX_TLB_INV_CR _MMIO(0xced8)
+#define GEN12_VD_TLB_INV_CR _MMIO(0xcedc)
+#define GEN12_VE_TLB_INV_CR _MMIO(0xcee0)
+#define GEN12_BLT_TLB_INV_CR _MMIO(0xcee4)
+
#define FPGA_DBG _MMIO(0x42300)
#define FPGA_DBG_RM_NOCLAIM (1 << 31)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e0e677b2a3a9..c24f49ee10d7 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -341,6 +341,10 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
return ret;
vma->flags |= bind_flags;
+
+ if (vma->obj)
+ set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ade607d93e45..d59455b2d401 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2822,7 +2822,7 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
}
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[8])
+ u16 wm[])
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -2966,7 +2966,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
const char *name,
- const u16 wm[8])
+ const u16 wm[])
{
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -3017,9 +3017,9 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
* The BIOS provided WM memory latency values are often
* inadequate for high resolution displays. Adjust them.
*/
- changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
- ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
- ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
+ changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12);
+ changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12);
+ changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
if (!changed)
return;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 9e583f13a9e4..62f8a47fda45 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1124,6 +1124,18 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
spin_unlock(&uncore->debug->lock);
}
+#define __vgpu_read(x) \
+static u##x \
+vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
+ u##x val = __raw_uncore_read##x(uncore, reg); \
+ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+ return val; \
+}
+__vgpu_read(8)
+__vgpu_read(16)
+__vgpu_read(32)
+__vgpu_read(64)
+
#define GEN2_READ_HEADER(x) \
u##x val = 0; \
assert_rpm_wakelock_held(uncore->rpm);
@@ -1327,6 +1339,16 @@ __gen_reg_write_funcs(gen8);
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
+#define __vgpu_write(x) \
+static void \
+vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
+ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+ __raw_uncore_write##x(uncore, reg, val); \
+}
+__vgpu_write(8)
+__vgpu_write(16)
+__vgpu_write(32)
+
#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
(uncore)->funcs.mmio_writeb = x##_write8; \
@@ -1647,7 +1669,10 @@ static void uncore_raw_init(struct intel_uncore *uncore)
{
GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
- if (IS_GEN(uncore->i915, 5)) {
+ if (intel_vgpu_active(uncore->i915)) {
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
+ } else if (IS_GEN(uncore->i915, 5)) {
ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
} else {
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index d6629fc869f3..116473c2360a 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -197,6 +197,11 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
+ if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+ dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+ return;
+ }
+
drm_panel_prepare(imx_ldb_ch->panel);
if (dual) {
@@ -255,6 +260,11 @@ imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder,
int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
u32 bus_format = imx_ldb_ch->bus_format;
+ if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+ dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+ return;
+ }
+
if (mode->clock > 170000) {
dev_warn(ldb->dev,
"%s: mode exceeds 170 MHz pixel clock\n", __func__);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 2256c9789fc2..f19264e91d4d 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -68,7 +68,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
if (plane == &ipu_crtc->plane[0]->base)
disable_full = true;
- if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
+ if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
disable_partial = true;
}
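
/*
 * Note on the fix above: "&ipu_crtc->plane[1]" is the address of an array
 * slot and is therefore always non-NULL, so the old condition could never
 * detect a missing second plane; testing the stored pointer
 * "ipu_crtc->plane[1]" is what was intended.
 */
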
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index be55548f352a..e24272428744 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -68,8 +68,10 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
ret = of_get_drm_display_mode(np, &imxpd->mode,
&imxpd->bus_flags,
OF_USE_NATIVE_MODE);
- if (ret)
+ if (ret) {
+ drm_mode_destroy(connector->dev, mode);
return ret;
+ }
drm_mode_copy(mode, &imxpd->mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
index d86b8d81a483..155971c57b2d 100644
--- a/drivers/gpu/drm/lima/lima_device.c
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -293,6 +293,7 @@ int lima_device_init(struct lima_device *ldev)
struct resource *res;
dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32));
+ dma_set_max_seg_size(ldev->dev, UINT_MAX);
err = lima_clk_init(ldev);
if (err)
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 16e5fb9ec784..82946ffcb6d2 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -410,8 +410,8 @@ static int mcde_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- ret = -EINVAL;
+ if (irq < 0) {
+ ret = irq;
goto clk_disable;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
index cb29b649fcdb..12bf93769497 100644
--- a/drivers/gpu/drm/mediatek/mtk_cec.c
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -84,7 +84,7 @@ static void mtk_cec_mask(struct mtk_cec *cec, unsigned int offset,
u32 tmp = readl(cec->regs + offset) & ~mask;
tmp |= val & mask;
- writel(val, cec->regs + offset);
+ writel(tmp, cec->regs + offset);
}
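
/*
 * Note on the fix above: the helper computes a read-modify-write value in
 * "tmp" but previously wrote back the raw "val", clobbering every bit
 * outside "mask"; writing "tmp" preserves the unmasked bits as intended.
 */
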
void mtk_cec_set_hpd_event(struct device *dev,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index f9455f2724d2..f370d41b3d04 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -240,7 +240,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
drm_connector_list_iter_end(&conn_iter);
}
- ret = pm_runtime_get_sync(crtc->dev->dev);
+ ret = pm_runtime_resume_and_get(crtc->dev->dev);
if (ret < 0) {
DRM_ERROR("Failed to enable power domain: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
index 5223498502c4..23a74eb5d7f8 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
@@ -84,8 +84,9 @@ mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy)
hdmi_phy->conf->hdmi_phy_disable_tmds)
return &mtk_hdmi_phy_dev_ops;
- dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n");
- return NULL;
+ if (hdmi_phy)
+ dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n");
+ return NULL;
}
static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy,
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index a24f8dec5adc..61a6536e7e61 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -420,6 +420,17 @@ static int meson_probe_remote(struct platform_device *pdev,
return count;
}
+static void meson_drv_shutdown(struct platform_device *pdev)
+{
+ struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
+
+ if (!priv)
+ return;
+
+ drm_kms_helper_poll_fini(priv->drm);
+ drm_atomic_helper_shutdown(priv->drm);
+}
+
static int meson_drv_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
@@ -469,6 +480,7 @@ MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver meson_drm_platform_driver = {
.probe = meson_drv_probe,
+ .shutdown = meson_drv_shutdown,
.driver = {
.name = "meson-drm",
.of_match_table = dt_match,
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index 05fce48ceee0..f7da816a5562 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -590,6 +590,11 @@
#define VPP_WRAP_OSD3_MATRIX_PRE_OFFSET2 0x3dbc
#define VPP_WRAP_OSD3_MATRIX_EN_CTRL 0x3dbd
+/* osd1 HDR */
+#define OSD1_HDR2_CTRL 0x38a0
+#define OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN BIT(13)
+#define OSD1_HDR2_CTRL_REG_ONLY_MAT BIT(16)
+
/* osd2 scaler */
#define OSD2_VSC_PHASE_STEP 0x3d00
#define OSD2_VSC_INI_PHASE 0x3d01
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 68cf2c2eca5f..33698814c022 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -356,9 +356,14 @@ void meson_viu_init(struct meson_drm *priv)
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
meson_viu_load_matrix(priv);
- else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff,
true);
+ /* fix green/pink color distortion from vendor u-boot */
+ writel_bits_relaxed(OSD1_HDR2_CTRL_REG_ONLY_MAT |
+ OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN, 0,
+ priv->io_base + _REG(OSD1_HDR2_CTRL));
+ }
/* Initialize OSD1 fifo control register */
reg = VIU_OSD_DDR_PRIORITY_URGENT |
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index c8fb21cc0d6f..e3579e5ffa14 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -581,8 +581,6 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
- gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
-
/* Enable USE_RETENTION_FLOPS */
gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
@@ -1133,8 +1131,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
- *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
- REG_A5XX_RBBM_PERFCTR_CP_0_HI);
+ *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+ REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
return 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index a3a06db675ba..ee3ff32da004 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -300,7 +300,7 @@ int a5xx_power_init(struct msm_gpu *gpu)
/* Set up the limits management */
if (adreno_is_a530(adreno_gpu))
a530_lm_setup(gpu);
- else
+ else if (adreno_is_a540(adreno_gpu))
a540_lm_setup(gpu);
/* Set up SP/TP power collpase */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index ab75f0309d4b..a3ae6c1d341b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -773,8 +773,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
/* Force the GPU power on so we can read this register */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
- *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
- REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+ *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+ REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
return 0;
@@ -891,6 +891,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
BUG_ON(!node);
ret = a6xx_gmu_init(a6xx_gpu, node);
+ of_node_put(node);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 691c1a277d91..dfcbb2b7cdda 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -834,7 +834,7 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
int i;
a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
- sizeof(a6xx_state->indexed_regs));
+ sizeof(*a6xx_state->indexed_regs));
if (!a6xx_state->indexed_regs)
return;
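
/*
 * Note on the fix above: sizeof(a6xx_state->indexed_regs) is the size of the
 * pointer, not of one array element, so the old allocation was too small;
 * sizeof(*a6xx_state->indexed_regs) sizes each of the "count" elements
 * correctly.
 */
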
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 36c85c05b7cf..4aed5e9a84a4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -819,7 +819,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_plane *plane;
struct drm_display_mode *mode;
- int cnt = 0, rc = 0, mixer_width, i, z_pos;
+ int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;
struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
int multirect_count = 0;
@@ -852,9 +852,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
memset(pipe_staged, 0, sizeof(pipe_staged));
- mixer_width = mode->hdisplay / cstate->num_mixers;
+ if (cstate->num_mixers) {
+ mixer_width = mode->hdisplay / cstate->num_mixers;
- _dpu_crtc_setup_lm_bounds(crtc, state);
+ _dpu_crtc_setup_lm_bounds(crtc, state);
+ }
crtc_rect.x2 = mode->hdisplay;
crtc_rect.y2 = mode->vdisplay;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 179e8d52cadb..a08ca7a47400 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -281,10 +281,12 @@ static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
int i;
for (i = 0; i < ctx->mixer_count; i++) {
- DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
- DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
- DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
- DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
+ enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
+
+ DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
}
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 4f8b813aab81..8256f06218d0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -137,11 +137,13 @@ static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
u32 *idx)
{
int rc = 0;
- const struct dpu_sspp_sub_blks *sblk = ctx->cap->sblk;
+ const struct dpu_sspp_sub_blks *sblk;
- if (!ctx)
+ if (!ctx || !ctx->cap || !ctx->cap->sblk)
return -EINVAL;
+ sblk = ctx->cap->sblk;
+
switch (s_id) {
case DPU_SSPP_SRC:
*idx = sblk->src_blk.base;
@@ -404,7 +406,7 @@ static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx,
(void)pe;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp
- || !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk)
+ || !scaler3_cfg)
return;
dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 58b0485dc375..c08c67338d73 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -88,8 +88,8 @@ static int _dpu_danger_signal_status(struct seq_file *s,
&status);
} else {
seq_puts(s, "\nSafe signal status:\n");
- if (kms->hw_mdp->ops.get_danger_status)
- kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+ if (kms->hw_mdp->ops.get_safe_status)
+ kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
&status);
}
pm_runtime_put_sync(&kms->pdev->dev);
@@ -599,8 +599,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
- if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
+ if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) {
dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
+ dpu_kms->hw_vbif[vbif_idx] = NULL;
+ }
}
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 50711ccc8691..f0a5767b69f5 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -88,8 +88,6 @@ static int mdp4_hw_init(struct msm_kms *kms)
if (mdp4_kms->rev > 1)
mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
- dev->mode_config.allow_fb_modifiers = true;
-
out:
pm_runtime_put_sync(dev->dev);
@@ -110,13 +108,6 @@ static void mdp4_disable_commit(struct msm_kms *kms)
static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
- int i;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
-
- /* see 119ecb7fd */
- for_each_new_crtc_in_state(state, crtc, crtc_state, i)
- drm_crtc_vblank_get(crtc);
}
static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
@@ -135,12 +126,6 @@ static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
- struct drm_crtc *crtc;
-
- /* see 119ecb7fd */
- for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
- drm_crtc_vblank_put(crtc);
}
static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
@@ -260,6 +245,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
encoder = mdp4_lcdc_encoder_init(dev, panel_node);
if (IS_ERR(encoder)) {
DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
+ of_node_put(panel_node);
return PTR_ERR(encoder);
}
@@ -269,6 +255,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
if (IS_ERR(connector)) {
DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
+ of_node_put(panel_node);
return PTR_ERR(connector);
}
@@ -420,6 +407,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
struct mdp4_platform_config *config = mdp4_get_config(pdev);
+ struct msm_drm_private *priv = dev->dev_private;
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
struct msm_gem_address_space *aspace;
@@ -434,7 +422,8 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdp_kms_init(&mdp4_kms->base, &kms_funcs);
- kms = &mdp4_kms->base.base;
+ priv->kms = &mdp4_kms->base.base;
+ kms = priv->kms;
mdp4_kms->dev = dev;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index da3cc1d8c331..ee1dbb2b84af 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -347,6 +347,12 @@ enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
return mdp4_plane->pipe;
}
+static const uint64_t supported_format_modifiers[] = {
+ DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
/* initialize plane */
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
enum mdp4_pipe pipe_id, bool private_plane)
@@ -375,7 +381,7 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
mdp4_plane->formats, mdp4_plane->nformats,
- NULL, type, NULL);
+ supported_format_modifiers, type, NULL);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index eeef41fcd4e1..0425400f44db 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -41,7 +41,7 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
{
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct device *dev = encoder->dev->dev;
- u32 total_lines_x100, vclks_line, cfg;
+ u32 total_lines, vclks_line, cfg;
long vsync_clk_speed;
struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
int pp_id = mixer->pp;
@@ -51,8 +51,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
return -EINVAL;
}
- total_lines_x100 = mode->vtotal * drm_mode_vrefresh(mode);
- if (!total_lines_x100) {
+ total_lines = mode->vtotal * drm_mode_vrefresh(mode);
+ if (!total_lines) {
DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
__func__, mode->vtotal, drm_mode_vrefresh(mode));
return -EINVAL;
@@ -64,15 +64,23 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
vsync_clk_speed);
return -EINVAL;
}
- vclks_line = vsync_clk_speed * 100 / total_lines_x100;
+ vclks_line = vsync_clk_speed / total_lines;
cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
| MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
+ /*
+ * Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on
+ * the vsync_clk, equating to roughly half the desired panel refresh rate.
+ * This is only necessary as a stability fallback if interrupts from the
+ * panel arrive too late or not at all, but it is currently used by default
+ * because these panel interrupts are not wired up yet.
+ */
mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
mdp5_write(mdp5_kms,
- REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0);
+ REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal));
+
mdp5_write(mdp5_kms,
REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
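A minimal userspace sketch of the tearcheck arithmetic described in the comment above. The 19.2 MHz vsync clock and the 1125-line, 60 Hz mode below are illustrative assumptions, not values taken from the driver.

#include <stdio.h>

int main(void)
{
        /* Illustrative values only: a 19.2 MHz vsync clock and a 1125-line, 60 Hz mode. */
        const unsigned int vsync_clk_hz = 19200000;
        const unsigned int vtotal = 1125;
        const unsigned int vrefresh = 60;

        unsigned int total_lines = vtotal * vrefresh;          /* lines drawn per second */
        unsigned int vclks_line = vsync_clk_hz / total_lines;  /* vsync clocks per line */

        /*
         * With SYNC_CONFIG_HEIGHT programmed to 2 * vtotal, the counter wraps every
         * vclks_line * vtotal * 2 ticks, i.e. at roughly half the panel refresh
         * rate, matching the comment added in pingpong_tearcheck_setup().
         */
        double fallback_hz = (double)vsync_clk_hz / ((double)vclks_line * vtotal * 2);

        printf("vclks_line=%u, fallback signal at %.2f Hz (vrefresh/2 = %u)\n",
               vclks_line, fallback_hz, vrefresh / 2);
        return 0;
}

With these example numbers the fallback blanking runs at about 30 Hz, half of the 60 Hz refresh.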
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 03c6d6157e4d..03d60eb09257 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -534,9 +534,15 @@ int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
if (ret)
return ret;
- mdp5_mixer_release(new_crtc_state->state, old_mixer);
+ ret = mdp5_mixer_release(new_crtc_state->state, old_mixer);
+ if (ret)
+ return ret;
+
if (old_r_mixer) {
- mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
+ ret = mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
+ if (ret)
+ return ret;
+
if (!need_right_mixer)
pipeline->r_mixer = NULL;
}
@@ -903,8 +909,10 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
&mdp5_crtc->cursor.iova);
- if (ret)
+ if (ret) {
+ drm_gem_object_put(cursor_bo);
return -EINVAL;
+ }
pm_runtime_get_sync(&pdev->dev);
@@ -1099,7 +1107,7 @@ static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
pp_done);
- complete(&mdp5_crtc->pp_completion);
+ complete_all(&mdp5_crtc->pp_completion);
}
static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 77823ccdd0f8..39d0082eedcc 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -698,9 +698,9 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
pdev = mdp5_kms->pdev;
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (irq < 0) {
- ret = irq;
- DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
+ if (!irq) {
+ ret = -EINVAL;
+ DRM_DEV_ERROR(&pdev->dev, "failed to get irq\n");
goto fail;
}
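The corrected check above (and the matching hdmi.c change later in this patch) follows from irq_of_parse_and_map() returning an unsigned IRQ number, with 0 meaning failure, so a negative test can never fire. A small hedged sketch of that pattern; parse_and_map_stub() is a made-up stand-in, not the real API.

#include <stdio.h>

/* Stand-in for irq_of_parse_and_map(): returns an unsigned IRQ number, 0 on failure. */
static unsigned int parse_and_map_stub(int dt_has_irq)
{
        return dt_has_irq ? 42 : 0;
}

int main(void)
{
        unsigned int irq = parse_and_map_stub(0);

        /*
         * The old "irq < 0" test can never fire for an unsigned return value;
         * checking for 0 is what the hunk above switches to.
         */
        if (!irq) {
                fprintf(stderr, "failed to get irq\n");
                return 1;   /* the driver returns -EINVAL and jumps to its fail label */
        }

        printf("got irq %u\n", irq);
        return 0;
}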
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
index 954db683ae44..2536def2a000 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
@@ -116,21 +116,28 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
return 0;
}
-void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
+int mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
{
struct mdp5_global_state *global_state = mdp5_get_global_state(s);
- struct mdp5_hw_mixer_state *new_state = &global_state->hwmixer;
+ struct mdp5_hw_mixer_state *new_state;
if (!mixer)
- return;
+ return 0;
+
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ new_state = &global_state->hwmixer;
if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx]))
- return;
+ return -EINVAL;
DBG("%s: release from crtc %s", mixer->name,
new_state->hwmixer_to_crtc[mixer->idx]->name);
new_state->hwmixer_to_crtc[mixer->idx] = NULL;
+
+ return 0;
}
void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
index 43c9ba43ce18..545ee223b9d7 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
@@ -30,7 +30,7 @@ void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm);
int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
uint32_t caps, struct mdp5_hw_mixer **mixer,
struct mdp5_hw_mixer **r_mixer);
-void mdp5_mixer_release(struct drm_atomic_state *s,
- struct mdp5_hw_mixer *mixer);
+int mdp5_mixer_release(struct drm_atomic_state *s,
+ struct mdp5_hw_mixer *mixer);
#endif /* __MDP5_LM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
index ba6695963aa6..a4f5cb90f3e8 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
@@ -119,18 +119,23 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
return 0;
}
-void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
+int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
struct mdp5_global_state *state = mdp5_get_global_state(s);
- struct mdp5_hw_pipe_state *new_state = &state->hwpipe;
+ struct mdp5_hw_pipe_state *new_state;
if (!hwpipe)
- return;
+ return 0;
+
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ new_state = &state->hwpipe;
if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx]))
- return;
+ return -EINVAL;
DBG("%s: release from plane %s", hwpipe->name,
new_state->hwpipe_to_plane[hwpipe->idx]->name);
@@ -141,6 +146,8 @@ void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
}
new_state->hwpipe_to_plane[hwpipe->idx] = NULL;
+
+ return 0;
}
void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
index 9b26d0761bd4..cca67938cab2 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
@@ -37,7 +37,7 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
uint32_t caps, uint32_t blkcfg,
struct mdp5_hw_pipe **hwpipe,
struct mdp5_hw_pipe **r_hwpipe);
-void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
+int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
uint32_t reg_offset, uint32_t caps);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 83423092de2f..0dc23c86747e 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -179,7 +179,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
drm_framebuffer_put(plane->state->fb);
kfree(to_mdp5_plane_state(plane->state));
+ plane->state = NULL;
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+ if (!mdp5_state)
+ return;
/* assign default blend parameters */
mdp5_state->alpha = 255;
@@ -390,12 +393,24 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
mdp5_state->r_hwpipe = NULL;
- mdp5_pipe_release(state->state, old_hwpipe);
- mdp5_pipe_release(state->state, old_right_hwpipe);
+ ret = mdp5_pipe_release(state->state, old_hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, old_right_hwpipe);
+ if (ret)
+ return ret;
+
}
} else {
- mdp5_pipe_release(state->state, mdp5_state->hwpipe);
- mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+ ret = mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+ if (ret)
+ return ret;
+
mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 55ea4bc2ee9c..16194971a99f 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -26,17 +26,22 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
}
phy_pdev = of_find_device_by_node(phy_node);
- if (phy_pdev)
+ if (phy_pdev) {
msm_dsi->phy = platform_get_drvdata(phy_pdev);
+ msm_dsi->phy_dev = &phy_pdev->dev;
+ }
of_node_put(phy_node);
- if (!phy_pdev || !msm_dsi->phy) {
+ if (!phy_pdev) {
+ DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+ return -EPROBE_DEFER;
+ }
+ if (!msm_dsi->phy) {
+ put_device(&phy_pdev->dev);
DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
return -EPROBE_DEFER;
}
-
- msm_dsi->phy_dev = get_device(&phy_pdev->dev);
return 0;
}
@@ -206,8 +211,10 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
- if (!msm_dsi_manager_validate_current_config(msm_dsi->id))
+ if (!msm_dsi_manager_validate_current_config(msm_dsi->id)) {
+ ret = -EINVAL;
goto fail;
+ }
msm_dsi->encoder = encoder;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 1e7b1be25bb0..743142e15b4c 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -460,7 +460,7 @@ static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
return 0;
err:
- for (; i > 0; i--)
+ while (--i >= 0)
clk_disable_unprepare(msm_host->bus_clks[i]);
return ret;
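A hedged sketch of the unwind pattern the hunk above switches to: when enabling the i-th clock fails, only clocks 0..i-1 are enabled, and "while (--i >= 0)" walks back over exactly those. The boolean array below is a toy stand-in for the driver's clock handles.

#include <stdio.h>
#include <stdbool.h>

#define NUM_CLKS 4

int main(void)
{
        bool enabled[NUM_CLKS] = { false };
        int i;

        /* Pretend the third clock fails to enable: clocks 0..1 are on when we bail. */
        for (i = 0; i < NUM_CLKS; i++) {
                if (i == 2)
                        break;
                enabled[i] = true;
        }

        /*
         * Unwind exactly the clocks that were enabled. The old "for (; i > 0; i--)"
         * form touched the failed index and skipped index 0; "--i >= 0" does not.
         */
        while (--i >= 0)
                enabled[i] = false;

        for (i = 0; i < NUM_CLKS; i++)
                printf("clk%d: %s\n", i, enabled[i] ? "on" : "off");
        return 0;
}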
@@ -1348,10 +1348,10 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
dsi_get_bpp(msm_host->format) / 8;
len = dsi_cmd_dma_add(msm_host, msg);
- if (!len) {
+ if (len < 0) {
pr_err("%s: failed to add cmd type = 0x%x\n",
__func__, msg->type);
- return -EINVAL;
+ return len;
}
/* for video mode, do not send cmds more than
@@ -1370,10 +1370,14 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
}
ret = dsi_cmd_dma_tx(msm_host, len);
- if (ret < len) {
- pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
- __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
- return -ECOMM;
+ if (ret < 0) {
+ pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d, ret=%d\n",
+ __func__, msg->type, (*(u8 *)(msg->tx_buf)), len, ret);
+ return ret;
+ } else if (ret < len) {
+ pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, ret=%d len=%d\n",
+ __func__, msg->type, (*(u8 *)(msg->tx_buf)), ret, len);
+ return -EIO;
}
return len;
@@ -1669,6 +1673,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
if (!prop) {
DRM_DEV_DEBUG(dev,
"failed to find data lane mapping, using default\n");
+ /* Set the number of data lanes to 4 by default. */
+ msm_host->num_data_lanes = 4;
return 0;
}
@@ -2097,9 +2103,12 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
}
ret = dsi_cmds2buf_tx(msm_host, msg);
- if (ret < msg->tx_len) {
+ if (ret < 0) {
pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
return ret;
+ } else if (ret < msg->tx_len) {
+ pr_err("%s: Read cmd Tx failed, too short: %d\n", __func__, ret);
+ return -ECOMM;
}
/*
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 73127948f54d..f3ff2cdc288b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -625,7 +625,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
return connector;
fail:
- connector->funcs->destroy(msm_dsi->connector);
+ connector->funcs->destroy(connector);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 21519229fe73..60d50643d0b5 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -665,12 +665,14 @@ void __exit msm_dsi_phy_driver_unregister(void)
int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
struct msm_dsi_phy_clk_request *clk_req)
{
- struct device *dev = &phy->pdev->dev;
+ struct device *dev;
int ret;
if (!phy || !phy->cfg->ops.enable)
return -EINVAL;
+ dev = &phy->pdev->dev;
+
ret = dsi_phy_enable_resource(phy);
if (ret) {
DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index 1afb7c579dbb..eca86bf448f7 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -139,7 +139,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.disable = dsi_20nm_phy_disable,
.init = msm_dsi_phy_init_common,
},
- .io_start = { 0xfd998300, 0xfd9a0300 },
+ .io_start = { 0xfd998500, 0xfd9a0500 },
.num_dsi_phy = 2,
};
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index aa9385d5bfff..33033b94935e 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -559,6 +559,7 @@ static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
u32 val;
+ int ret;
val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
val &= ~0x3;
@@ -573,6 +574,13 @@ static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
val |= cached->pll_mux;
pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
+ ret = dsi_pll_10nm_vco_set_rate(&pll->clk_hw, pll_10nm->vco_current_rate, pll_10nm->vco_ref_clk_rate);
+ if (ret) {
+ DRM_DEV_ERROR(&pll_10nm->pdev->dev,
+ "restore vco rate failed. ret=%d\n", ret);
+ return ret;
+ }
+
DBG("DSI PLL%d", pll_10nm->id);
return 0;
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 7f3dd3ffe2c9..bbbd96f9eaa0 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -1082,7 +1082,7 @@ void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on)
int msm_edp_ctrl_init(struct msm_edp *edp)
{
struct edp_ctrl *ctrl = NULL;
- struct device *dev = &edp->pdev->dev;
+ struct device *dev;
int ret;
if (!edp) {
@@ -1090,6 +1090,7 @@ int msm_edp_ctrl_init(struct msm_edp *edp)
return -EINVAL;
}
+ dev = &edp->pdev->dev;
ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return -ENOMEM;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 355afb936401..e4c9ff934e5b 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -97,10 +97,15 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
of_node_put(phy_node);
- if (!phy_pdev || !hdmi->phy) {
+ if (!phy_pdev) {
DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
return -EPROBE_DEFER;
}
+ if (!hdmi->phy) {
+ DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
+ put_device(&phy_pdev->dev);
+ return -EPROBE_DEFER;
+ }
hdmi->phy_dev = get_device(&phy_pdev->dev);
@@ -137,6 +142,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
/* HDCP needs physical address of hdmi register */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
config->mmio_name);
+ if (!res) {
+ ret = -EINVAL;
+ goto fail;
+ }
hdmi->mmio_phy_addr = res->start;
hdmi->qfprom_mmio = msm_ioremap(pdev,
@@ -306,9 +315,9 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
}
hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (hdmi->irq < 0) {
- ret = hdmi->irq;
- DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
+ if (!hdmi->irq) {
+ ret = -EINVAL;
+ DRM_DEV_ERROR(dev->dev, "failed to get irq\n");
goto fail;
}
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 1c74381a4fc9..08995e981808 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -77,6 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
goto free_priv;
pm_runtime_get_sync(&gpu->pdev->dev);
+ msm_gpu_hw_init(gpu);
show_priv->state = gpu->funcs->gpu_state_get(gpu);
pm_runtime_put_sync(&gpu->pdev->dev);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 108632a1f243..407b51cf6790 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -337,7 +337,7 @@ static int msm_init_vram(struct drm_device *dev)
of_node_put(node);
if (ret)
return ret;
- size = r.end - r.start;
+ size = r.end - r.start + 1;
DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
/* if we have no IOMMU, then we need to use carveout allocator.
@@ -432,14 +432,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
drm_mode_config_init(ddev);
- /* Bind all our sub-components: */
- ret = component_bind_all(dev, ddev);
+ ret = msm_init_vram(ddev);
if (ret)
goto err_destroy_mdss;
- ret = msm_init_vram(ddev);
+ /* Bind all our sub-components: */
+ ret = component_bind_all(dev, ddev);
if (ret)
- goto err_msm_uninit;
+ goto err_destroy_mdss;
if (!dev->dma_parms) {
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
@@ -567,6 +567,7 @@ err_free_priv:
kfree(priv);
err_put_drm_dev:
drm_dev_put(ddev);
+ platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -1326,6 +1327,10 @@ static int msm_pdev_remove(struct platform_device *pdev)
static void msm_pdev_shutdown(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
+ struct msm_drm_private *priv = drm ? drm->dev_private : NULL;
+
+ if (!priv || !priv->kms)
+ return;
drm_atomic_helper_shutdown(drm);
}
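The +1 in the msm_init_vram() hunk above reflects that the resource's end address is inclusive, the same arithmetic as the kernel's resource_size() helper. A small userspace sketch of that calculation; the struct and carveout values are illustrative only.

#include <stdio.h>
#include <stdint.h>

/* Inclusive [start, end] range, mirroring how struct resource bounds work. */
struct range {
        uint64_t start;
        uint64_t end;
};

/* Same arithmetic as the kernel's resource_size(): inclusive end, hence the +1. */
static uint64_t range_size(const struct range *r)
{
        return r->end - r->start + 1;
}

int main(void)
{
        /* A 16 MiB carveout described as 0x10000000..0x10ffffff inclusive. */
        struct range r = { 0x10000000, 0x10ffffff };
        uint64_t size = range_size(&r);

        printf("size = 0x%llx (%llu MiB)\n",
               (unsigned long long)size, (unsigned long long)(size >> 20));
        return 0;
}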
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index ad2703698b05..cd59a5918038 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -45,7 +45,7 @@ int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
int ret;
if (fence > fctx->last_fence) {
- DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n",
+ DRM_ERROR_RATELIMITED("%s: waiting on invalid fence: %u (of %u)\n",
fctx->name, fence, fctx->last_fence);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d92a0ffe2a76..8e6a4d5f3a40 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1036,7 +1036,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
ret = msm_gem_new_impl(dev, size, flags, &obj);
if (ret)
- goto fail;
+ return ERR_PTR(ret);
msm_obj = to_msm_bo(obj);
@@ -1124,7 +1124,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
if (ret)
- goto fail;
+ return ERR_PTR(ret);
drm_gem_private_object_init(dev, obj, size);
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index d7c8948427fe..705a834ba1e6 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -17,7 +17,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
int npages = obj->size >> PAGE_SHIFT;
if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
- return NULL;
+ return ERR_PTR(-ENOMEM);
return drm_prime_pages_to_sg(msm_obj->pages, npages);
}
diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
index 0dca8f27169e..33916b7b2c50 100644
--- a/drivers/gpu/drm/mxsfb/Kconfig
+++ b/drivers/gpu/drm/mxsfb/Kconfig
@@ -10,7 +10,6 @@ config DRM_MXSFB
depends on COMMON_CLK
select DRM_MXS
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_PANEL
help
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index e8506335cd15..1694a7deb913 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -26,6 +26,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
@@ -87,8 +88,26 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
clk_disable_unprepare(mxsfb->clk_axi);
}
+static struct drm_framebuffer *
+mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ const struct drm_format_info *info;
+
+ info = drm_get_format_info(dev, mode_cmd);
+ if (!info)
+ return ERR_PTR(-EINVAL);
+
+ if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
+ dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
+}
+
static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
- .fb_create = drm_gem_fb_create,
+ .fb_create = mxsfb_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
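A hedged sketch of the pitch rule the new mxsfb_fb_create() enforces: a line is expected to be exactly width times bytes-per-pixel, so a padded pitch is rejected. The format table below is illustrative; the driver takes cpp from drm_get_format_info().

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Illustrative format table; the driver gets cpp (bytes per pixel) from drm_get_format_info(). */
struct fmt {
        const char *name;
        uint32_t cpp;
};

/* Same rule as the new mxsfb_fb_create(): the pitch must be exactly width * cpp. */
static bool pitch_ok(uint32_t width, uint32_t pitch, const struct fmt *f)
{
        return (uint64_t)width * f->cpp == pitch;
}

int main(void)
{
        const struct fmt xrgb8888 = { "XRGB8888", 4 };
        const struct fmt rgb565   = { "RGB565",   2 };

        printf("%s 1024/4096: %d\n", xrgb8888.name, pitch_ok(1024, 4096, &xrgb8888)); /* ok */
        printf("%s 1024/4352: %d\n", xrgb8888.name, pitch_ok(1024, 4352, &xrgb8888)); /* padded pitch, rejected */
        printf("%s  800/1600: %d\n", rgb565.name,   pitch_ok(800,  1600, &rgb565));   /* ok */
        return 0;
}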
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index dc64863b5fd8..f517b39aba56 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -179,7 +179,7 @@ nv04_display_destroy(struct drm_device *dev)
nvif_notify_fini(&disp->flip);
nouveau_display(dev)->priv = NULL;
- kfree(disp);
+ vfree(disp);
nvif_object_unmap(&drm->client.device.object);
}
@@ -197,7 +197,7 @@ nv04_display_create(struct drm_device *dev)
struct nv04_display *disp;
int i, ret;
- disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+ disp = vzalloc(sizeof(*disp));
if (!disp)
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index ee2b1e1199e0..daa79d39201f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -132,7 +132,7 @@ nv50_dmac_destroy(struct nv50_dmac *dmac)
int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
- const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
+ const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
struct nv50_dmac *dmac)
{
struct nouveau_cli *cli = (void *)device->object.client;
@@ -167,7 +167,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (ret)
return ret;
- if (!syncbuf)
+ if (syncbuf < 0)
return 0;
ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 7c41b0599d1a..284068fa6d00 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -70,7 +70,7 @@ struct nv50_dmac {
int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
const s32 *oclass, u8 head, void *data, u32 size,
- u64 syncbuf, struct nv50_dmac *dmac);
+ s64 syncbuf, struct nv50_dmac *dmac);
void nv50_dmac_destroy(struct nv50_dmac *);
u32 *evo_wait(struct nv50_dmac *, int nr);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
index f7dbd965e4e7..b49a212af4d8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -68,7 +68,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
int ret;
ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
- &oclass, 0, &args, sizeof(args), 0,
+ &oclass, 0, &args, sizeof(args), -1,
&wndw->wimm);
if (ret) {
NV_ERROR(drm, "wimm%04x allocation failed: %d\n", oclass, ret);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
index f5f59261ea81..d1beaad0c82b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
@@ -14,6 +14,7 @@ enum dcb_connector_type {
DCB_CONNECTOR_LVDS_SPWG = 0x41,
DCB_CONNECTOR_DP = 0x46,
DCB_CONNECTOR_eDP = 0x47,
+ DCB_CONNECTOR_mDP = 0x48,
DCB_CONNECTOR_HDMI_0 = 0x60,
DCB_CONNECTOR_HDMI_1 = 0x61,
DCB_CONNECTOR_HDMI_C = 0x63,
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index c7a94c94dbf3..f2f3280c3a50 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -51,8 +51,9 @@ static bool
nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE],
struct nouveau_backlight *bl)
{
- const int nb = ida_simple_get(&bl_ida, 0, 0, GFP_KERNEL);
- if (nb < 0 || nb >= 100)
+ const int nb = ida_alloc_max(&bl_ida, 99, GFP_KERNEL);
+
+ if (nb < 0)
return false;
if (nb > 0)
snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight%d", nb);
@@ -280,7 +281,7 @@ nouveau_backlight_init(struct drm_connector *connector)
nv_encoder, ops, &props);
if (IS_ERR(bl->dev)) {
if (bl->id >= 0)
- ida_simple_remove(&bl_ida, bl->id);
+ ida_free(&bl_ida, bl->id);
ret = PTR_ERR(bl->dev);
goto fail_alloc;
}
@@ -306,7 +307,7 @@ nouveau_backlight_fini(struct drm_connector *connector)
return;
if (bl->id >= 0)
- ida_simple_remove(&bl_ida, bl->id);
+ ida_free(&bl_ida, bl->id);
backlight_device_unregister(bl->dev);
nv_conn->backlight = NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index f8015e0318d7..f7603be569fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -542,7 +542,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
int i;
- if (!ttm_dma)
+ if (!ttm_dma || !ttm_dma->dma_address)
return;
/* Don't waste time looping if the object is coherent */
@@ -562,7 +562,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
int i;
- if (!ttm_dma)
+ if (!ttm_dma || !ttm_dma->dma_address)
return;
/* Don't waste time looping if the object is coherent */
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 282fd90b65e1..9ce7b0d4b876 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -497,6 +497,7 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
if (ret) {
NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
nouveau_channel_del(pchan);
+ goto done;
}
ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 0994aee7671a..496e7dcd6b7d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1240,6 +1240,7 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
case DCB_CONNECTOR_DMS59_DP0:
case DCB_CONNECTOR_DMS59_DP1:
case DCB_CONNECTOR_DP :
+ case DCB_CONNECTOR_mDP :
case DCB_CONNECTOR_USB_C : return DRM_MODE_CONNECTOR_DisplayPort;
case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
case DCB_CONNECTOR_HDMI_0 :
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 3b13feca970f..3c54d61e4fa9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -207,6 +207,7 @@ static const struct file_operations nouveau_pstate_fops = {
.open = nouveau_debugfs_pstate_open,
.read = seq_read,
.write = nouveau_debugfs_pstate_set,
+ .release = single_release,
};
static struct drm_info_list nouveau_debugfs_list[] = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 5347e5bdee8c..e18938972a89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -779,7 +779,7 @@ nouveau_drm_device_remove(struct drm_device *dev)
struct nvkm_client *client;
struct nvkm_device *device;
- drm_dev_unregister(dev);
+ drm_dev_unplug(dev);
dev->irq_enabled = false;
client = nvxx_client(&drm->client.base);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 7d39d4949ee7..2dd9fcab464b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -197,7 +197,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
* to the caller, instead of a normal nouveau_bo ttm reference. */
ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
if (ret) {
- nouveau_bo_ref(NULL, &nvbo);
+ drm_gem_object_release(&nvbo->bo.base);
+ kfree(nvbo);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index c002f8968507..9682f30ab6f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -176,6 +176,8 @@ void
nouveau_mem_del(struct ttm_mem_reg *reg)
{
struct nouveau_mem *mem = nouveau_mem(reg);
+ if (!mem)
+ return;
nouveau_mem_fini(mem);
kfree(reg->mm_node);
reg->mm_node = NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index bae6a3eccee0..f9ee562f72d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -112,7 +112,22 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
if (ret)
return -EINVAL;
- return 0;
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
+ if (ret)
+ goto error;
+
+ if (nvbo->bo.moving)
+ ret = dma_fence_wait(nvbo->bo.moving, true);
+
+ ttm_bo_unreserve(&nvbo->bo);
+ if (ret)
+ goto error;
+
+ return ret;
+
+error:
+ nouveau_bo_unpin(nvbo);
+ return ret;
}
void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 824654742a60..8556804e96ef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -112,11 +112,11 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct drm_nouveau_svm_bind *args = data;
unsigned target, cmd, priority;
- unsigned long addr, end, size;
+ unsigned long addr, end;
struct mm_struct *mm;
args->va_start &= PAGE_MASK;
- args->va_end &= PAGE_MASK;
+ args->va_end = ALIGN(args->va_end, PAGE_SIZE);
/* Sanity check arguments */
if (args->reserved0 || args->reserved1)
@@ -125,8 +125,6 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
return -EINVAL;
if (args->va_start >= args->va_end)
return -EINVAL;
- if (!args->npages)
- return -EINVAL;
cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
@@ -158,12 +156,6 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
if (args->stride)
return -EINVAL;
- size = ((unsigned long)args->npages) << PAGE_SHIFT;
- if ((args->va_start + size) <= args->va_start)
- return -EINVAL;
- if ((args->va_start + size) > args->va_end)
- return -EINVAL;
-
/*
* Ok we are asked to do something sane, for now we only support migrate
* commands but we will add things like memory policy (what to do on
@@ -178,7 +170,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
return -EINVAL;
}
- for (addr = args->va_start, end = args->va_start + size; addr < end;) {
+ for (addr = args->va_start, end = args->va_end; addr < end;) {
struct vm_area_struct *vma;
unsigned long next;
@@ -314,6 +306,10 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
struct drm_nouveau_svm_init *args = data;
int ret;
+ /* We need to fail if svm is disabled */
+ if (!cli->drm->svm)
+ return -ENOSYS;
+
/* Allocate tracking for SVM-enabled VMM. */
if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
return -ENOMEM;
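A hedged sketch of the end-address rounding that the nouveau_svmm_bind() hunks above change: masking va_end with PAGE_MASK rounds it down and can drop a trailing partial page, while ALIGN() rounds it up so the partial page stays inside the range. The 4 KiB page size and the macro names below are local assumptions for illustration.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE   4096ULL
#define PAGE_MASK   (~(PAGE_SIZE - 1))
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        uint64_t va_start = 0x10000200, va_end = 0x10003800;

        /* The start is still rounded down to a page boundary... */
        uint64_t start   = va_start & PAGE_MASK;
        /* ...but rounding va_end down (the old behaviour) drops the final partial
         * page, while rounding it up (the new ALIGN() behaviour) keeps it. */
        uint64_t end_old = va_end & PAGE_MASK;
        uint64_t end_new = ALIGN_UP(va_end, PAGE_SIZE);

        printf("start=%#llx end(old)=%#llx end(new)=%#llx\n",
               (unsigned long long)start,
               (unsigned long long)end_old,
               (unsigned long long)end_new);
        return 0;
}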
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
index b0ece71aefde..ce774579c89d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
@@ -57,7 +57,7 @@ nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size)
args->v0.count = 0;
args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
- args->v0.pwrsrc = -ENOSYS;
+ args->v0.pwrsrc = -ENODEV;
args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 0e372a190d3f..9b6972c95358 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -123,7 +123,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
mutex_init(&tdev->iommu.mutex);
- if (iommu_present(&platform_bus_type)) {
+ if (device_iommu_mapped(dev)) {
tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
if (!tdev->iommu.domain)
goto error;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 818d21bd28d3..1d2837c5a8f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -419,7 +419,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
return ret;
}
-static void
+void
nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
struct nvkm_dp *dp = nvkm_dp(outp);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
index 428b3f488f03..e484d0c3b0d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
@@ -32,6 +32,7 @@ struct nvkm_dp {
int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *,
struct nvkm_outp **);
+void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
/* DPCD Receiver Capabilities */
#define DPCD_RC00_DPCD_REV 0x00000
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
index 6e3c450eaace..3ff49344abc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
@@ -62,7 +62,6 @@ gv100_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet,
nvkm_wr32(device, 0x6f0108 + hdmi, vendor_infoframe.header);
nvkm_wr32(device, 0x6f010c + hdmi, vendor_infoframe.subpack0_low);
nvkm_wr32(device, 0x6f0110 + hdmi, vendor_infoframe.subpack0_high);
- nvkm_wr32(device, 0x6f0110 + hdmi, 0x00000000);
nvkm_wr32(device, 0x6f0114 + hdmi, 0x00000000);
nvkm_wr32(device, 0x6f0118 + hdmi, 0x00000000);
nvkm_wr32(device, 0x6f011c + hdmi, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index c62030c96fba..4b1c72fd8f03 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
#include "outp.h"
+#include "dp.h"
#include "ior.h"
#include <subdev/bios.h>
@@ -216,6 +217,14 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
if (!ior->arm.head || ior->arm.proto != proto) {
OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head,
ior->arm.proto, proto);
+
+ /* The EFI GOP driver on Ampere can leave unused DP links routed,
+ * which we don't expect. The DisableLT IED script *should* get
+ * us back to where we need to be.
+ */
+ if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP)
+ nvkm_dp_disable(outp, ior);
+
return;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index f3c30b2a788e..8bff14ae16b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -38,7 +38,7 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
*addr += bios->imaged_addr;
}
- if (unlikely(*addr + size >= bios->size)) {
+ if (unlikely(*addr + size > bios->size)) {
nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
return false;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 7deb81b6dbac..4b571cc6bc70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -75,7 +75,7 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
image.base, image.type, image.size);
- if (!shadow_fetch(bios, mthd, image.size)) {
+ if (!shadow_fetch(bios, mthd, image.base + image.size)) {
nvkm_debug(subdev, "%08x: fetch failed\n", image.base);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 40e564524b7a..93a49cbfb81d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -135,10 +135,10 @@ nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
list_for_each_entry_from_reverse(cstate, &pstate->list, head) {
if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
- break;
+ return cstate;
}
- return cstate;
+ return NULL;
}
static struct nvkm_cstate *
@@ -169,6 +169,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
if (!list_empty(&pstate->list)) {
cstate = nvkm_cstate_get(clk, pstate, cstatei);
cstate = nvkm_cstate_find_best(clk, pstate, cstate);
+ if (!cstate)
+ return -EINVAL;
} else {
cstate = &pstate->base;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index edb6148cbca0..d0e80ad52684 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -33,7 +33,7 @@ static void
gm200_i2c_aux_fini(struct gm200_i2c_aux *aux)
{
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00710000, 0x00000000);
}
static int
@@ -54,10 +54,10 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
return -EBUSY;
}
- } while (ctrl & 0x03010000);
+ } while (ctrl & 0x07010000);
/* set some magic, and wait up to 1ms for it to appear */
- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq);
+ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00700000, ureq);
timeout = 1000;
do {
ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
@@ -67,7 +67,7 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
gm200_i2c_aux_fini(aux);
return -EBUSY;
}
- } while ((ctrl & 0x03000000) != urep);
+ } while ((ctrl & 0x07000000) != urep);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index d80dbc8f09b2..55a4ea4393c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
+#include <subdev/timer.h>
static void
gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -31,7 +32,6 @@ gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
}
static void
@@ -42,7 +42,6 @@ gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
}
static void
@@ -53,7 +52,6 @@ gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
}
void
@@ -90,6 +88,12 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
intr1 &= ~stat;
}
}
+
+ nvkm_mask(device, 0x121c4c, 0x0000003f, 0x00000002);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x121c4c) & 0x0000003f))
+ break;
+ );
}
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index 9025ed1bd2a9..4caf3ef087e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
+#include <subdev/timer.h>
static void
gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -31,7 +32,6 @@ gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
}
static void
@@ -42,7 +42,6 @@ gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
}
static void
@@ -53,7 +52,6 @@ gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
}
void
@@ -90,6 +88,12 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
intr1 &= ~stat;
}
}
+
+ nvkm_mask(device, 0x12004c, 0x0000003f, 0x00000002);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
+ break;
+ );
}
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index ee11ccaf0563..cb51e248cb41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -316,9 +316,9 @@ nvkm_mmu_vram(struct nvkm_mmu *mmu)
{
struct nvkm_device *device = mmu->subdev.device;
struct nvkm_mm *mm = &device->fb->ram->vram;
- const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
- const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
- const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
+ const u64 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+ const u64 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+ const u64 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
u8 heap = NVKM_MEM_VRAM;
int heapM, heapN, heapU;
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 252f5ebb1acc..3dd6c0087edb 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -891,6 +891,7 @@ static int omap_dmm_probe(struct platform_device *dev)
&omap_dmm->refill_pa, GFP_KERNEL);
if (!omap_dmm->refill_va) {
dev_err(&dev->dev, "could not allocate refill memory\n");
+ ret = -ENOMEM;
goto fail;
}
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index f152bc4eeb53..e70ccadf7541 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -141,6 +141,7 @@ config DRM_PANEL_OLIMEX_LCD_OLINUXINO
depends on OF
depends on I2C
depends on BACKLIGHT_CLASS_DEVICE
+ select CRC32
help
The panel is used with different sizes LCDs, from 480x272 to
1280x800, and 24 bit per pixel.
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index d92d1c98878c..df90b6607981 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -509,6 +509,7 @@ static void innolux_panel_del(struct innolux_panel *innolux)
static int innolux_panel_probe(struct mipi_dsi_device *dsi)
{
const struct panel_desc *desc;
+ struct innolux_panel *innolux;
int err;
desc = of_device_get_match_data(&dsi->dev);
@@ -520,7 +521,14 @@ static int innolux_panel_probe(struct mipi_dsi_device *dsi)
if (err < 0)
return err;
- return mipi_dsi_attach(dsi);
+ err = mipi_dsi_attach(dsi);
+ if (err < 0) {
+ innolux = mipi_dsi_get_drvdata(dsi);
+ innolux_panel_del(innolux);
+ return err;
+ }
+
+ return 0;
}
static int innolux_panel_remove(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index 3ac04eb8d0fe..1e7fecab72a9 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -424,7 +424,13 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)
if (err < 0)
return err;
- return mipi_dsi_attach(dsi);
+ err = mipi_dsi_attach(dsi);
+ if (err < 0) {
+ kingdisplay_panel_del(kingdisplay);
+ return err;
+ }
+
+ return 0;
}
static int kingdisplay_panel_remove(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 2aa89eaecf6f..a621dd28ff70 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -232,7 +232,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
if (ret)
- dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
+ dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
}
static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
@@ -268,7 +268,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel)
return 0;
}
-static int rpi_touchscreen_enable(struct drm_panel *panel)
+static int rpi_touchscreen_prepare(struct drm_panel *panel)
{
struct rpi_touchscreen *ts = panel_to_ts(panel);
int i;
@@ -298,6 +298,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
msleep(100);
+ return 0;
+}
+
+static int rpi_touchscreen_enable(struct drm_panel *panel)
+{
+ struct rpi_touchscreen *ts = panel_to_ts(panel);
+
/* Turn on the backlight. */
rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
@@ -352,7 +359,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel)
static const struct drm_panel_funcs rpi_touchscreen_funcs = {
.disable = rpi_touchscreen_disable,
.unprepare = rpi_touchscreen_noop,
- .prepare = rpi_touchscreen_noop,
+ .prepare = rpi_touchscreen_prepare,
.enable = rpi_touchscreen_enable,
.get_modes = rpi_touchscreen_get_modes,
};
@@ -453,7 +460,6 @@ static int rpi_touchscreen_remove(struct i2c_client *i2c)
drm_panel_remove(&ts->base);
mipi_dsi_device_unregister(ts->dsi);
- kfree(ts->dsi);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 6d9656323a3f..312a3c4e2331 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1619,7 +1619,7 @@ static const struct display_timing innolux_g070y2_l01_timing = {
static const struct panel_desc innolux_g070y2_l01 = {
.timings = &innolux_g070y2_l01_timing,
.num_timings = 1,
- .bpc = 6,
+ .bpc = 8,
.size = {
.width = 152,
.height = 91,
@@ -2382,12 +2382,12 @@ static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
static const struct panel_desc ortustech_com43h4m85ulc = {
.modes = &ortustech_com43h4m85ulc_mode,
.num_modes = 1,
- .bpc = 8,
+ .bpc = 6,
.size = {
.width = 56,
.height = 93,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 238fb6d54df4..413bf314a2bc 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -59,7 +59,8 @@ static int panfrost_clk_init(struct panfrost_device *pfdev)
if (IS_ERR(pfdev->bus_clock)) {
dev_err(pfdev->dev, "get bus_clock failed %ld\n",
PTR_ERR(pfdev->bus_clock));
- return PTR_ERR(pfdev->bus_clock);
+ err = PTR_ERR(pfdev->bus_clock);
+ goto disable_clock;
}
if (pfdev->bus_clock) {
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index c05e013bb8e3..ac9de37a8280 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -105,14 +105,12 @@ void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}
-void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
+void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
struct panfrost_gem_mapping *mapping;
- mutex_lock(&bo->mappings.lock);
list_for_each_entry(mapping, &bo->mappings.list, node)
panfrost_gem_teardown_mapping(mapping);
- mutex_unlock(&bo->mappings.lock);
}
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index b3517ff9630c..8088d5fd8480 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -82,7 +82,7 @@ struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
struct panfrost_file_priv *priv);
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
-void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
+void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo);
void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 288e46c40673..1b9f68d8e9aa 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -40,18 +40,26 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ bool ret = false;
if (atomic_read(&bo->gpu_usecount))
return false;
- if (!mutex_trylock(&shmem->pages_lock))
+ if (!mutex_trylock(&bo->mappings.lock))
return false;
- panfrost_gem_teardown_mappings(bo);
+ if (!mutex_trylock(&shmem->pages_lock))
+ goto unlock_mappings;
+
+ panfrost_gem_teardown_mappings_locked(bo);
drm_gem_shmem_purge_locked(obj);
+ ret = true;
mutex_unlock(&shmem->pages_lock);
- return true;
+
+unlock_mappings:
+ mutex_unlock(&bo->mappings.lock);
+ return ret;
}
static unsigned long
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 8822ec13a0d6..0d39a201c759 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -75,6 +75,17 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
return 0;
}
+void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev)
+{
+ /*
+ * The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 need
+ * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order
+ * to operate correctly.
+ */
+ gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK);
+ gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16));
+}
+
static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
{
u32 quirks = 0;
@@ -304,6 +315,8 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
int ret;
u32 val;
+ panfrost_gpu_init_quirks(pfdev);
+
/* Just turn on everything for now */
gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
@@ -357,7 +370,6 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
return err;
}
- panfrost_gpu_init_quirks(pfdev);
panfrost_gpu_power_on(pfdev);
return 0;
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h
index 4112412087b2..468c51e7e46d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h
@@ -16,4 +16,6 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
void panfrost_gpu_power_on(struct panfrost_device *pfdev);
void panfrost_gpu_power_off(struct panfrost_device *pfdev);
+void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev);
+
#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 3dc9b30a64b0..8a014dc11571 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -52,25 +52,16 @@ static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
}
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
- u64 iova, size_t size)
+ u64 iova, u64 size)
{
u8 region_width;
u64 region = iova & PAGE_MASK;
- /*
- * fls returns:
- * 1 .. 32
- *
- * 10 + fls(num_pages)
- * results in the range (11 .. 42)
- */
-
- size = round_up(size, PAGE_SIZE);
- region_width = 10 + fls(size >> PAGE_SHIFT);
- if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
- /* not pow2, so must go up to the next pow2 */
- region_width += 1;
- }
+ /* The size is encoded as ceil(log2) minus 1, which may be calculated
+ * with fls. The size must be clamped to hardware bounds.
+ */
+ size = max_t(u64, size, AS_LOCK_REGION_MIN_SIZE);
+ region_width = fls64(size - 1) - 1;
region |= region_width;
/* Lock the region that needs to be updated */
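For illustration, a small userspace sketch (not kernel code) of the width calculation introduced above: the lock-region width is ceil(log2(size)) minus 1, so fls64(size - 1) - 1 both computes the exponent and rounds non-power-of-two sizes up. __builtin_clzll() approximates fls64(); the 32 KiB minimum mirrors AS_LOCK_REGION_MIN_SIZE.

/*
 * Illustrative userspace sketch of the new lock-region width encoding.
 */
#include <stdint.h>
#include <stdio.h>

#define LOCK_REGION_MIN_SIZE (1ULL << 15)   /* hardware minimum, 32 KiB */

static unsigned int fls64_approx(uint64_t v)
{
	return v ? 64 - __builtin_clzll(v) : 0;
}

static unsigned int region_width(uint64_t size)
{
	if (size < LOCK_REGION_MIN_SIZE)
		size = LOCK_REGION_MIN_SIZE;
	/* ceil(log2(size)) - 1: non-power-of-two sizes round up */
	return fls64_approx(size - 1) - 1;
}

int main(void)
{
	printf("32 KiB -> width %u\n", region_width(32768));      /* 14 */
	printf("48 KiB -> width %u\n", region_width(49152));      /* 15 */
	printf("4 GiB  -> width %u\n", region_width(1ULL << 32)); /* 31 */
	return 0;
}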
@@ -81,7 +72,7 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
- u64 iova, size_t size, u32 op)
+ u64 iova, u64 size, u32 op)
{
if (as_nr < 0)
return 0;
@@ -98,7 +89,7 @@ static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
static int mmu_hw_do_operation(struct panfrost_device *pfdev,
struct panfrost_mmu *mmu,
- u64 iova, size_t size, u32 op)
+ u64 iova, u64 size, u32 op)
{
int ret;
@@ -115,7 +106,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
@@ -131,7 +122,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
@@ -231,7 +222,7 @@ static size_t get_pgsize(u64 addr, size_t size)
static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
struct panfrost_mmu *mmu,
- u64 iova, size_t size)
+ u64 iova, u64 size)
{
if (mmu->as < 0)
return;
@@ -494,8 +485,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
}
bo->base.pages = pages;
bo->base.pages_use_count = 1;
- } else
+ } else {
pages = bo->base.pages;
+ if (pages[page_offset]) {
+ /* Pages are already mapped, bail out. */
+ mutex_unlock(&bo->base.pages_lock);
+ goto out;
+ }
+ }
mapping = bo->base.base.filp->f_mapping;
mapping_set_unevictable(mapping);
@@ -529,6 +526,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
+out:
panfrost_gem_mapping_put(bomapping);
return 0;
@@ -600,6 +598,8 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
access_type = (fault_status >> 8) & 0x3;
source_id = (fault_status >> 16);
+ mmu_write(pfdev, MMU_INT_CLEAR, mask);
+
/* Page fault only */
ret = -1;
if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
@@ -623,8 +623,6 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
access_type, access_type_name(pfdev, fault_status),
source_id);
- mmu_write(pfdev, MMU_INT_CLEAR, mask);
-
status &= ~mask;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index ea38ac60581c..2ae3a4d301d3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -51,6 +51,10 @@
#define GPU_STATUS 0x34
#define GPU_STATUS_PRFCNT_ACTIVE BIT(2)
#define GPU_LATEST_FLUSH_ID 0x38
+#define GPU_PWR_KEY 0x50 /* (WO) Power manager key register */
+#define GPU_PWR_KEY_UNLOCK 0x2968A819
+#define GPU_PWR_OVERRIDE0 0x54 /* (RW) Power manager override settings */
+#define GPU_PWR_OVERRIDE1 0x58 /* (RW) Power manager override settings */
#define GPU_FAULT_STATUS 0x3C
#define GPU_FAULT_ADDRESS_LO 0x40
#define GPU_FAULT_ADDRESS_HI 0x44
@@ -314,6 +318,8 @@
#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
+#define AS_LOCK_REGION_MIN_SIZE (1ULL << 15)
+
#define gpu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define gpu_read(dev, reg) readl(dev->iomem + reg)
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 92d84280096e..a6ee10cbcfdd 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -325,6 +325,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
head.id = i;
head.flags = 0;
+ head.surface_id = 0;
oldcount = qdev->monitors_config->count;
if (crtc->state->active) {
struct drm_display_mode *mode = &crtc->mode;
@@ -1236,6 +1237,10 @@ int qxl_modeset_init(struct qxl_device *qdev)
void qxl_modeset_fini(struct qxl_device *qdev)
{
+ if (qdev->dumb_shadow_bo) {
+ drm_gem_object_put(&qdev->dumb_shadow_bo->tbo.base);
+ qdev->dumb_shadow_bo = NULL;
+ }
qxl_destroy_monitors_object(qdev);
drm_mode_config_cleanup(&qdev->ddev);
}
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index 272d19b677d8..4a0dc1348097 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -58,6 +58,8 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
surf.height = args->height;
surf.stride = pitch;
surf.format = format;
+ surf.data = 0;
+
r = qxl_gem_object_create_with_handle(qdev, file_priv,
QXL_GEM_DOMAIN_SURFACE,
args->size, &surf, &qobj,
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index cc8f32a1b03c..92ffed5c1d69 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -197,7 +197,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
* so don't register a backlight device
*/
if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
- (rdev->pdev->device == 0x6741))
+ (rdev->pdev->device == 0x6741) &&
+ !dmi_match(DMI_PRODUCT_NAME, "iMac12,1"))
return;
if (!radeon_encoder->enc_priv)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index d59b004f6695..147087a891aa 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1554,6 +1554,7 @@ struct radeon_dpm {
void *priv;
u32 new_active_crtcs;
int new_active_crtc_count;
+ int high_pixelclock_count;
u32 current_active_crtcs;
int current_active_crtc_count;
bool single_display;
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 226a7bf0eb7a..9e0aa357585f 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2136,11 +2136,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
return state_index;
/* last mode is usually default, array is low to high */
for (i = 0; i < num_modes; i++) {
- rdev->pm.power_state[state_index].clock_info =
- kcalloc(1, sizeof(struct radeon_pm_clock_info),
- GFP_KERNEL);
+ /* avoid memory leaks from invalid modes or unknown frev. */
+ if (!rdev->pm.power_state[state_index].clock_info) {
+ rdev->pm.power_state[state_index].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info),
+ GFP_KERNEL);
+ }
if (!rdev->pm.power_state[state_index].clock_info)
- return state_index;
+ goto out;
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
switch (frev) {
@@ -2259,17 +2262,24 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
break;
}
}
+out:
+ /* free any unused clock_info allocation. */
+ if (state_index && state_index < num_modes) {
+ kfree(rdev->pm.power_state[state_index].clock_info);
+ rdev->pm.power_state[state_index].clock_info = NULL;
+ }
+
/* last mode is usually default */
- if (rdev->pm.default_power_state_index == -1) {
+ if (state_index && rdev->pm.default_power_state_index == -1) {
rdev->pm.power_state[state_index - 1].type =
POWER_STATE_TYPE_DEFAULT;
rdev->pm.default_power_state_index = state_index - 1;
rdev->pm.power_state[state_index - 1].default_clock_mode =
&rdev->pm.power_state[state_index - 1].clock_info[0];
- rdev->pm.power_state[state_index].flags &=
+ rdev->pm.power_state[state_index - 1].flags &=
~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- rdev->pm.power_state[state_index].misc = 0;
- rdev->pm.power_state[state_index].misc2 = 0;
+ rdev->pm.power_state[state_index - 1].misc = 0;
+ rdev->pm.power_state[state_index - 1].misc2 = 0;
}
return state_index;
}
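A hedged userspace sketch of the allocation pattern adopted above: allocate a slot's clock_info only once, reuse it when an invalid mode leaves the slot in place, and free the trailing allocation if the last slot is never filled. calloc() stands in for kzalloc() and the mode-validity test is faked.

/*
 * Sketch: allocate once per slot, reuse on invalid modes, free the
 * trailing allocation left behind by the last unfilled slot.
 */
#include <stdlib.h>

struct state { void *clock_info; };

int main(void)
{
	struct state states[8] = { { NULL } };
	int state_index = 0;
	int num_modes = 8;

	for (int i = 0; i < num_modes; i++) {
		if (!states[state_index].clock_info)
			states[state_index].clock_info = calloc(1, 64);
		if (!states[state_index].clock_info)
			break;

		/* ... parse mode i; only advance on a valid mode ... */
		if (i % 2 == 0)            /* pretend odd modes are invalid */
			state_index++;
	}

	/* free the slot that was allocated but never filled */
	if (state_index < num_modes && states[state_index].clock_info) {
		free(states[state_index].clock_info);
		states[state_index].clock_info = NULL;
	}

	for (int i = 0; i < state_index; i++)
		free(states[i].clock_info);

	return 0;
}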
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index bc63f4cecf5d..ca6ccd69424e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -477,6 +477,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
native_mode->vdisplay != 0 &&
native_mode->clock != 0) {
mode = drm_mode_duplicate(dev, native_mode);
+ if (!mode)
+ return NULL;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
@@ -491,6 +493,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
* simpler.
*/
mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+ if (!mode)
+ return NULL;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index f9f74150d0d7..27b168936b2a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1333,6 +1333,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
if (obj->import_attach) {
DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ drm_gem_object_put(obj);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 709c4ef5e7d5..51db8b4f6d55 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -512,6 +512,7 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
*value = rdev->config.si.backend_enable_mask;
} else {
DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
+ return -EINVAL;
}
break;
case RADEON_INFO_MAX_SCLK:
@@ -633,6 +634,8 @@ void radeon_driver_lastclose_kms(struct drm_device *dev)
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
struct radeon_device *rdev = dev->dev_private;
+ struct radeon_fpriv *fpriv;
+ struct radeon_vm *vm;
int r;
file_priv->driver_priv = NULL;
@@ -645,48 +648,52 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {
- struct radeon_fpriv *fpriv;
- struct radeon_vm *vm;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
if (unlikely(!fpriv)) {
r = -ENOMEM;
- goto out_suspend;
+ goto err_suspend;
}
if (rdev->accel_working) {
vm = &fpriv->vm;
r = radeon_vm_init(rdev, vm);
- if (r) {
- kfree(fpriv);
- goto out_suspend;
- }
+ if (r)
+ goto err_fpriv;
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
- if (r) {
- radeon_vm_fini(rdev, vm);
- kfree(fpriv);
- goto out_suspend;
- }
+ if (r)
+ goto err_vm_fini;
/* map the ib pool buffer read only into
* virtual address space */
vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
rdev->ring_tmp_bo.bo);
+ if (!vm->ib_bo_va) {
+ r = -ENOMEM;
+ goto err_vm_fini;
+ }
+
r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
- if (r) {
- radeon_vm_fini(rdev, vm);
- kfree(fpriv);
- goto out_suspend;
- }
+ if (r)
+ goto err_vm_fini;
}
file_priv->driver_priv = fpriv;
}
-out_suspend:
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+ return 0;
+
+err_vm_fini:
+ radeon_vm_fini(rdev, vm);
+err_fpriv:
+ kfree(fpriv);
+
+err_suspend:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return r;
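The open path above now unwinds through labelled error paths instead of freeing inline at each failure. A minimal userspace sketch of that idiom, with plain allocations standing in for the fpriv/vm setup:

/*
 * Each failure jumps to the label that releases only what was already
 * acquired, in reverse order of acquisition.
 */
#include <stdlib.h>

static int init_step(void *vm) { (void)vm; return 0; }  /* stand-in, always succeeds */

static int open_context(void)
{
	void *fpriv, *vm;
	int r;

	fpriv = malloc(64);
	if (!fpriv)
		return -1;

	vm = malloc(64);
	if (!vm) {
		r = -1;
		goto err_fpriv;
	}

	r = init_step(vm);
	if (r)
		goto err_vm;

	/* a real open path would keep these; freed here to keep the sketch leak-free */
	free(vm);
	free(fpriv);
	return 0;

err_vm:
	free(vm);
err_fpriv:
	free(fpriv);
	return r;
}

int main(void)
{
	return open_context();
}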
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 5d10e11a9225..c9ae12be8864 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1720,6 +1720,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
struct drm_device *ddev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
+ struct radeon_connector *radeon_connector;
if (!rdev->pm.dpm_enabled)
return;
@@ -1729,6 +1730,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
/* update active crtc counts */
rdev->pm.dpm.new_active_crtcs = 0;
rdev->pm.dpm.new_active_crtc_count = 0;
+ rdev->pm.dpm.high_pixelclock_count = 0;
if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
@@ -1736,6 +1738,12 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
if (crtc->enabled) {
rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
rdev->pm.dpm.new_active_crtc_count++;
+ if (!radeon_crtc->connector)
+ continue;
+
+ radeon_connector = to_radeon_connector(radeon_crtc->connector);
+ if (radeon_connector->pixelclock_for_modeset > 297000)
+ rdev->pm.dpm.high_pixelclock_count++;
}
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index b906e8fbd5f3..7bc33a80934c 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -94,9 +94,19 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
- if (likely(ret == 0))
- bo->prime_shared_count++;
-
+ if (unlikely(ret))
+ goto error;
+
+ if (bo->tbo.moving) {
+ ret = dma_fence_wait(bo->tbo.moving, false);
+ if (unlikely(ret)) {
+ radeon_bo_unpin(bo);
+ goto error;
+ }
+ }
+
+ bo->prime_shared_count++;
+error:
radeon_bo_unreserve(bo);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 1ad5c3b86b64..a18bf70a251e 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -286,7 +286,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
if (rdev->uvd.vcpu_bo == NULL)
return -EINVAL;
- memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+ memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
size = radeon_bo_size(rdev->uvd.vcpu_bo);
size -= rdev->uvd_fw->size;
@@ -294,7 +294,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
ptr = rdev->uvd.cpu_addr;
ptr += rdev->uvd_fw->size;
- memset(ptr, 0, size);
+ memset_io((void __iomem *)ptr, 0, size);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index a0b382a637a6..97bab442dd54 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3002,6 +3002,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6605)) {
max_sclk = 75000;
}
+
+ if (rdev->pm.dpm.high_pixelclock_count > 1)
+ disable_sclk_switching = true;
}
if (rps->vce_active) {
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index d505ea7d5384..8f299d76b69b 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -72,6 +72,7 @@ static int cdn_dp_grf_write(struct cdn_dp_device *dp,
ret = regmap_write(dp->grf, reg, val);
if (ret) {
DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
+ clk_disable_unprepare(dp->grf_clk);
return ret;
}
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index bc073ec5c183..f7191ae2266f 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -231,7 +231,8 @@ struct dw_mipi_dsi_rockchip {
struct dw_mipi_dsi *dmd;
const struct rockchip_dw_dsi_chip_data *cdata;
struct dw_mipi_dsi_plat_data pdata;
- int devcnt;
+
+ bool dsi_bound;
};
struct dphy_pll_parameter_map {
@@ -564,13 +565,8 @@ static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_rockchip_phy_ops = {
.get_lane_mbps = dw_mipi_dsi_get_lane_mbps,
};
-static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi,
- int mux)
+static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi)
{
- if (dsi->cdata->lcdsel_grf_reg)
- regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
- mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
-
if (dsi->cdata->lanecfg1_grf_reg)
regmap_write(dsi->grf_regmap, dsi->cdata->lanecfg1_grf_reg,
dsi->cdata->lanecfg1);
@@ -584,6 +580,13 @@ static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi,
dsi->cdata->enable);
}
+static void dw_mipi_dsi_rockchip_set_lcdsel(struct dw_mipi_dsi_rockchip *dsi,
+ int mux)
+{
+ regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
+ mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
+}
+
static int
dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -624,10 +627,6 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
if (mux < 0)
return;
- pm_runtime_get_sync(dsi->dev);
- if (dsi->slave)
- pm_runtime_get_sync(dsi->slave->dev);
-
/*
* For the RK3399, the clk of grf must be enabled before writing grf
* register. And for RK3288 or other soc, this grf_clk must be NULL,
@@ -639,27 +638,17 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
return;
}
- dw_mipi_dsi_rockchip_config(dsi, mux);
+ dw_mipi_dsi_rockchip_set_lcdsel(dsi, mux);
if (dsi->slave)
- dw_mipi_dsi_rockchip_config(dsi->slave, mux);
+ dw_mipi_dsi_rockchip_set_lcdsel(dsi->slave, mux);
clk_disable_unprepare(dsi->grf_clk);
}
-static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder)
-{
- struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder);
-
- if (dsi->slave)
- pm_runtime_put(dsi->slave->dev);
- pm_runtime_put(dsi->dev);
-}
-
static const struct drm_encoder_helper_funcs
dw_mipi_dsi_encoder_helper_funcs = {
.atomic_check = dw_mipi_dsi_encoder_atomic_check,
.enable = dw_mipi_dsi_encoder_enable,
- .disable = dw_mipi_dsi_encoder_disable,
};
static const struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = {
@@ -794,25 +783,56 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev,
put_device(second);
}
+ pm_runtime_get_sync(dsi->dev);
+ if (dsi->slave)
+ pm_runtime_get_sync(dsi->slave->dev);
+
ret = clk_prepare_enable(dsi->pllref_clk);
if (ret) {
DRM_DEV_ERROR(dev, "Failed to enable pllref_clk: %d\n", ret);
- return ret;
+ goto out_pm_runtime;
}
+ /*
+ * With the GRF clock running, write lane and dual-mode configurations
+ * that won't change immediately. If we waited until enable() to do
+ * this, things like panel preparation would not be able to send
+ * commands over DSI.
+ */
+ ret = clk_prepare_enable(dsi->grf_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
+ goto out_pm_runtime;
+ }
+
+ dw_mipi_dsi_rockchip_config(dsi);
+ if (dsi->slave)
+ dw_mipi_dsi_rockchip_config(dsi->slave);
+
+ clk_disable_unprepare(dsi->grf_clk);
+
ret = rockchip_dsi_drm_create_encoder(dsi, drm_dev);
if (ret) {
DRM_DEV_ERROR(dev, "Failed to create drm encoder\n");
- return ret;
+ goto out_pm_runtime;
}
ret = dw_mipi_dsi_bind(dsi->dmd, &dsi->encoder);
if (ret) {
DRM_DEV_ERROR(dev, "Failed to bind: %d\n", ret);
- return ret;
+ goto out_pm_runtime;
}
+ dsi->dsi_bound = true;
+
return 0;
+
+out_pm_runtime:
+ pm_runtime_put(dsi->dev);
+ if (dsi->slave)
+ pm_runtime_put(dsi->slave->dev);
+
+ return ret;
}
static void dw_mipi_dsi_rockchip_unbind(struct device *dev,
@@ -824,9 +844,15 @@ static void dw_mipi_dsi_rockchip_unbind(struct device *dev,
if (dsi->is_slave)
return;
+ dsi->dsi_bound = false;
+
dw_mipi_dsi_unbind(dsi->dmd);
clk_disable_unprepare(dsi->pllref_clk);
+
+ pm_runtime_put(dsi->dev);
+ if (dsi->slave)
+ pm_runtime_put(dsi->slave->dev);
}
static const struct component_ops dw_mipi_dsi_rockchip_ops = {
@@ -884,6 +910,36 @@ static const struct dw_mipi_dsi_host_ops dw_mipi_dsi_rockchip_host_ops = {
.detach = dw_mipi_dsi_rockchip_host_detach,
};
+static int __maybe_unused dw_mipi_dsi_rockchip_resume(struct device *dev)
+{
+ struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev);
+ int ret;
+
+ /*
+ * Re-configure DSI state, if we were previously initialized. We need
+ * to do this before rockchip_drm_drv tries to re-enable() any panels.
+ */
+ if (dsi->dsi_bound) {
+ ret = clk_prepare_enable(dsi->grf_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
+ return ret;
+ }
+
+ dw_mipi_dsi_rockchip_config(dsi);
+ if (dsi->slave)
+ dw_mipi_dsi_rockchip_config(dsi->slave);
+
+ clk_disable_unprepare(dsi->grf_clk);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops dw_mipi_dsi_rockchip_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, dw_mipi_dsi_rockchip_resume)
+};
+
static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -967,23 +1023,16 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
if (ret != -EPROBE_DEFER)
DRM_DEV_ERROR(dev,
"Failed to probe dw_mipi_dsi: %d\n", ret);
- goto err_clkdisable;
+ return ret;
}
return 0;
-
-err_clkdisable:
- clk_disable_unprepare(dsi->pllref_clk);
- return ret;
}
static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
{
struct dw_mipi_dsi_rockchip *dsi = platform_get_drvdata(pdev);
- if (dsi->devcnt == 0)
- component_del(dsi->dev, &dw_mipi_dsi_rockchip_ops);
-
dw_mipi_dsi_remove(dsi->dmd);
return 0;
@@ -1072,6 +1121,7 @@ struct platform_driver dw_mipi_dsi_rockchip_driver = {
.remove = dw_mipi_dsi_rockchip_remove,
.driver = {
.of_match_table = dw_mipi_dsi_rockchip_dt_ids,
+ .pm = &dw_mipi_dsi_rockchip_pm_ops,
.name = "dw-mipi-dsi-rockchip",
},
};
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 906891b03a38..7805091bac32 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -528,13 +528,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
return ret;
}
- ret = clk_prepare_enable(hdmi->vpll_clk);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
- ret);
- return ret;
- }
-
hdmi->phy = devm_phy_optional_get(dev, "hdmi");
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
@@ -543,6 +536,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
return ret;
}
+ ret = clk_prepare_enable(hdmi->vpll_clk);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
+ ret);
+ return ret;
+ }
+
drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 84e3decb17b1..2e4e1933a43c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1848,10 +1848,10 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
vop_win_init(vop);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vop->len = resource_size(res);
vop->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(vop->regs))
return PTR_ERR(vop->regs);
+ vop->len = resource_size(res);
vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
if (!vop->regsbak)
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 1a5153197fe9..57f9baad9e36 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -235,11 +235,16 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
struct drm_sched_job *job;
+ struct dma_fence *f;
int r;
while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
struct drm_sched_fence *s_fence = job->s_fence;
+ /* Wait for all dependencies to avoid data corruption */
+ while ((f = job->sched->ops->dependency(job, entity)))
+ dma_fence_wait(f, false);
+
drm_sched_fence_scheduled(s_fence);
dma_fence_set_error(&s_fence->finished, -ESRCH);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 134e9106ebac..37679507f943 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -851,6 +851,9 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
if (sched->thread)
kthread_stop(sched->thread);
+ /* Confirm no work left behind accessing device structures */
+ cancel_delayed_work_sync(&sched->work_tdr);
+
sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 37e90e42943f..0e2d304f0d83 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -46,6 +46,7 @@ config DRM_SUN6I_DSI
default MACH_SUN8I
select CRC_CCITT
select DRM_MIPI_DSI
+ select RESET_CONTROLLER
select PHY_SUN6I_MIPI_DPHY
help
Choose this option if you want have an Allwinner SoC with
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index ec2a032e07b9..7186ba73d8e1 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -407,6 +407,7 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
struct drm_framebuffer *fb = state->fb;
const struct drm_format_info *format = fb->format;
uint64_t modifier = fb->modifier;
+ unsigned int ch1_phase_idx;
u32 out_fmt_val;
u32 in_fmt_val, in_mod_val, in_ps_val;
unsigned int i;
@@ -442,18 +443,19 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
* I have no idea what this does exactly, but it seems to be
* related to the scaler FIR filter phase parameters.
*/
+ ch1_phase_idx = (format->num_planes > 1) ? 1 : 0;
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZPHASE_REG,
- frontend->data->ch_phase[0].horzphase);
+ frontend->data->ch_phase[0]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZPHASE_REG,
- frontend->data->ch_phase[1].horzphase);
+ frontend->data->ch_phase[ch1_phase_idx]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE0_REG,
- frontend->data->ch_phase[0].vertphase[0]);
+ frontend->data->ch_phase[0]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE0_REG,
- frontend->data->ch_phase[1].vertphase[0]);
+ frontend->data->ch_phase[ch1_phase_idx]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE1_REG,
- frontend->data->ch_phase[0].vertphase[1]);
+ frontend->data->ch_phase[0]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE1_REG,
- frontend->data->ch_phase[1].vertphase[1]);
+ frontend->data->ch_phase[ch1_phase_idx]);
/*
* Checking the input format is sufficient since we currently only
@@ -687,30 +689,12 @@ static const struct dev_pm_ops sun4i_frontend_pm_ops = {
};
static const struct sun4i_frontend_data sun4i_a10_frontend = {
- .ch_phase = {
- {
- .horzphase = 0,
- .vertphase = { 0, 0 },
- },
- {
- .horzphase = 0xfc000,
- .vertphase = { 0xfc000, 0xfc000 },
- },
- },
+ .ch_phase = { 0x000, 0xfc000 },
.has_coef_rdy = true,
};
static const struct sun4i_frontend_data sun8i_a33_frontend = {
- .ch_phase = {
- {
- .horzphase = 0x400,
- .vertphase = { 0x400, 0x400 },
- },
- {
- .horzphase = 0x400,
- .vertphase = { 0x400, 0x400 },
- },
- },
+ .ch_phase = { 0x400, 0xfc400 },
.has_coef_access_ctrl = true,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.h b/drivers/gpu/drm/sun4i/sun4i_frontend.h
index 0c382c1ddb0f..2e7b76e50c2b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.h
@@ -115,11 +115,7 @@ struct reset_control;
struct sun4i_frontend_data {
bool has_coef_access_ctrl;
bool has_coef_rdy;
-
- struct {
- u32 horzphase;
- u32 vertphase[2];
- } ch_phase[2];
+ u32 ch_phase[2];
};
struct sun4i_frontend {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index ae7ae432aa4a..eb3b2350687f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -545,30 +545,13 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
if (info->bus_flags & DRM_BUS_FLAG_DE_LOW)
val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE;
- /*
- * On A20 and similar SoCs, the only way to achieve Positive Edge
- * (Rising Edge), is setting dclk clock phase to 2/3(240°).
- * By default TCON works in Negative Edge(Falling Edge),
- * this is why phase is set to 0 in that case.
- * Unfortunately there's no way to logically invert dclk through
- * IO_POL register.
- * The only acceptable way to work, triple checked with scope,
- * is using clock phase set to 0° for Negative Edge and set to 240°
- * for Positive Edge.
- * On A33 and similar SoCs there would be a 90° phase option,
- * but it divides also dclk by 2.
- * Following code is a way to avoid quirks all around TCON
- * and DOTCLOCK drivers.
- */
- if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
- clk_set_phase(tcon->dclk, 240);
-
if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
- clk_set_phase(tcon->dclk, 0);
+ val |= SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE;
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
SUN4I_TCON0_IO_POL_HSYNC_POSITIVE |
SUN4I_TCON0_IO_POL_VSYNC_POSITIVE |
+ SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE |
SUN4I_TCON0_IO_POL_DE_NEGATIVE,
val);
@@ -665,6 +648,30 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
SUN4I_TCON1_BASIC5_V_SYNC(vsync) |
SUN4I_TCON1_BASIC5_H_SYNC(hsync));
+ /* Setup the polarity of multiple signals */
+ if (tcon->quirks->polarity_in_ch0) {
+ val = 0;
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+
+ regmap_write(tcon->regs, SUN4I_TCON0_IO_POL_REG, val);
+ } else {
+ /* according to the vendor driver, this bit must always be set */
+ val = SUN4I_TCON1_IO_POL_UNKNOWN;
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ val |= SUN4I_TCON1_IO_POL_HSYNC_POSITIVE;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ val |= SUN4I_TCON1_IO_POL_VSYNC_POSITIVE;
+
+ regmap_write(tcon->regs, SUN4I_TCON1_IO_POL_REG, val);
+ }
+
/* Map output pins to channel 1 */
regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
SUN4I_TCON_GCTL_IOMAP_MASK,
@@ -1482,6 +1489,7 @@ static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = {
.has_channel_1 = true,
+ .polarity_in_ch0 = true,
.set_mux = sun8i_r40_tcon_tv_set_mux,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index a62ec826ae71..ce500c8dd4c7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -113,6 +113,7 @@
#define SUN4I_TCON0_IO_POL_REG 0x88
#define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase) ((phase & 3) << 28)
#define SUN4I_TCON0_IO_POL_DE_NEGATIVE BIT(27)
+#define SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE BIT(26)
#define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25)
#define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE BIT(24)
@@ -153,6 +154,11 @@
#define SUN4I_TCON1_BASIC5_V_SYNC(height) (((height) - 1) & 0x3ff)
#define SUN4I_TCON1_IO_POL_REG 0xf0
+/* there is no documentation about this bit */
+#define SUN4I_TCON1_IO_POL_UNKNOWN BIT(26)
+#define SUN4I_TCON1_IO_POL_HSYNC_POSITIVE BIT(25)
+#define SUN4I_TCON1_IO_POL_VSYNC_POSITIVE BIT(24)
+
#define SUN4I_TCON1_IO_TRI_REG 0xf4
#define SUN4I_TCON_ECC_FIFO_REG 0xf8
@@ -224,6 +230,7 @@ struct sun4i_tcon_quirks {
bool needs_de_be_mux; /* sun6i needs mux to select backend */
bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */
bool supports_lvds; /* Does the TCON support an LVDS output? */
+ bool polarity_in_ch0; /* some tcon1 channels have polarity bits in tcon0 pol register */
u8 dclk_min_div; /* minimum divider for TCON0 DCLK */
/* callback to handle tcon muxing options */
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index a44dca4b0219..8f721be26477 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -49,11 +49,9 @@ sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector,
{
/*
* Controller support maximum of 594 MHz, which correlates to
- * 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than
- * 340 MHz scrambling has to be enabled. Because scrambling is
- * not yet implemented, just limit to 340 MHz for now.
+ * 4K@60Hz 4:4:4 or RGB.
*/
- if (mode->clock > 340000)
+ if (mode->clock > 594000)
return MODE_CLOCK_HIGH;
return MODE_OK;
@@ -209,6 +207,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
phy_node = of_parse_phandle(dev->of_node, "phys", 0);
if (!phy_node) {
dev_err(dev, "Can't found PHY phandle\n");
+ ret = -EINVAL;
goto err_disable_clk_tmds;
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 43643ad31730..a4012ec13d4b 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -104,29 +104,21 @@ static const struct dw_hdmi_mpll_config sun50i_h6_mpll_cfg[] = {
static const struct dw_hdmi_curr_ctrl sun50i_h6_cur_ctr[] = {
/* pixelclk bpp8 bpp10 bpp12 */
- { 25175000, { 0x0000, 0x0000, 0x0000 }, },
{ 27000000, { 0x0012, 0x0000, 0x0000 }, },
- { 59400000, { 0x0008, 0x0008, 0x0008 }, },
- { 72000000, { 0x0008, 0x0008, 0x001b }, },
- { 74250000, { 0x0013, 0x0013, 0x0013 }, },
- { 90000000, { 0x0008, 0x001a, 0x001b }, },
- { 118800000, { 0x001b, 0x001a, 0x001b }, },
- { 144000000, { 0x001b, 0x001a, 0x0034 }, },
- { 180000000, { 0x001b, 0x0033, 0x0034 }, },
- { 216000000, { 0x0036, 0x0033, 0x0034 }, },
- { 237600000, { 0x0036, 0x0033, 0x001b }, },
- { 288000000, { 0x0036, 0x001b, 0x001b }, },
- { 297000000, { 0x0019, 0x001b, 0x0019 }, },
- { 330000000, { 0x0036, 0x001b, 0x001b }, },
- { 594000000, { 0x003f, 0x001b, 0x001b }, },
+ { 74250000, { 0x0013, 0x001a, 0x001b }, },
+ { 148500000, { 0x0019, 0x0033, 0x0034 }, },
+ { 297000000, { 0x0019, 0x001b, 0x001b }, },
+ { 594000000, { 0x0010, 0x001b, 0x001b }, },
{ ~0UL, { 0x0000, 0x0000, 0x0000 }, }
};
static const struct dw_hdmi_phy_config sun50i_h6_phy_config[] = {
/*pixelclk symbol term vlev*/
- { 74250000, 0x8009, 0x0004, 0x0232},
- { 148500000, 0x8029, 0x0004, 0x0273},
- { 594000000, 0x8039, 0x0004, 0x014a},
+ { 27000000, 0x8009, 0x0007, 0x02b0 },
+ { 74250000, 0x8009, 0x0006, 0x022d },
+ { 148500000, 0x8029, 0x0006, 0x0270 },
+ { 297000000, 0x8039, 0x0005, 0x01ab },
+ { 594000000, 0x8029, 0x0000, 0x008a },
{ ~0UL, 0x0000, 0x0000, 0x0000}
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 345b28b0a80a..dc4300a7b019 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -114,10 +114,10 @@
/* format 13 is semi-planar YUV411 VUVU */
#define SUN8I_MIXER_FBFMT_YUV411 14
/* format 15 doesn't exist */
-/* format 16 is P010 YVU */
-#define SUN8I_MIXER_FBFMT_P010_YUV 17
-/* format 18 is P210 YVU */
-#define SUN8I_MIXER_FBFMT_P210_YUV 19
+#define SUN8I_MIXER_FBFMT_P010_YUV 16
+/* format 17 is P010 YVU */
+#define SUN8I_MIXER_FBFMT_P210_YUV 18
+/* format 19 is P210 YVU */
/* format 20 is packed YVU444 10-bit */
/* format 21 is packed YUV444 10-bit */
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index fbf57bc3cdab..c410221824c1 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -919,6 +919,11 @@ static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
.atomic_disable = tegra_cursor_atomic_disable,
};
+static const uint64_t linear_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
struct tegra_dc *dc)
{
@@ -947,7 +952,7 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_plane_funcs, formats,
- num_formats, NULL,
+ num_formats, linear_modifiers,
DRM_PLANE_TYPE_CURSOR, NULL);
if (err < 0) {
kfree(plane);
@@ -1065,7 +1070,8 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_plane_funcs, formats,
- num_formats, NULL, type, NULL);
+ num_formats, linear_modifiers,
+ type, NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
@@ -1667,6 +1673,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
dev_err(dc->dev,
"failed to set clock rate to %lu Hz\n",
state->pclk);
+
+ err = clk_set_rate(dc->clk, state->pclk);
+ if (err < 0)
+ dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
+ dc->clk, state->pclk, err);
}
DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk),
@@ -1677,11 +1688,6 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
}
-
- err = clk_set_rate(dc->clk, state->pclk);
- if (err < 0)
- dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
- dc->clk, state->pclk, err);
}
static void tegra_dc_stop(struct tegra_dc *dc)
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index bc7cc32140f8..c2ab7cfaaf2f 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -122,8 +122,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
- drm->mode_config.allow_fb_modifiers = true;
-
drm->mode_config.normalize_zpos = true;
drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
@@ -256,7 +254,7 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
if (!fpriv)
return -ENOMEM;
- idr_init(&fpriv->contexts);
+ idr_init_base(&fpriv->contexts, 1);
mutex_init(&fpriv->lock);
filp->driver_priv = fpriv;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index a5d47e301c5f..13413d2b2602 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1455,8 +1455,10 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
dsi->slave = platform_get_drvdata(gangster);
of_node_put(np);
- if (!dsi->slave)
+ if (!dsi->slave) {
+ put_device(&gangster->dev);
return -EPROBE_DEFER;
+ }
dsi->slave->master = dsi;
}
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 75e65d9536d5..0419b6105c8a 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2875,21 +2875,21 @@ static int tegra_sor_init(struct host1x_client *client)
if (err < 0) {
dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
err);
- return err;
+ goto rpm_put;
}
err = reset_control_assert(sor->rst);
if (err < 0) {
dev_err(sor->dev, "failed to assert SOR reset: %d\n",
err);
- return err;
+ goto rpm_put;
}
}
err = clk_prepare_enable(sor->clk);
if (err < 0) {
dev_err(sor->dev, "failed to enable clock: %d\n", err);
- return err;
+ goto rpm_put;
}
usleep_range(1000, 3000);
@@ -2899,19 +2899,25 @@ static int tegra_sor_init(struct host1x_client *client)
if (err < 0) {
dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
err);
- return err;
+ clk_disable_unprepare(sor->clk);
+ goto rpm_put;
}
reset_control_release(sor->rst);
}
err = clk_prepare_enable(sor->clk_safe);
- if (err < 0)
+ if (err < 0) {
+ clk_disable_unprepare(sor->clk);
return err;
+ }
err = clk_prepare_enable(sor->clk_dp);
- if (err < 0)
+ if (err < 0) {
+ clk_disable_unprepare(sor->clk_safe);
+ clk_disable_unprepare(sor->clk);
return err;
+ }
/*
* Enable and unmask the HDA codec SCRATCH0 register interrupt. This
@@ -2923,6 +2929,12 @@ static int tegra_sor_init(struct host1x_client *client)
tegra_sor_writel(sor, value, SOR_INT_MASK);
return 0;
+
+rpm_put:
+ if (sor->rst)
+ pm_runtime_put(sor->dev);
+
+ return err;
}
static int tegra_sor_exit(struct host1x_client *client)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 43d756b7810e..67e23317c7de 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -58,11 +58,13 @@ struct drm_connector *tilcdc_encoder_find_connector(struct drm_device *ddev,
int tilcdc_add_component_encoder(struct drm_device *ddev)
{
struct tilcdc_drm_private *priv = ddev->dev_private;
- struct drm_encoder *encoder;
+ struct drm_encoder *encoder = NULL, *iter;
- list_for_each_entry(encoder, &ddev->mode_config.encoder_list, head)
- if (encoder->possible_crtcs & (1 << priv->crtc->index))
+ list_for_each_entry(iter, &ddev->mode_config.encoder_list, head)
+ if (iter->possible_crtcs & (1 << priv->crtc->index)) {
+ encoder = iter;
break;
+ }
if (!encoder) {
dev_err(ddev->dev, "%s: No suitable encoder found\n", __func__);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3ce8ad7603c7..3ffcbaa13878 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -761,7 +761,7 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
/* Don't evict this BO if it's outside of the
* requested placement range
*/
- if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
+ if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
(place->lpfn && place->lpfn <= bo->mem.start))
return false;
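A worked example (made-up numbers) of why the check above must use page counts: place->fpfn/lpfn and mem.start are page frame numbers, while mem.size is in bytes, so the old comparison almost never classified a BO as outside the requested range.

/*
 * Units illustration for the eviction-valuable check.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE_BYTES 4096UL

int main(void)
{
	unsigned long bo_start_pfn = 100;          /* first page of the BO */
	unsigned long bo_num_pages = 16;           /* 64 KiB BO */
	unsigned long bo_size_bytes = bo_num_pages * PAGE_SIZE_BYTES;
	unsigned long place_fpfn = 200;            /* requested range starts here */

	/* old check (bytes): 200 >= 100 + 65536 is false -> BO wrongly treated as in range */
	bool old_outside = place_fpfn >= bo_start_pfn + bo_size_bytes;
	/* new check (pages): 200 >= 100 + 16 is true -> BO is outside the range */
	bool new_outside = place_fpfn >= bo_start_pfn + bo_num_pages;

	printf("old: outside=%d, new: outside=%d\n", old_outside, new_outside);
	return 0;
}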
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 416f24823c0a..02836d4e8023 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -210,8 +210,8 @@ static int tve200_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- ret = -EINVAL;
+ if (irq < 0) {
+ ret = irq;
goto clk_disable;
}
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index ddb61a60c610..ef69773b5bed 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -29,7 +29,7 @@ static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
ret = usb_control_msg(udl->udev,
usb_rcvctrlpipe(udl->udev, 0),
(0x02), (0x80 | (0x02 << 5)), bval,
- 0xA1, read_buff, 2, HZ);
+ 0xA1, read_buff, 2, 1000);
if (ret < 1) {
DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
kfree(read_buff);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 19c092d75266..1609a85429ce 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -195,8 +195,8 @@ v3d_clean_caches(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
- V3D_L2TCACTL_L2TFLS), 100)) {
- DRM_ERROR("Timeout waiting for L1T write combiner flush\n");
+ V3D_L2TCACTL_TMUWCF), 100)) {
+ DRM_ERROR("Timeout waiting for TMU write combiner flush\n");
}
mutex_lock(&v3d->cache_clean_lock);
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 72d30d90b856..0af246a5609c 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -389,7 +389,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
- return ERR_PTR(-ENOMEM);
+ return NULL;
bo->madv = VC4_MADV_WILLNEED;
refcount_set(&bo->usecnt, 0);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 5e6fb6c2307f..0d78ba017a29 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -309,6 +309,7 @@ unbind_all:
component_unbind_all(dev, drm);
gem_destroy:
vc4_gem_destroy(drm);
+ drm_mode_config_cleanup(drm);
vc4_bo_cache_destroy(drm);
dev_put:
drm_dev_put(drm);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 6627b20c99e9..3ddaa817850d 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -750,7 +750,7 @@ bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_txp_armed(struct drm_crtc_state *state);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
- unsigned int *right, unsigned int *left,
+ unsigned int *left, unsigned int *right,
unsigned int *top, unsigned int *bottom);
/* vc4_debugfs.c */
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index c78fa8144776..0983949cc8c9 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -831,7 +831,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
unsigned long phy_clock;
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret) {
DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->port);
return;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 5e5f90810aca..363f456ea713 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -205,7 +205,7 @@ static void vc4_plane_reset(struct drm_plane *plane)
__drm_atomic_helper_plane_reset(plane, &vc4_state->base);
}
-static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
+static void vc4_dlist_counter_increment(struct vc4_plane_state *vc4_state)
{
if (vc4_state->dlist_count == vc4_state->dlist_size) {
u32 new_size = max(4u, vc4_state->dlist_count * 2);
@@ -220,7 +220,15 @@ static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
vc4_state->dlist_size = new_size;
}
- vc4_state->dlist[vc4_state->dlist_count++] = val;
+ vc4_state->dlist_count++;
+}
+
+static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
+{
+ unsigned int idx = vc4_state->dlist_count;
+
+ vc4_dlist_counter_increment(vc4_state);
+ vc4_state->dlist[idx] = val;
}
/* Returns the scl0/scl1 field based on whether the dimensions need to
@@ -871,8 +879,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
* be set when calling vc4_plane_allocate_lbm().
*/
if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
- vc4_state->y_scaling[1] != VC4_SCALING_NONE)
- vc4_state->lbm_offset = vc4_state->dlist_count++;
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
+ vc4_state->lbm_offset = vc4_state->dlist_count;
+ vc4_dlist_counter_increment(vc4_state);
+ }
if (num_planes > 1) {
/* Emit Cb/Cr as channel 0 and Y as channel
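A userspace sketch of the realloc-safe append pattern the refactor above introduces: growing the display list may move the backing array, so the index is recorded first and the store goes through whatever base pointer is current after the grow. realloc() stands in for krealloc(); error handling is elided.

/*
 * Record the index, grow (which may move the array), then store via the
 * possibly-new base pointer.
 */
#include <stdint.h>
#include <stdlib.h>

struct dlist {
	uint32_t *entries;
	unsigned int count;
	unsigned int size;
};

static void dlist_counter_increment(struct dlist *d)
{
	if (d->count == d->size) {
		unsigned int new_size = d->size ? d->size * 2 : 4;

		d->entries = realloc(d->entries, new_size * sizeof(*d->entries));
		d->size = new_size;
	}
	d->count++;
}

static void dlist_write(struct dlist *d, uint32_t val)
{
	unsigned int idx = d->count;

	dlist_counter_increment(d);   /* may move d->entries */
	d->entries[idx] = val;        /* store via the current base pointer */
}

int main(void)
{
	struct dlist d = { 0 };

	for (uint32_t i = 0; i < 10; i++)
		dlist_write(&d, i);

	free(d.entries);
	return 0;
}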
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index bf720206727f..0d9263f65d95 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -285,12 +285,18 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
if (WARN_ON(i == ARRAY_SIZE(drm_fmts)))
return;
- ctrl = TXP_GO | TXP_VSTART_AT_EOF | TXP_EI |
+ ctrl = TXP_GO | TXP_EI |
VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) |
VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT);
if (fb->format->has_alpha)
ctrl |= TXP_ALPHA_ENABLE;
+ else
+ /*
+ * If TXP_ALPHA_ENABLE isn't set and TXP_ALPHA_INVERT is, the
+ * hardware will force the output padding to be 0xff.
+ */
+ ctrl |= TXP_ALPHA_INVERT;
gem = drm_fb_cma_get_gem_obj(fb, 0);
TXP_WRITE(TXP_DST_PTR, gem->paddr + fb->offsets[0]);
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index e622485ae826..7e34307eb075 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -174,6 +174,8 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
DRM_DEBUG("add mode: %dx%d\n", width, height);
mode = drm_cvt_mode(connector->dev, width, height, 60,
false, false, false);
+ if (!mode)
+ return count;
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
count++;
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index c190702fab72..4f855b242dfd 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -96,8 +96,10 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
vgdev->capsets[i].id > 0, 5 * HZ);
if (ret == 0) {
DRM_ERROR("timed out waiting for cap set %d\n", i);
+ spin_lock(&vgdev->display_info_lock);
kfree(vgdev->capsets);
vgdev->capsets = NULL;
+ spin_unlock(&vgdev->display_info_lock);
return;
}
DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
@@ -216,6 +218,7 @@ err_ttm:
err_vbufs:
vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
+ dev->dev_private = NULL;
kfree(vgdev);
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 7ac20490e1b4..0ca996e6fd5c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -80,9 +80,7 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
{
struct virtio_gpu_vbuffer *vbuf;
- vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
- if (!vbuf)
- return ERR_PTR(-ENOMEM);
+ vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);
BUG_ON(size > MAX_INLINE_CMD_SIZE);
vbuf->buf = (void *)vbuf + sizeof(*vbuf);
@@ -142,10 +140,6 @@ static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
resp_size, resp_buf, cb);
- if (IS_ERR(vbuf)) {
- *vbuffer_p = NULL;
- return ERR_CAST(vbuf);
- }
*vbuffer_p = vbuf;
return (struct virtio_gpu_command *)vbuf->buf;
}
@@ -572,9 +566,13 @@ static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
int i = le32_to_cpu(cmd->capset_index);
spin_lock(&vgdev->display_info_lock);
- vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
- vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
- vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
+ if (vgdev->capsets) {
+ vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
+ vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
+ vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
+ } else {
+ DRM_ERROR("invalid capset memory.");
+ }
spin_unlock(&vgdev->display_info_lock);
wake_up(&vgdev->resp_wq);
}
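A small pthread sketch of the check-under-lock pattern added above: the timeout path frees and NULLs the shared capset array under the lock, so the response callback must re-check the pointer while holding that same lock. Names and the mutex are illustrative stand-ins for the spinlock-protected kernel state.

/*
 * Producer frees the shared array under the lock; consumer re-checks the
 * pointer under the same lock before writing.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t info_lock = PTHREAD_MUTEX_INITIALIZER;
static int *capsets;                 /* shared, may be freed on timeout */

static void timeout_path(void)
{
	pthread_mutex_lock(&info_lock);
	free(capsets);
	capsets = NULL;
	pthread_mutex_unlock(&info_lock);
}

static void response_callback(int idx, int value)
{
	pthread_mutex_lock(&info_lock);
	if (capsets)                 /* may have been freed meanwhile */
		capsets[idx] = value;
	pthread_mutex_unlock(&info_lock);
}

int main(void)
{
	capsets = calloc(4, sizeof(*capsets));
	response_callback(0, 42);
	timeout_path();
	response_callback(0, 43);    /* safely ignored after the free */
	return 0;
}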
@@ -988,8 +986,9 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
}
/* gets freed when the ring has consumed it */
- ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
- GFP_KERNEL);
+ ents = kvmalloc_array(nents,
+ sizeof(struct virtio_gpu_mem_entry),
+ GFP_KERNEL);
if (!ents) {
DRM_ERROR("failed to allocate ent list\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index d5585695c64d..45d6ebbdbdb2 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -33,7 +33,7 @@ static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer)
+ (i * composer->pitch)
+ (j * composer->cpp);
/* XRGB format ignores Alpha channel */
- memset(vaddr_out + src_offset + 24, 0, 8);
+ bitmap_clear(vaddr_out + src_offset, 24, 8);
crc = crc32_le(crc, vaddr_out + src_offset,
sizeof(u32));
}
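A tiny example of the bit-versus-byte distinction behind the fix above: for little-endian XRGB8888, the unused X/alpha channel occupies bits 24..31 of the 32-bit pixel, so it must be cleared as eight bits at bit offset 24; memset(... + 24, 0, 8) instead wiped eight bytes far past the 4-byte pixel.

/*
 * Clearing the X/alpha byte of an XRGB8888 pixel is a bit mask, not a
 * byte-offset memset.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pixel = 0xAA112233;   /* X=0xAA, R=0x11, G=0x22, B=0x33 */

	/* clear bits 24..31 (the X/alpha byte), keep RGB intact */
	pixel &= ~(0xFFu << 24);

	printf("masked pixel: 0x%08X\n", pixel);   /* 0x00112233 */
	return 0;
}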
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 927dafaebc76..8b01fae65f43 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -20,7 +20,8 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
output->period_ns);
- WARN_ON(ret_overrun != 1);
+ if (ret_overrun != 1)
+ pr_warn("%s: vblank timer overrun\n", __func__);
ret = drm_crtc_handle_vblank(crtc);
if (!ret)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5eb73ded8e07..765f7a62870d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1002,15 +1002,14 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
struct vmw_private *dev_priv,
struct vmw_fence_obj **p_fence,
uint32_t *p_handle);
-extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
struct vmw_fpriv *vmw_fp,
int ret,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj *fence,
uint32_t fence_handle,
- int32_t out_fence_fd,
- struct sync_file *sync_file);
+ int32_t out_fence_fd);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index ff86d49dc5e8..e3d20048075b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3413,17 +3413,17 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
* Also if copying fails, user-space will be unable to signal the fence object
* so we wait for it immediately, and then unreference the user-space reference.
*/
-void
+int
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
struct vmw_fpriv *vmw_fp, int ret,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj *fence, uint32_t fence_handle,
- int32_t out_fence_fd, struct sync_file *sync_file)
+ int32_t out_fence_fd)
{
struct drm_vmw_fence_rep fence_rep;
if (user_fence_rep == NULL)
- return;
+ return 0;
memset(&fence_rep, 0, sizeof(fence_rep));
@@ -3451,20 +3451,14 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
* handle.
*/
if (unlikely(ret != 0) && (fence_rep.error == 0)) {
- if (sync_file)
- fput(sync_file->file);
-
- if (fence_rep.fd != -1) {
- put_unused_fd(fence_rep.fd);
- fence_rep.fd = -1;
- }
-
ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
TTM_REF_USAGE);
VMW_DEBUG_USER("Fence copy error. Syncing.\n");
(void) vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
}
+
+ return ret ? -EFAULT : 0;
}
/**
@@ -3806,16 +3800,23 @@ int vmw_execbuf_process(struct drm_file *file_priv,
(void) vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
+ }
+ }
+
+ ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+ user_fence_rep, fence, handle, out_fence_fd);
+
+ if (sync_file) {
+ if (ret) {
+ /* usercopy of fence failed, put the file object */
+ fput(sync_file->file);
+ put_unused_fd(out_fence_fd);
} else {
/* Link the fence with the FD created earlier */
fd_install(out_fence_fd, sync_file->file);
}
}
- vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
- user_fence_rep, fence, handle, out_fence_fd,
- sync_file);
-
/* Don't unreference when handing fence out */
if (unlikely(out_fence != NULL)) {
*out_fence = fence;
@@ -3833,7 +3834,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
*/
vmw_validation_unref_lists(&val_ctx);
- return 0;
+ return ret;
out_unlock_binding:
mutex_unlock(&dev_priv->binding_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index ea29953e0b08..8e7b05484ba8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -498,7 +498,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
- struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd = {0};
struct vmw_fb_par *par = info->par;
struct fb_var_screeninfo *var = &info->var;
struct drm_framebuffer *cur_fb;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 178a6cd1a06f..874093a0b04f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1171,7 +1171,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
}
vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
- handle, -1, NULL);
+ handle, -1);
vmw_fence_obj_unreference(&fence);
return 0;
out_no_create:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 33b151988747..0b800c354049 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2570,7 +2570,7 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
if (file_priv)
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
ret, user_fence_rep, fence,
- handle, -1, NULL);
+ handle, -1);
if (out_fence)
*out_fence = fence;
else
diff --git a/drivers/gpu/drm/zte/Kconfig b/drivers/gpu/drm/zte/Kconfig
index 90ebaedc11fd..aa8594190b50 100644
--- a/drivers/gpu/drm/zte/Kconfig
+++ b/drivers/gpu/drm/zte/Kconfig
@@ -3,7 +3,6 @@ config DRM_ZTE
tristate "DRM Support for ZTE SoCs"
depends on DRM && ARCH_ZX
select DRM_KMS_CMA_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_HELPER
select SND_SOC_HDMI_CODEC if SND_SOC
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index a1c85d1521f5..82b244cb313e 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -585,21 +585,21 @@ static const struct ipu_rgb def_bgra_16 = {
.bits_per_pixel = 16,
};
-#define Y_OFFSET(pix, x, y) ((x) + pix->width * (y))
-#define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * ((y) / 2) / 2) + (x) / 2)
-#define V_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * pix->height / 4) + \
- (pix->width * ((y) / 2) / 2) + (x) / 2)
-#define U2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * (y) / 2) + (x) / 2)
-#define V2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * pix->height / 2) + \
- (pix->width * (y) / 2) + (x) / 2)
-#define UV_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * ((y) / 2)) + (x))
-#define UV2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * y) + (x))
+#define Y_OFFSET(pix, x, y) ((x) + pix->bytesperline * (y))
+#define U_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
+ (pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
+#define V_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
+ (pix->bytesperline * pix->height / 4) + \
+ (pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
+#define U2_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
+ (pix->bytesperline * (y) / 2) + (x) / 2)
+#define V2_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
+ (pix->bytesperline * pix->height / 2) + \
+ (pix->bytesperline * (y) / 2) + (x) / 2)
+#define UV_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
+ (pix->bytesperline * ((y) / 2)) + (x))
+#define UV2_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
+ (pix->bytesperline * y) + (x))
#define NUM_ALPHA_CHANNELS 7
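A worked example (illustrative numbers) of the stride fix above: when the buffer has padding between lines, bytesperline exceeds the visible width, and plane offsets computed from the width point at the wrong samples.

/*
 * U-plane offset for a YUV420 crop origin, computed with the visible
 * width versus the real line stride.
 */
#include <stdio.h>

#define U_OFFSET(stride, height, x, y) \
	((stride) * (height) + ((stride) * ((y) / 2) / 2) + (x) / 2)

int main(void)
{
	unsigned int width = 1280, bytesperline = 1536, height = 720;
	unsigned int x = 64, y = 100;

	printf("U offset with width:        %u\n",
	       U_OFFSET(width, height, x, y));         /* too small: ignores the padding */
	printf("U offset with bytesperline: %u\n",
	       U_OFFSET(bytesperline, height, x, y));  /* matches the real buffer layout */
	return 0;
}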
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index b4a31d506fcc..74eca68891ad 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -451,8 +451,9 @@ static void ipu_di_config_clock(struct ipu_di *di,
error = rate / (sig->mode.pixelclock / 1000);
- dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %d.%u%%\n",
- rate, div, (signed)(error - 1000) / 10, error % 10);
+ dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %c%d.%d%%\n",
+ rate, div, error < 1000 ? '-' : '+',
+ abs(error - 1000) / 10, abs(error - 1000) % 10);
/* Allow a 1% error */
if (error < 1010 && error >= 990) {