Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 39
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 20
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 25
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 23
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 30
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_drv.c | 4
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 21
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/bridge/sii902x.c | 247
-rw-r--r--  drivers/gpu/drm/bridge/tc358764.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 3
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 142
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 231
-rw-r--r--  drivers/gpu/drm/drm_atomic_state_helper.c | 157
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c | 17
-rw-r--r--  drivers/gpu/drm/drm_client.c | 12
-rw-r--r--  drivers/gpu/drm/drm_color_mgmt.c | 14
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 41
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 31
-rw-r--r--  drivers/gpu/drm/drm_damage_helper.c | 334
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 89
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 92
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 13
-rw-r--r--  drivers/gpu/drm/drm_dsc.c | 228
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 3
-rw-r--r--  drivers/gpu/drm/drm_fourcc.c | 1
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 109
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 86
-rw-r--r--  drivers/gpu/drm/drm_info.c | 137
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 5
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c | 6
-rw-r--r--  drivers/gpu/drm/drm_modeset_lock.c | 6
-rw-r--r--  drivers/gpu/drm/drm_panel_orientation_quirks.c | 2
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 16
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 88
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 419
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_dump.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 5
-rw-r--r--  drivers/gpu/drm/exynos/Makefile | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 87
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dma.c | 157
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 55
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 11
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.c | 111
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.h | 134
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_scaler.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 30
-rw-r--r--  drivers/gpu/drm/exynos/regs-decon5433.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/regs-mixer.h | 9
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 13
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 309
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 123
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 298
-rw-r--r--  drivers/gpu/drm/i915/i915_fixed.h | 143
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 178
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 249
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 362
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 83
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bdw.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bdw.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bxt.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bxt.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cflgt2.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cflgt2.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cflgt3.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cflgt3.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_chv.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_chv.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cnl.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cnl.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_glk.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_glk.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_hsw.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_hsw.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_icl.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_icl.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt2.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt2.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt3.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt3.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt2.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt2.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt3.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt3.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt4.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt4.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 186
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 49
-rw-r--r--  drivers/gpu/drm/i915/i915_query.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 829
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 121
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c | 399
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.h | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_syncmap.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_timeline.h | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 8
-rw-r--r--  drivers/gpu/drm/i915/icl_dsi.c | 1337
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 119
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 184
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 37
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 91
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 39
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_combo_phy.c | 254
-rw-r--r--  drivers/gpu/drm/i915/intel_connector.c (renamed from drivers/gpu/drm/i915/intel_modes.c) | 129
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 162
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 666
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 77
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 2220
-rw-r--r--  drivers/gpu/drm/i915/intel_display.h | 58
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 1108
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_dpio_phy.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 120
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.h | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 275
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 128
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_vbt.c | 306
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 77
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fw.c | 113
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_reg.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_submission.c | 216
-rw-r--r--  drivers/gpu/drm/i915/intel_hdcp.c | 214
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 234
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 67
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 397
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c | 347
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 67
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 158
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.h | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 945
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 358
-rw-r--r--  drivers/gpu/drm/i915/intel_quirks.c | 169
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 79
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 49
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 346
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 56
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 740
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_uc_fw.h | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_vbt_defs.h | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_vdsc.c | 1088
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.c | 991
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.h | 36
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_pages.c | 34
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c | 428
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_reset.c | 44
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_reset.h | 15
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c | 199
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.h | 37
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_guc.c | 59
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 70
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_lrc.c | 566
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_workarounds.c | 247
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c | 2
-rw-r--r--  drivers/gpu/drm/i915/vlv_dsi.c | 190
-rw-r--r--  drivers/gpu/drm/imx/dw_hdmi-imx.c | 5
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 11
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c | 10
-rw-r--r--  drivers/gpu/drm/imx/imx-tve.c | 12
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 10
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 18
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 10
-rw-r--r--  drivers/gpu/drm/meson/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/meson/Makefile | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_canvas.c | 7
-rw-r--r--  drivers/gpu/drm/meson/meson_canvas.h | 11
-rw-r--r--  drivers/gpu/drm/meson/meson_crtc.c | 265
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 74
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.h | 66
-rw-r--r--  drivers/gpu/drm/meson/meson_dw_hdmi.c | 12
-rw-r--r--  drivers/gpu/drm/meson/meson_overlay.c | 588
-rw-r--r--  drivers/gpu/drm/meson/meson_overlay.h | 14
-rw-r--r--  drivers/gpu/drm/meson/meson_plane.c | 199
-rw-r--r--  drivers/gpu/drm/meson/meson_registers.h | 3
-rw-r--r--  drivers/gpu/drm/meson/meson_vclk.c | 127
-rw-r--r--  drivers/gpu/drm/meson/meson_vclk.h | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_venc.c | 129
-rw-r--r--  drivers/gpu/drm/meson/meson_viu.c | 42
-rw-r--r--  drivers/gpu/drm/meson/meson_viu.h | 1
-rw-r--r--  drivers/gpu/drm/meson/meson_vpp.c | 90
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_drv.c | 4
-rw-r--r--  drivers/gpu/drm/panel/panel-samsung-s6d16d0.c | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 27
-rw-r--r--  drivers/gpu/drm/pl111/pl111_vexpress.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 18
-rw-r--r--  drivers/gpu/drm/qxl/qxl_draw.c | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_kms.c | 6
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c | 22
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 4
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 30
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 47
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.h | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 23
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_plane.c | 3
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_lvds.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-reg.c | 2
-rw-r--r--  drivers/gpu/drm/selftests/Makefile | 3
-rw-r--r--  drivers/gpu/drm/selftests/drm_modeset_selftests.h | 21
-rw-r--r--  drivers/gpu/drm/selftests/test-drm_damage_helper.c | 811
-rw-r--r--  drivers/gpu/drm/selftests/test-drm_modeset_common.h | 21
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/sti/sti_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_backend.c | 106
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_backend.h | 3
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c | 13
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_frontend.c | 113
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_frontend.h | 11
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_layer.c | 15
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c | 29
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.h | 1
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_ui_layer.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_vi_layer.c | 2
-rw-r--r--  drivers/gpu/drm/tinydrm/core/tinydrm-core.c | 71
-rw-r--r--  drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c | 6
-rw-r--r--  drivers/gpu/drm/tinydrm/hx8357d.c | 4
-rw-r--r--  drivers/gpu/drm/tinydrm/ili9225.c | 5
-rw-r--r--  drivers/gpu/drm/tinydrm/ili9341.c | 4
-rw-r--r--  drivers/gpu/drm/tinydrm/mi0283qt.c | 6
-rw-r--r--  drivers/gpu/drm/tinydrm/mipi-dbi.c | 10
-rw-r--r--  drivers/gpu/drm/tinydrm/repaper.c | 7
-rw-r--r--  drivers/gpu/drm/tinydrm/st7586.c | 5
-rw-r--r--  drivers/gpu/drm/tinydrm/st7735r.c | 4
-rw-r--r--  drivers/gpu/drm/tve200/tve200_drv.c | 4
-rw-r--r--  drivers/gpu/drm/v3d/v3d_bo.c | 1
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.c | 15
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.h | 32
-rw-r--r--  drivers/gpu/drm/v3d/v3d_fence.c | 10
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c | 204
-rw-r--r--  drivers/gpu/drm/v3d/v3d_irq.c | 29
-rw-r--r--  drivers/gpu/drm/v3d/v3d_regs.h | 49
-rw-r--r--  drivers/gpu/drm/v3d/v3d_sched.c | 149
-rw-r--r--  drivers/gpu/drm/v3d/v3d_trace.h | 121
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h | 6
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c | 6
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 256
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c | 12
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c | 1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h | 26
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c | 39
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c | 116
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c | 17
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c | 19
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c | 46
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c | 92
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.c | 1
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.h | 3
-rw-r--r--  drivers/gpu/drm/vkms/vkms_gem.c | 26
-rw-r--r--  drivers/gpu/drm/vkms/vkms_plane.c | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 569
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 150
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 359
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 561
308 files changed, 20477 insertions, 9721 deletions
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 7f3be3506057..ce8d1d384319 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -10,7 +10,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_scatter.o drm_pci.o \
drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
- drm_info.o drm_encoder_slave.o \
+ drm_encoder_slave.o \
drm_trace_points.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
drm_modeset_lock.o drm_atomic.o drm_bridge.o \
@@ -32,12 +32,12 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
-drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
drm_scdc_helper.o drm_gem_framebuffer_helper.o \
- drm_atomic_state_helper.o
+ drm_atomic_state_helper.o drm_damage_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 1e209e93dc9b..2dfaf158ef07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -563,8 +563,11 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
- amdgpu_dpm_switch_power_profile(adev,
- PP_SMC_POWER_PROFILE_COMPUTE, !idle);
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->switch_power_profile)
+ amdgpu_dpm_switch_power_profile(adev,
+ PP_SMC_POWER_PROFILE_COMPUTE,
+ !idle);
}
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4a6a1d4a88ea..5dc3ee372e2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1200,7 +1200,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
int i;
for (i = 0; i < p->num_post_dep_syncobjs; ++i)
- drm_syncobj_replace_fence(p->post_dep_syncobjs[i], 0, p->fence);
+ drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
}
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 2821d1d846e4..9fc3296592fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -46,6 +46,7 @@ MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
MODULE_FIRMWARE("amdgpu/verde_mc.bin");
MODULE_FIRMWARE("amdgpu/oland_mc.bin");
+MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
MODULE_FIRMWARE("amdgpu/si58_mc.bin");
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f2cd87dc365a..8849b74078d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -65,6 +65,13 @@
#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
+/* for Vega20 register name change */
+#define mmHDP_MEM_POWER_CTRL 0x00d4
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
+#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
/*
* Indirect registers accessor
*/
@@ -894,15 +901,33 @@ static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable
{
uint32_t def, data;
- def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+ if (adev->asic_type == CHIP_VEGA20) {
+ def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
- data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
- else
- data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+ data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
+ else
+ data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
- if (def != data)
- WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
+ if (def != data)
+ WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
+ } else {
+ def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+ data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+ else
+ data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+
+ if (def != data)
+ WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
+ }
}
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index d94c7d03bf24..3958729d6265 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4536,12 +4536,12 @@ static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
struct smu7_single_dpm_table *golden_sclk_table =
&(data->golden_dpm_table.sclk_table);
- int value;
+ int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+ int golden_value = golden_sclk_table->dpm_levels
+ [golden_sclk_table->count - 1].value;
- value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
- 100 /
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
@@ -4578,12 +4578,12 @@ static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
struct smu7_single_dpm_table *golden_mclk_table =
&(data->golden_dpm_table.mclk_table);
- int value;
+ int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+ int golden_value = golden_mclk_table->dpm_levels
+ [golden_mclk_table->count - 1].value;
- value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
- 100 /
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 8c4db86bb4b7..e2bc6e0c229f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -4522,15 +4522,13 @@ static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
struct vega10_single_dpm_table *golden_sclk_table =
&(data->golden_dpm_table.gfx_table);
- int value;
-
- value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
- golden_sclk_table->dpm_levels
- [golden_sclk_table->count - 1].value) *
- 100 /
- golden_sclk_table->dpm_levels
+ int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+ int golden_value = golden_sclk_table->dpm_levels
[golden_sclk_table->count - 1].value;
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
+
return value;
}
@@ -4575,16 +4573,13 @@ static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
struct vega10_single_dpm_table *golden_mclk_table =
&(data->golden_dpm_table.mem_table);
- int value;
-
- value = (mclk_table->dpm_levels
- [mclk_table->count - 1].value -
- golden_mclk_table->dpm_levels
- [golden_mclk_table->count - 1].value) *
- 100 /
- golden_mclk_table->dpm_levels
+ int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+ int golden_value = golden_mclk_table->dpm_levels
[golden_mclk_table->count - 1].value;
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
+
return value;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 74bc37308dc0..54364444ecd1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -2243,12 +2243,12 @@ static int vega12_get_sclk_od(struct pp_hwmgr *hwmgr)
struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
struct vega12_single_dpm_table *golden_sclk_table =
&(data->golden_dpm_table.gfx_table);
- int value;
+ int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+ int golden_value = golden_sclk_table->dpm_levels
+ [golden_sclk_table->count - 1].value;
- value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
- 100 /
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
@@ -2264,16 +2264,13 @@ static int vega12_get_mclk_od(struct pp_hwmgr *hwmgr)
struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
struct vega12_single_dpm_table *golden_mclk_table =
&(data->golden_dpm_table.mem_table);
- int value;
-
- value = (mclk_table->dpm_levels
- [mclk_table->count - 1].value -
- golden_mclk_table->dpm_levels
- [golden_mclk_table->count - 1].value) *
- 100 /
- golden_mclk_table->dpm_levels
+ int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+ int golden_value = golden_mclk_table->dpm_levels
[golden_mclk_table->count - 1].value;
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
+
return value;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index f2daf00cc911..2679d1240fa1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -75,7 +75,17 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
- data->registry_data.disallowed_features = 0x0;
+ /*
+ * Disable the following features for now:
+ * GFXCLK DS
+ * SOCLK DS
+ * LCLK DS
+ * DCEFCLK DS
+ * FCLK DS
+ * MP1CLK DS
+ * MP0CLK DS
+ */
+ data->registry_data.disallowed_features = 0xE0041C00;
data->registry_data.od_state_in_dc_support = 0;
data->registry_data.thermal_support = 1;
data->registry_data.skip_baco_hardware = 0;
@@ -1313,12 +1323,13 @@ static int vega20_get_sclk_od(
&(data->dpm_table.gfx_table);
struct vega20_single_dpm_table *golden_sclk_table =
&(data->golden_dpm_table.gfx_table);
- int value;
+ int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+ int golden_value = golden_sclk_table->dpm_levels
+ [golden_sclk_table->count - 1].value;
/* od percentage */
- value = DIV_ROUND_UP((sclk_table->dpm_levels[sclk_table->count - 1].value -
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * 100,
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value);
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
@@ -1358,12 +1369,13 @@ static int vega20_get_mclk_od(
&(data->dpm_table.mem_table);
struct vega20_single_dpm_table *golden_mclk_table =
&(data->golden_dpm_table.mem_table);
- int value;
+ int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+ int golden_value = golden_mclk_table->dpm_levels
+ [golden_mclk_table->count - 1].value;
/* od percentage */
- value = DIV_ROUND_UP((mclk_table->dpm_levels[mclk_table->count - 1].value -
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * 100,
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value);
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 2af847ebca34..206a76abf771 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -190,7 +190,7 @@ err_unload:
arcpgu_unload(drm);
err_unref:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -201,7 +201,7 @@ static int arcpgu_remove(struct platform_device *pdev)
drm_dev_unregister(drm);
arcpgu_unload(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 69dab82a3771..bf589c53b908 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -60,8 +60,29 @@ static const struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
+static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+ bool primary = false;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return;
+
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+ drm_fb_helper_remove_conflicting_framebuffers(ap, "astdrmfb", primary);
+ kfree(ap);
+}
+
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ ast_kick_out_firmware_fb(pdev);
+
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 5e77d456d9bb..7c6ac3cadb6b 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -568,6 +568,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
}
ast_bo_unreserve(bo);
+ ast_set_offset_reg(crtc);
ast_set_start_address_crt1(crtc, (u32)gpu_addr);
return 0;
@@ -1254,7 +1255,7 @@ static int ast_cursor_move(struct drm_crtc *crtc,
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
/* dummy write to fire HWC */
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00);
+ ast_show_cursor(crtc);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 9eeb8ef0b174..2fee47b0d50b 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -95,6 +95,7 @@ config DRM_SII902X
depends on OF
select DRM_KMS_HELPER
select REGMAP_I2C
+ select I2C_MUX
---help---
Silicon Image sii902x bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index e59a13542333..bfa902013aa4 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -1,4 +1,6 @@
/*
+ * Copyright (C) 2018 Renesas Electronics
+ *
* Copyright (C) 2016 Atmel
* Bo Shen <voice.shen@atmel.com>
*
@@ -21,6 +23,7 @@
*/
#include <linux/gpio/consumer.h>
+#include <linux/i2c-mux.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -86,8 +89,49 @@ struct sii902x {
struct drm_bridge bridge;
struct drm_connector connector;
struct gpio_desc *reset_gpio;
+ struct i2c_mux_core *i2cmux;
};
+static int sii902x_read_unlocked(struct i2c_client *i2c, u8 reg, u8 *val)
+{
+ union i2c_smbus_data data;
+ int ret;
+
+ ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+ I2C_SMBUS_READ, reg, I2C_SMBUS_BYTE_DATA, &data);
+
+ if (ret < 0)
+ return ret;
+
+ *val = data.byte;
+ return 0;
+}
+
+static int sii902x_write_unlocked(struct i2c_client *i2c, u8 reg, u8 val)
+{
+ union i2c_smbus_data data;
+
+ data.byte = val;
+
+ return __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+ I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE_DATA,
+ &data);
+}
+
+static int sii902x_update_bits_unlocked(struct i2c_client *i2c, u8 reg, u8 mask,
+ u8 val)
+{
+ int ret;
+ u8 status;
+
+ ret = sii902x_read_unlocked(i2c, reg, &status);
+ if (ret)
+ return ret;
+ status &= ~mask;
+ status |= val & mask;
+ return sii902x_write_unlocked(i2c, reg, status);
+}
+
static inline struct sii902x *bridge_to_sii902x(struct drm_bridge *bridge)
{
return container_of(bridge, struct sii902x, bridge);
@@ -135,41 +179,11 @@ static const struct drm_connector_funcs sii902x_connector_funcs = {
static int sii902x_get_modes(struct drm_connector *connector)
{
struct sii902x *sii902x = connector_to_sii902x(connector);
- struct regmap *regmap = sii902x->regmap;
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- struct device *dev = &sii902x->i2c->dev;
- unsigned long timeout;
- unsigned int retries;
- unsigned int status;
struct edid *edid;
- int num = 0;
- int ret;
-
- ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
- SII902X_SYS_CTRL_DDC_BUS_REQ,
- SII902X_SYS_CTRL_DDC_BUS_REQ);
- if (ret)
- return ret;
-
- timeout = jiffies +
- msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
- do {
- ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
- if (ret)
- return ret;
- } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
- time_before(jiffies, timeout));
+ int num = 0, ret;
- if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
- dev_err(dev, "failed to acquire the i2c bus\n");
- return -ETIMEDOUT;
- }
-
- ret = regmap_write(regmap, SII902X_SYS_CTRL_DATA, status);
- if (ret)
- return ret;
-
- edid = drm_get_edid(connector, sii902x->i2c->adapter);
+ edid = drm_get_edid(connector, sii902x->i2cmux->adapter[0]);
drm_connector_update_edid_property(connector, edid);
if (edid) {
num = drm_add_edid_modes(connector, edid);
@@ -181,42 +195,6 @@ static int sii902x_get_modes(struct drm_connector *connector)
if (ret)
return ret;
- /*
- * Sometimes the I2C bus can stall after failure to use the
- * EDID channel. Retry a few times to see if things clear
- * up, else continue anyway.
- */
- retries = 5;
- do {
- ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA,
- &status);
- retries--;
- } while (ret && retries);
- if (ret)
- dev_err(dev, "failed to read status (%d)\n", ret);
-
- ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
- SII902X_SYS_CTRL_DDC_BUS_REQ |
- SII902X_SYS_CTRL_DDC_BUS_GRTD, 0);
- if (ret)
- return ret;
-
- timeout = jiffies +
- msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
- do {
- ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
- if (ret)
- return ret;
- } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
- SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
- time_before(jiffies, timeout));
-
- if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
- SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
- dev_err(dev, "failed to release the i2c bus\n");
- return -ETIMEDOUT;
- }
-
return num;
}
@@ -366,6 +344,121 @@ static irqreturn_t sii902x_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
+/*
+ * The purpose of sii902x_i2c_bypass_select is to enable the pass through
+ * mode of the HDMI transmitter. Do not use regmap from within this function,
+ * only use sii902x_*_unlocked functions to read/modify/write registers.
+ * We are holding the parent adapter lock here, keep this in mind before
+ * adding more i2c transactions.
+ *
+ * Also, since SII902X_SYS_CTRL_DATA is used with regmap_update_bits elsewhere
+ * in this driver, we need to make sure that we only touch 0x1A[2:1] from
+ * within sii902x_i2c_bypass_select and sii902x_i2c_bypass_deselect, and that
+ * we leave the remaining bits as we have found them.
+ */
+static int sii902x_i2c_bypass_select(struct i2c_mux_core *mux, u32 chan_id)
+{
+ struct sii902x *sii902x = i2c_mux_priv(mux);
+ struct device *dev = &sii902x->i2c->dev;
+ unsigned long timeout;
+ u8 status;
+ int ret;
+
+ ret = sii902x_update_bits_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_DDC_BUS_REQ,
+ SII902X_SYS_CTRL_DDC_BUS_REQ);
+ if (ret)
+ return ret;
+
+ timeout = jiffies +
+ msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
+ do {
+ ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ &status);
+ if (ret)
+ return ret;
+ } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
+ time_before(jiffies, timeout));
+
+ if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
+ dev_err(dev, "Failed to acquire the i2c bus\n");
+ return -ETIMEDOUT;
+ }
+
+ return sii902x_write_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ status);
+}
+
+/*
+ * The purpose of sii902x_i2c_bypass_deselect is to disable the pass through
+ * mode of the HDMI transmitter. Do not use regmap from within this function,
+ * only use sii902x_*_unlocked functions to read/modify/write registers.
+ * We are holding the parent adapter lock here, keep this in mind before
+ * adding more i2c transactions.
+ *
+ * Also, since SII902X_SYS_CTRL_DATA is used with regmap_update_bits elsewhere
+ * in this driver, we need to make sure that we only touch 0x1A[2:1] from
+ * within sii902x_i2c_bypass_select and sii902x_i2c_bypass_deselect, and that
+ * we leave the remaining bits as we have found them.
+ */
+static int sii902x_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id)
+{
+ struct sii902x *sii902x = i2c_mux_priv(mux);
+ struct device *dev = &sii902x->i2c->dev;
+ unsigned long timeout;
+ unsigned int retries;
+ u8 status;
+ int ret;
+
+ /*
+ * When the HDMI transmitter is in pass through mode, we need an
+ * (undocumented) additional delay between STOP and START conditions
+ * to guarantee the bus won't get stuck.
+ */
+ udelay(30);
+
+ /*
+ * Sometimes the I2C bus can stall after failure to use the
+ * EDID channel. Retry a few times to see if things clear
+ * up, else continue anyway.
+ */
+ retries = 5;
+ do {
+ ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ &status);
+ retries--;
+ } while (ret && retries);
+ if (ret) {
+ dev_err(dev, "failed to read status (%d)\n", ret);
+ return ret;
+ }
+
+ ret = sii902x_update_bits_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_DDC_BUS_REQ |
+ SII902X_SYS_CTRL_DDC_BUS_GRTD, 0);
+ if (ret)
+ return ret;
+
+ timeout = jiffies +
+ msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
+ do {
+ ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ &status);
+ if (ret)
+ return ret;
+ } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
+ SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
+ time_before(jiffies, timeout));
+
+ if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
+ SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
+ dev_err(dev, "failed to release the i2c bus\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static int sii902x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -375,6 +468,13 @@ static int sii902x_probe(struct i2c_client *client,
u8 chipid[4];
int ret;
+ ret = i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA);
+ if (!ret) {
+ dev_err(dev, "I2C adapter not suitable\n");
+ return -EIO;
+ }
+
sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL);
if (!sii902x)
return -ENOMEM;
@@ -433,7 +533,15 @@ static int sii902x_probe(struct i2c_client *client,
i2c_set_clientdata(client, sii902x);
- return 0;
+ sii902x->i2cmux = i2c_mux_alloc(client->adapter, dev,
+ 1, 0, I2C_MUX_GATE,
+ sii902x_i2c_bypass_select,
+ sii902x_i2c_bypass_deselect);
+ if (!sii902x->i2cmux)
+ return -ENOMEM;
+
+ sii902x->i2cmux->priv = sii902x;
+ return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
}
static int sii902x_remove(struct i2c_client *client)
@@ -441,6 +549,7 @@ static int sii902x_remove(struct i2c_client *client)
{
struct sii902x *sii902x = i2c_get_clientdata(client);
+ i2c_mux_del_adapters(sii902x->i2cmux);
drm_bridge_remove(&sii902x->bridge);
return 0;
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index ee6b98efa9c2..afd491018bfc 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -379,7 +379,7 @@ static void tc358764_detach(struct drm_bridge *bridge)
drm_fb_helper_remove_one_connector(drm->fb_helper, &ctx->connector);
drm_panel_detach(ctx->panel);
ctx->panel = NULL;
- drm_connector_unreference(&ctx->connector);
+ drm_connector_put(&ctx->connector);
}
static const struct drm_bridge_funcs tc358764_bridge_funcs = {
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 68ab1821e15b..4dd499c7d1ba 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -169,7 +169,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
struct drm_mode_fb_cmd2 mode_cmd;
void *sysram;
struct drm_gem_object *gobj = NULL;
- struct cirrus_bo *bo = NULL;
int size, ret;
mode_cmd.width = sizes->surface_width;
@@ -185,8 +184,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
return ret;
}
- bo = gem_to_cirrus_bo(gobj);
-
sysram = vmalloc(size);
if (!sysram)
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3dbfbddae7e6..48ec378fb27e 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -315,9 +315,11 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
-static int drm_atomic_crtc_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state,
+ const struct drm_crtc_state *new_crtc_state)
{
+ struct drm_crtc *crtc = new_crtc_state->crtc;
+
/* NOTE: we explicitly don't enforce constraints such as primary
* layer covering entire screen, since that is something we want
* to allow (on hw that supports it). For hw that does not, it
@@ -326,7 +328,7 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
* TODO: Add generic modeset state checks once we support those.
*/
- if (state->active && !state->enable) {
+ if (new_crtc_state->active && !new_crtc_state->enable) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
crtc->base.id, crtc->name);
return -EINVAL;
@@ -336,14 +338,14 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
* as this is a kernel-internal detail that userspace should never
* be able to trigger. */
if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
- WARN_ON(state->enable && !state->mode_blob)) {
+ WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
- WARN_ON(!state->enable && state->mode_blob)) {
+ WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
crtc->base.id, crtc->name);
return -EINVAL;
@@ -359,7 +361,8 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
* and legacy page_flip IOCTL which also reject service on a disabled
* pipe.
*/
- if (state->event && !state->active && !crtc->state->active) {
+ if (new_crtc_state->event &&
+ !new_crtc_state->active && !old_crtc_state->active) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
crtc->base.id, crtc->name);
return -EINVAL;
@@ -395,6 +398,11 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
{
struct drm_crtc_state *crtc_state;
struct drm_writeback_job *writeback_job = state->writeback_job;
+ const struct drm_display_info *info = &connector->display_info;
+
+ state->max_bpc = info->bpc ? info->bpc : 8;
+ if (connector->max_bpc_property)
+ state->max_bpc = min(state->max_bpc, state->max_requested_bpc);
if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
return 0;
@@ -489,14 +497,13 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_plane_state);
static bool
-plane_switching_crtc(struct drm_atomic_state *state,
- struct drm_plane *plane,
- struct drm_plane_state *plane_state)
+plane_switching_crtc(const struct drm_plane_state *old_plane_state,
+ const struct drm_plane_state *new_plane_state)
{
- if (!plane->state->crtc || !plane_state->crtc)
+ if (!old_plane_state->crtc || !new_plane_state->crtc)
return false;
- if (plane->state->crtc == plane_state->crtc)
+ if (old_plane_state->crtc == new_plane_state->crtc)
return false;
/* This could be refined, but currently there's no helper or driver code
@@ -509,88 +516,117 @@ plane_switching_crtc(struct drm_atomic_state *state,
/**
* drm_atomic_plane_check - check plane state
- * @plane: plane to check
- * @state: plane state to check
+ * @old_plane_state: old plane state to check
+ * @new_plane_state: new plane state to check
*
* Provides core sanity checks for plane state.
*
* RETURNS:
* Zero on success, error code on failure
*/
-static int drm_atomic_plane_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
+ const struct drm_plane_state *new_plane_state)
{
+ struct drm_plane *plane = new_plane_state->plane;
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ const struct drm_framebuffer *fb = new_plane_state->fb;
unsigned int fb_width, fb_height;
+ struct drm_mode_rect *clips;
+ uint32_t num_clips;
int ret;
/* either *both* CRTC and FB must be set, or neither */
- if (state->crtc && !state->fb) {
+ if (crtc && !fb) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
plane->base.id, plane->name);
return -EINVAL;
- } else if (state->fb && !state->crtc) {
+ } else if (fb && !crtc) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
plane->base.id, plane->name);
return -EINVAL;
}
/* if disabled, we don't care about the rest of the state: */
- if (!state->crtc)
+ if (!crtc)
return 0;
/* Check whether this plane is usable on this CRTC */
- if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
+ if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
- state->crtc->base.id, state->crtc->name,
+ crtc->base.id, crtc->name,
plane->base.id, plane->name);
return -EINVAL;
}
/* Check whether this plane supports the fb pixel format. */
- ret = drm_plane_check_pixel_format(plane, state->fb->format->format,
- state->fb->modifier);
+ ret = drm_plane_check_pixel_format(plane, fb->format->format,
+ fb->modifier);
if (ret) {
struct drm_format_name_buf format_name;
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
plane->base.id, plane->name,
- drm_get_format_name(state->fb->format->format,
+ drm_get_format_name(fb->format->format,
&format_name),
- state->fb->modifier);
+ fb->modifier);
return ret;
}
/* Give drivers some help against integer overflows */
- if (state->crtc_w > INT_MAX ||
- state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
- state->crtc_h > INT_MAX ||
- state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
+ if (new_plane_state->crtc_w > INT_MAX ||
+ new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w ||
+ new_plane_state->crtc_h > INT_MAX ||
+ new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
plane->base.id, plane->name,
- state->crtc_w, state->crtc_h,
- state->crtc_x, state->crtc_y);
+ new_plane_state->crtc_w, new_plane_state->crtc_h,
+ new_plane_state->crtc_x, new_plane_state->crtc_y);
return -ERANGE;
}
- fb_width = state->fb->width << 16;
- fb_height = state->fb->height << 16;
+ fb_width = fb->width << 16;
+ fb_height = fb->height << 16;
/* Make sure source coordinates are inside the fb. */
- if (state->src_w > fb_width ||
- state->src_x > fb_width - state->src_w ||
- state->src_h > fb_height ||
- state->src_y > fb_height - state->src_h) {
+ if (new_plane_state->src_w > fb_width ||
+ new_plane_state->src_x > fb_width - new_plane_state->src_w ||
+ new_plane_state->src_h > fb_height ||
+ new_plane_state->src_y > fb_height - new_plane_state->src_h) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
plane->base.id, plane->name,
- state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
- state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
- state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
- state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10,
- state->fb->width, state->fb->height);
+ new_plane_state->src_w >> 16,
+ ((new_plane_state->src_w & 0xffff) * 15625) >> 10,
+ new_plane_state->src_h >> 16,
+ ((new_plane_state->src_h & 0xffff) * 15625) >> 10,
+ new_plane_state->src_x >> 16,
+ ((new_plane_state->src_x & 0xffff) * 15625) >> 10,
+ new_plane_state->src_y >> 16,
+ ((new_plane_state->src_y & 0xffff) * 15625) >> 10,
+ fb->width, fb->height);
return -ENOSPC;
}
- if (plane_switching_crtc(state->state, plane, state)) {
+ clips = drm_plane_get_damage_clips(new_plane_state);
+ num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+
+ /* Make sure damage clips are valid and inside the fb. */
+ while (num_clips > 0) {
+ if (clips->x1 >= clips->x2 ||
+ clips->y1 >= clips->y2 ||
+ clips->x1 < 0 ||
+ clips->y1 < 0 ||
+ clips->x2 > fb_width ||
+ clips->y2 > fb_height) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid damage clip %d %d %d %d\n",
+ plane->base.id, plane->name, clips->x1,
+ clips->y1, clips->x2, clips->y2);
+ return -EINVAL;
+ }
+ clips++;
+ num_clips--;
+ }
+
+ if (plane_switching_crtc(old_plane_state, new_plane_state)) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
plane->base.id, plane->name);
return -EINVAL;
@@ -927,6 +963,8 @@ int
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
+ const struct drm_crtc_state *old_crtc_state =
+ drm_atomic_get_old_crtc_state(state, crtc);
struct drm_plane *plane;
WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
@@ -934,7 +972,7 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
crtc->base.id, crtc->name, state);
- drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+ drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
@@ -961,17 +999,19 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
struct drm_device *dev = state->dev;
struct drm_mode_config *config = &dev->mode_config;
struct drm_plane *plane;
- struct drm_plane_state *plane_state;
+ struct drm_plane_state *old_plane_state;
+ struct drm_plane_state *new_plane_state;
struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_crtc_state *new_crtc_state;
struct drm_connector *conn;
struct drm_connector_state *conn_state;
int i, ret = 0;
DRM_DEBUG_ATOMIC("checking %p\n", state);
- for_each_new_plane_in_state(state, plane, plane_state, i) {
- ret = drm_atomic_plane_check(plane, plane_state);
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
if (ret) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
plane->base.id, plane->name);
@@ -979,8 +1019,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- ret = drm_atomic_crtc_check(crtc, crtc_state);
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state);
if (ret) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
crtc->base.id, crtc->name);
@@ -1008,8 +1048,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
if (!state->allow_modeset) {
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
crtc->base.id, crtc->name);
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 474b503a73a1..54e2ae614dcc 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -32,6 +32,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_writeback.h>
+#include <drm/drm_damage_helper.h>
#include <linux/dma-fence.h>
#include "drm_crtc_helper_internal.h"
@@ -669,6 +670,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
if (old_connector_state->link_status !=
new_connector_state->link_status)
new_crtc_state->connectors_changed = true;
+
+ if (old_connector_state->max_requested_bpc !=
+ new_connector_state->max_requested_bpc)
+ new_crtc_state->connectors_changed = true;
}
if (funcs->atomic_check)
@@ -858,6 +863,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
+ drm_atomic_helper_check_plane_damage(state, new_plane_state);
+
if (!funcs || !funcs->atomic_check)
continue;
@@ -1456,6 +1463,9 @@ void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
crtc->base.id, crtc->name);
}
+
+ if (old_state->fake_commit)
+ complete_all(&old_state->fake_commit->flip_done);
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
@@ -2213,8 +2223,10 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
spin_unlock(&crtc->commit_lock);
}
- if (old_state->fake_commit)
+ if (old_state->fake_commit) {
complete_all(&old_state->fake_commit->cleanup_done);
+ WARN_ON(!try_wait_for_completion(&old_state->fake_commit->hw_done));
+ }
}
EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
@@ -3119,27 +3131,104 @@ void drm_atomic_helper_shutdown(struct drm_device *dev)
struct drm_modeset_acquire_ctx ctx;
int ret;
- drm_modeset_acquire_init(&ctx, 0);
- while (1) {
- ret = drm_modeset_lock_all_ctx(dev, &ctx);
- if (!ret)
- ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
-
- if (ret != -EDEADLK)
- break;
-
- drm_modeset_backoff(&ctx);
- }
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
+ ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
if (ret)
DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
}
EXPORT_SYMBOL(drm_atomic_helper_shutdown);
/**
+ * drm_atomic_helper_duplicate_state - duplicate an atomic state object
+ * @dev: DRM device
+ * @ctx: lock acquisition context
+ *
+ * Makes a copy of the current atomic state by looping over all objects and
+ * duplicating their respective states. This is used for example by suspend/
+ * resume support code to save the state prior to suspend such that it can
+ * be restored upon resume.
+ *
+ * Note that this treats atomic state as persistent between save and restore.
+ * Drivers must make sure that this is possible and won't result in confusion
+ * or erroneous behaviour.
+ *
+ * Note that if callers haven't already acquired all modeset locks this might
+ * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
+ *
+ * Returns:
+ * A pointer to the copy of the atomic state object on success or an
+ * ERR_PTR()-encoded error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
+ */
+struct drm_atomic_state *
+drm_atomic_helper_duplicate_state(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_connector *conn;
+ struct drm_connector_list_iter conn_iter;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ int err = 0;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ state->acquire_ctx = ctx;
+
+ drm_for_each_crtc(crtc, dev) {
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ err = PTR_ERR(crtc_state);
+ goto free;
+ }
+ }
+
+ drm_for_each_plane(plane, dev) {
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ err = PTR_ERR(plane_state);
+ goto free;
+ }
+ }
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(conn, &conn_iter) {
+ struct drm_connector_state *conn_state;
+
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ if (IS_ERR(conn_state)) {
+ err = PTR_ERR(conn_state);
+ drm_connector_list_iter_end(&conn_iter);
+ goto free;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ /* clear the acquire context so that it isn't accidentally reused */
+ state->acquire_ctx = NULL;
+
+free:
+ if (err < 0) {
+ drm_atomic_state_put(state);
+ state = ERR_PTR(err);
+ }
+
+ return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
+
+/**
* drm_atomic_helper_suspend - subsystem-level suspend helper
* @dev: DRM device
*
@@ -3170,14 +3259,10 @@ struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
struct drm_atomic_state *state;
int err;
- drm_modeset_acquire_init(&ctx, 0);
+ /* This can never be returned, but it makes the compiler happy */
+ state = ERR_PTR(-EINVAL);
-retry:
- err = drm_modeset_lock_all_ctx(dev, &ctx);
- if (err < 0) {
- state = ERR_PTR(err);
- goto unlock;
- }
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
state = drm_atomic_helper_duplicate_state(dev, &ctx);
if (IS_ERR(state))
@@ -3191,13 +3276,10 @@ retry:
}
unlock:
- if (PTR_ERR(state) == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
- }
+ DRM_MODESET_LOCK_ALL_END(ctx, err);
+ if (err)
+ return ERR_PTR(err);
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_suspend);
@@ -3220,7 +3302,7 @@ EXPORT_SYMBOL(drm_atomic_helper_suspend);
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
- int i;
+ int i, ret;
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct drm_connector *connector;
@@ -3239,7 +3321,11 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
for_each_new_connector_in_state(state, connector, new_conn_state, i)
state->connectors[i].old_state = connector->state;
- return drm_atomic_commit(state);
+ ret = drm_atomic_commit(state);
+
+ state->acquire_ctx = NULL;
+
+ return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
@@ -3267,23 +3353,12 @@ int drm_atomic_helper_resume(struct drm_device *dev,
drm_mode_config_reset(dev);
- drm_modeset_acquire_init(&ctx, 0);
- while (1) {
- err = drm_modeset_lock_all_ctx(dev, &ctx);
- if (err)
- goto out;
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
- err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
-out:
- if (err != -EDEADLK)
- break;
-
- drm_modeset_backoff(&ctx);
- }
+ err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
+ DRM_MODESET_LOCK_ALL_END(ctx, err);
drm_atomic_state_put(state);
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
return err;
}
@@ -3422,3 +3497,73 @@ fail:
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
+
+/**
+ * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
+ * @crtc: CRTC object
+ * @red: red correction table
+ * @green: green correction table
+ * @blue: blue correction table

+ * @size: size of the tables
+ * @ctx: lock acquire context
+ *
+ * Implements support for legacy gamma correction table for drivers
+ * that support color management through the DEGAMMA_LUT/GAMMA_LUT
+ * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
+ * how the atomic color management and gamma tables work.
+ */
+int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue,
+ uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_property_blob *blob = NULL;
+ struct drm_color_lut *blob_data;
+ int i, ret = 0;
+ bool replaced;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ blob = drm_property_create_blob(dev,
+ sizeof(struct drm_color_lut) * size,
+ NULL);
+ if (IS_ERR(blob)) {
+ ret = PTR_ERR(blob);
+ blob = NULL;
+ goto fail;
+ }
+
+ /* Prepare GAMMA_LUT with the legacy values. */
+ blob_data = blob->data;
+ for (i = 0; i < size; i++) {
+ blob_data[i].red = red[i];
+ blob_data[i].green = green[i];
+ blob_data[i].blue = blue[i];
+ }
+
+ state->acquire_ctx = ctx;
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+
+ /* Reset DEGAMMA_LUT and CTM properties. */
+ replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
+ replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
+ replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
+ crtc_state->color_mgmt_changed |= replaced;
+
+ ret = drm_atomic_commit(state);
+
+fail:
+ drm_atomic_state_put(state);
+ drm_property_blob_put(blob);
+ return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
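For illustration only: a driver supporting DEGAMMA_LUT/GAMMA_LUT would normally plug this helper straight into its CRTC funcs, roughly as below (the other callbacks shown are the usual atomic helpers, not anything this patch mandates).

static const struct drm_crtc_funcs foo_crtc_funcs = {
	.gamma_set		= drm_atomic_helper_legacy_gamma_set,
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.destroy		= drm_crtc_cleanup,
	.reset			= drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
};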
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 3ba996069d69..60bd7d708e35 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -394,93 +394,6 @@ drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
/**
- * drm_atomic_helper_duplicate_state - duplicate an atomic state object
- * @dev: DRM device
- * @ctx: lock acquisition context
- *
- * Makes a copy of the current atomic state by looping over all objects and
- * duplicating their respective states. This is used for example by suspend/
- * resume support code to save the state prior to suspend such that it can
- * be restored upon resume.
- *
- * Note that this treats atomic state as persistent between save and restore.
- * Drivers must make sure that this is possible and won't result in confusion
- * or erroneous behaviour.
- *
- * Note that if callers haven't already acquired all modeset locks this might
- * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
- *
- * Returns:
- * A pointer to the copy of the atomic state object on success or an
- * ERR_PTR()-encoded error code on failure.
- *
- * See also:
- * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
- */
-struct drm_atomic_state *
-drm_atomic_helper_duplicate_state(struct drm_device *dev,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_atomic_state *state;
- struct drm_connector *conn;
- struct drm_connector_list_iter conn_iter;
- struct drm_plane *plane;
- struct drm_crtc *crtc;
- int err = 0;
-
- state = drm_atomic_state_alloc(dev);
- if (!state)
- return ERR_PTR(-ENOMEM);
-
- state->acquire_ctx = ctx;
-
- drm_for_each_crtc(crtc, dev) {
- struct drm_crtc_state *crtc_state;
-
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- err = PTR_ERR(crtc_state);
- goto free;
- }
- }
-
- drm_for_each_plane(plane, dev) {
- struct drm_plane_state *plane_state;
-
- plane_state = drm_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state)) {
- err = PTR_ERR(plane_state);
- goto free;
- }
- }
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(conn, &conn_iter) {
- struct drm_connector_state *conn_state;
-
- conn_state = drm_atomic_get_connector_state(state, conn);
- if (IS_ERR(conn_state)) {
- err = PTR_ERR(conn_state);
- drm_connector_list_iter_end(&conn_iter);
- goto free;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
-
- /* clear the acquire context so that it isn't accidentally reused */
- state->acquire_ctx = NULL;
-
-free:
- if (err < 0) {
- drm_atomic_state_put(state);
- state = ERR_PTR(err);
- }
-
- return state;
-}
-EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
-
-/**
* __drm_atomic_helper_connector_destroy_state - release connector state
* @state: connector state object to release
*
@@ -516,76 +429,6 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
/**
- * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
- * @crtc: CRTC object
- * @red: red correction table
- * @green: green correction table
- * @blue: green correction table
- * @size: size of the tables
- * @ctx: lock acquire context
- *
- * Implements support for legacy gamma correction table for drivers
- * that support color management through the DEGAMMA_LUT/GAMMA_LUT
- * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
- * how the atomic color management and gamma tables work.
- */
-int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue,
- uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_atomic_state *state;
- struct drm_crtc_state *crtc_state;
- struct drm_property_blob *blob = NULL;
- struct drm_color_lut *blob_data;
- int i, ret = 0;
- bool replaced;
-
- state = drm_atomic_state_alloc(crtc->dev);
- if (!state)
- return -ENOMEM;
-
- blob = drm_property_create_blob(dev,
- sizeof(struct drm_color_lut) * size,
- NULL);
- if (IS_ERR(blob)) {
- ret = PTR_ERR(blob);
- blob = NULL;
- goto fail;
- }
-
- /* Prepare GAMMA_LUT with the legacy values. */
- blob_data = blob->data;
- for (i = 0; i < size; i++) {
- blob_data[i].red = red[i];
- blob_data[i].green = green[i];
- blob_data[i].blue = blue[i];
- }
-
- state->acquire_ctx = ctx;
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
- goto fail;
- }
-
- /* Reset DEGAMMA_LUT and CTM properties. */
- replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
- replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
- replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
- crtc_state->color_mgmt_changed |= replaced;
-
- ret = drm_atomic_commit(state);
-
-fail:
- drm_atomic_state_put(state);
- drm_property_blob_put(blob);
- return ret;
-}
-EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
-
-/**
* __drm_atomic_helper_private_duplicate_state - copy atomic private state
* @obj: CRTC object
* @state: new private object state
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index eec396a57b88..c40889888a16 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -517,6 +517,8 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
{
struct drm_device *dev = plane->dev;
struct drm_mode_config *config = &dev->mode_config;
+ bool replaced = false;
+ int ret;
if (property == config->prop_fb_id) {
struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
@@ -570,6 +572,14 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
state->color_encoding = val;
} else if (property == plane->color_range_property) {
state->color_range = val;
+ } else if (property == config->prop_fb_damage_clips) {
+ ret = drm_atomic_replace_property_blob_from_id(dev,
+ &state->fb_damage_clips,
+ val,
+ -1,
+ sizeof(struct drm_rect),
+ &replaced);
+ return ret;
} else if (plane->funcs->atomic_set_property) {
return plane->funcs->atomic_set_property(plane, state,
property, val);
@@ -625,6 +635,9 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->color_encoding;
} else if (property == plane->color_range_property) {
*val = state->color_range;
+ } else if (property == config->prop_fb_damage_clips) {
+ *val = (state->fb_damage_clips) ?
+ state->fb_damage_clips->base.id : 0;
} else if (plane->funcs->atomic_get_property) {
return plane->funcs->atomic_get_property(plane, state, property, val);
} else {
@@ -744,6 +757,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
return set_out_fence_for_connector(state->state, connector,
fence_ptr);
+ } else if (property == connector->max_bpc_property) {
+ state->max_requested_bpc = val;
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
@@ -808,6 +823,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = 0;
} else if (property == config->writeback_out_fence_ptr_property) {
*val = 0;
+ } else if (property == connector->max_bpc_property) {
+ *val = state->max_requested_bpc;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index fc03d26fcacc..9b2bd28dde0a 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -81,8 +81,7 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
{
int ret;
- if (!drm_core_check_feature(dev, DRIVER_MODESET) ||
- !dev->driver->dumb_create || !dev->driver->gem_prime_vmap)
+ if (!drm_core_check_feature(dev, DRIVER_MODESET) || !dev->driver->dumb_create)
return -EOPNOTSUPP;
if (funcs && !try_module_get(funcs->owner))
@@ -229,8 +228,7 @@ static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
struct drm_device *dev = buffer->client->dev;
- if (buffer->vaddr && dev->driver->gem_prime_vunmap)
- dev->driver->gem_prime_vunmap(buffer->gem, buffer->vaddr);
+ drm_gem_vunmap(buffer->gem, buffer->vaddr);
if (buffer->gem)
drm_gem_object_put_unlocked(buffer->gem);
@@ -283,9 +281,9 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
* fd_install step out of the driver backend hooks, to make that
* final step optional for internal users.
*/
- vaddr = dev->driver->gem_prime_vmap(obj);
- if (!vaddr) {
- ret = -ENOMEM;
+ vaddr = drm_gem_vmap(obj);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
goto err_delete;
}
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 581cc3788223..07dcf47daafe 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -255,11 +255,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
if (crtc_lut->gamma_size != crtc->gamma_size)
return -EINVAL;
- drm_modeset_acquire_init(&ctx, 0);
-retry:
- ret = drm_modeset_lock_all_ctx(dev, &ctx);
- if (ret)
- goto out;
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
size = crtc_lut->gamma_size * (sizeof(uint16_t));
r_base = crtc->gamma_store;
@@ -284,13 +280,7 @@ retry:
crtc->gamma_size, &ctx);
out:
- if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
- }
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
return ret;
}
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index ead26bfc30ca..da8ae80c2750 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -932,6 +932,13 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
* is no longer protected and userspace should take appropriate action
* (whatever that might be).
*
+ * max bpc:
+ * This range property is used by userspace to limit the bit depth. When used,
+ * the driver limits the bpc in accordance with the valid range supported by
+ * the hardware and sink. Drivers should use
+ * drm_connector_attach_max_bpc_property() to create and attach the property
+ * to the connector during initialization.
+ *
* Connectors also have one standardized atomic property:
*
* CRTC_ID:
@@ -1699,6 +1706,40 @@ void drm_connector_set_link_status_property(struct drm_connector *connector,
EXPORT_SYMBOL(drm_connector_set_link_status_property);
/**
+ * drm_connector_attach_max_bpc_property - attach "max bpc" property
+ * @connector: connector to attach max bpc property on.
+ * @min: The minimum bit depth supported by the connector.
+ * @max: The maximum bit depth supported by the connector.
+ *
+ * This is used to add support for limiting the bit depth on a connector.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
+ int min, int max)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_property *prop;
+
+ prop = connector->max_bpc_property;
+ if (!prop) {
+ prop = drm_property_create_range(dev, 0, "max bpc", min, max);
+ if (!prop)
+ return -ENOMEM;
+
+ connector->max_bpc_property = prop;
+ }
+
+ drm_object_attach_property(&connector->base, prop, max);
+ connector->state->max_requested_bpc = max;
+ connector->state->max_bpc = max;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_max_bpc_property);
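A minimal sketch of how a sink driver might attach the property at connector init time; the 6..12 range is only an example, the real bounds depend on the hardware.

static int foo_connector_init(struct drm_connector *connector)
{
	/* example range: hardware supporting 6 to 12 bits per component */
	return drm_connector_attach_max_bpc_property(connector, 6, 12);
}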
+
+/**
* drm_connector_set_vrr_capable_property - sets the variable refresh rate
* capable property for a connector
* @connector: drm connector
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6f8ddfcfaba5..1593dd6cdfb7 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -572,9 +572,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_mode_crtc *crtc_req = data;
struct drm_crtc *crtc;
struct drm_plane *plane;
- struct drm_connector **connector_set, *connector;
- struct drm_framebuffer *fb;
- struct drm_display_mode *mode;
+ struct drm_connector **connector_set = NULL, *connector;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_display_mode *mode = NULL;
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
struct drm_modeset_acquire_ctx ctx;
@@ -601,15 +601,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
plane = crtc->primary;
mutex_lock(&crtc->dev->mode_config.mutex);
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-retry:
- connector_set = NULL;
- fb = NULL;
- mode = NULL;
-
- ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx);
- if (ret)
- goto out;
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
+ DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
if (crtc_req->mode_valid) {
/* If we have a mode we need a framebuffer. */
@@ -768,13 +761,13 @@ out:
}
kfree(connector_set);
drm_mode_destroy(dev, mode);
- if (ret == -EDEADLK) {
- ret = drm_modeset_backoff(&ctx);
- if (!ret)
- goto retry;
- }
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
+
+ /* In case we need to retry... */
+ connector_set = NULL;
+ fb = NULL;
+ mode = NULL;
+
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
mutex_unlock(&crtc->dev->mode_config.mutex);
return ret;
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
new file mode 100644
index 000000000000..05c8e7267165
--- /dev/null
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright (c) 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Deepak Rawat <drawat@vmware.com>
+ * Rob Clark <robdclark@gmail.com>
+ *
+ **************************************************************************/
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_damage_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * FB_DAMAGE_CLIPS is an optional plane property which provides a means to
+ * specify a list of damage rectangles on a plane in framebuffer coordinates of
+ * the framebuffer attached to the plane. In this context, damage is the area
+ * of the plane framebuffer that has changed since the last plane update (also
+ * called page-flip), irrespective of whether the currently attached framebuffer
+ * is the same as the framebuffer attached during the last plane update.
+ *
+ * FB_DAMAGE_CLIPS is a hint to the kernel that can help some drivers optimize
+ * internally, especially virtual devices where each framebuffer change needs to
+ * be transmitted over network, usb, etc.
+ *
+ * Since FB_DAMAGE_CLIPS is only a hint, it is an optional property. User-space
+ * can ignore the damage clips property, in which case the driver will do a full
+ * plane update. When damage clips are provided, it is guaranteed that the area
+ * inside the damage clips will be updated on the plane. For efficiency the
+ * driver can do a full update, or update more than what is specified in the
+ * damage clips. Since the driver is free to read more, user-space must always
+ * render the entire visible framebuffer; otherwise there can be corruption.
+ * Also, damage clips provided by user-space which do not encompass the actual
+ * damage to the framebuffer (since the last plane update) can result in
+ * incorrect rendering.
+ *
+ * FB_DAMAGE_CLIPS is a blob property whose blob data is simply an array of
+ * &drm_mode_rect. Unlike plane &drm_plane_state.src coordinates, damage clips
+ * are not in 16.16 fixed point. Like the plane src in the framebuffer, damage
+ * clips cannot be negative. In a damage clip, x1/y1 are inclusive and x2/y2 are
+ * exclusive. The kernel does not reject overlapping damage clips, but they are
+ * strongly discouraged.
+ *
+ * Drivers that are interested in the damage interface for a plane should
+ * enable the FB_DAMAGE_CLIPS property by calling
+ * drm_plane_enable_fb_damage_clips(). Drivers implementing damage can use the
+ * drm_atomic_helper_damage_iter_init() and drm_atomic_helper_damage_iter_next()
+ * iterator helpers to get damage rectangles clipped to &drm_plane_state.src.
+ */
+
+static void convert_clip_rect_to_rect(const struct drm_clip_rect *src,
+ struct drm_mode_rect *dest,
+ uint32_t num_clips, uint32_t src_inc)
+{
+ while (num_clips > 0) {
+ dest->x1 = src->x1;
+ dest->y1 = src->y1;
+ dest->x2 = src->x2;
+ dest->y2 = src->y2;
+ src += src_inc;
+ dest++;
+ num_clips--;
+ }
+}
+
+/**
+ * drm_plane_enable_fb_damage_clips - Enables plane fb damage clips property.
+ * @plane: Plane on which to enable damage clips property.
+ *
+ * This function lets drivers enable the damage clips property on a plane.
+ */
+void drm_plane_enable_fb_damage_clips(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ drm_object_attach_property(&plane->base, config->prop_fb_damage_clips,
+ 0);
+}
+EXPORT_SYMBOL(drm_plane_enable_fb_damage_clips);
+
+/**
+ * drm_atomic_helper_check_plane_damage - Verify plane damage on atomic_check.
+ * @state: The driver state object.
+ * @plane_state: Plane state for which to verify damage.
+ *
+ * This helper function makes sure that damage from the plane state is
+ * discarded on a full modeset. If there are more reasons a driver would want
+ * to do a full plane update rather than processing individual damage regions,
+ * then those cases should be taken care of here.
+ *
+ * Note that &drm_plane_state.fb_damage_clips == NULL in the plane state means
+ * that a full plane update should happen. It also ensures the helper iterator
+ * will return &drm_plane_state.src as damage.
+ */
+void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (plane_state->crtc) {
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ plane_state->crtc);
+
+ if (WARN_ON(!crtc_state))
+ return;
+
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ drm_property_blob_put(plane_state->fb_damage_clips);
+ plane_state->fb_damage_clips = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_check_plane_damage);
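A minimal sketch of calling this from a driver's plane atomic_check hook; the foo_ name is hypothetical and the usual drm_atomic_helper_check_plane_state() call is omitted for brevity.

static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	/* discard damage if the CRTC needs a full modeset */
	drm_atomic_helper_check_plane_damage(state->state, state);

	return 0;
}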
+
+/**
+ * drm_atomic_helper_dirtyfb - Helper for dirtyfb.
+ * @fb: DRM framebuffer.
+ * @file_priv: DRM file for the ioctl call.
+ * @flags: Dirty fb annotate flags.
+ * @color: Color for annotate fill.
+ * @clips: Dirty region.
+ * @num_clips: Count of clip in clips.
+ *
+ * A helper to implement &drm_framebuffer_funcs.dirty using the damage
+ * interface during plane update. If num_clips is 0 then this helper will do a
+ * full plane update. This is the same behaviour expected by the DIRTYFB IOCTL.
+ *
+ * Note that this helper is a blocking implementation. This is what current
+ * drivers and userspace expect in their DIRTYFB IOCTL implementation, as a way
+ * to rate-limit userspace and make sure its rendering doesn't get ahead of
+ * uploading new data too much.
+ *
+ * Return: Zero on success, negative errno on failure.
+ */
+int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int flags,
+ unsigned int color, struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_property_blob *damage = NULL;
+ struct drm_mode_rect *rects = NULL;
+ struct drm_atomic_state *state;
+ struct drm_plane *plane;
+ int ret = 0;
+
+ /*
+ * When called from an ioctl, we are interruptible, but not when called
+ * internally (i.e. the defio worker).
+ */
+ drm_modeset_acquire_init(&ctx,
+ file_priv ? DRM_MODESET_ACQUIRE_INTERRUPTIBLE : 0);
+
+ state = drm_atomic_state_alloc(fb->dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ state->acquire_ctx = &ctx;
+
+ if (clips) {
+ uint32_t inc = 1;
+
+ if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+ inc = 2;
+ num_clips /= 2;
+ }
+
+ rects = kcalloc(num_clips, sizeof(*rects), GFP_KERNEL);
+ if (!rects) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ convert_clip_rect_to_rect(clips, rects, num_clips, inc);
+ damage = drm_property_create_blob(fb->dev,
+ num_clips * sizeof(*rects),
+ rects);
+ if (IS_ERR(damage)) {
+ ret = PTR_ERR(damage);
+ damage = NULL;
+ goto out;
+ }
+ }
+
+retry:
+ drm_for_each_plane(plane, fb->dev) {
+ struct drm_plane_state *plane_state;
+
+ if (plane->state->fb != fb)
+ continue;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto out;
+ }
+
+ drm_property_replace_blob(&plane_state->fb_damage_clips,
+ damage);
+ }
+
+ ret = drm_atomic_commit(state);
+
+out:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
+
+ drm_property_blob_put(damage);
+ kfree(rects);
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+
+}
+EXPORT_SYMBOL(drm_atomic_helper_dirtyfb);
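For illustration only: a driver using the GEM framebuffer helpers could wire this in as its dirty callback, for instance:

static const struct drm_framebuffer_funcs foo_fb_funcs = {
	.destroy	= drm_gem_fb_destroy,
	.create_handle	= drm_gem_fb_create_handle,
	.dirty		= drm_atomic_helper_dirtyfb,
};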
+
+/**
+ * drm_atomic_helper_damage_iter_init - Initialize the damage iterator.
+ * @iter: The iterator to initialize.
+ * @old_state: Old plane state for validation.
+ * @new_state: Plane state from which to iterate the damage clips.
+ *
+ * Initialize an iterator, which clips plane damage
+ * &drm_plane_state.fb_damage_clips to plane &drm_plane_state.src. This iterator
+ * returns the full plane src when damage is not present, either because
+ * user-space didn't send it or because the driver discarded it (i.e. it wants
+ * to do a full plane update). Currently this iterator also returns the full
+ * plane src when the plane src changed, but that may change in the future to
+ * return damage.
+ *
+ * When the plane is not visible or the plane update should not happen, the
+ * first call to iter_next will return false. Note that this helper uses the
+ * clipped &drm_plane_state.src, so drivers calling this helper should have
+ * called drm_atomic_helper_check_plane_state() earlier.
+ */
+void
+drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
+ const struct drm_plane_state *old_state,
+ const struct drm_plane_state *state)
+{
+ memset(iter, 0, sizeof(*iter));
+
+ if (!state || !state->crtc || !state->fb || !state->visible)
+ return;
+
+ iter->clips = drm_helper_get_plane_damage_clips(state);
+ iter->num_clips = drm_plane_get_damage_clips_count(state);
+
+ /* Round down for x1/y1 and round up for x2/y2 to catch all pixels */
+ iter->plane_src.x1 = state->src.x1 >> 16;
+ iter->plane_src.y1 = state->src.y1 >> 16;
+ iter->plane_src.x2 = (state->src.x2 >> 16) + !!(state->src.x2 & 0xFFFF);
+ iter->plane_src.y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
+
+ if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) {
+ iter->clips = NULL;
+ iter->num_clips = 0;
+ iter->full_update = true;
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_damage_iter_init);
+
+/**
+ * drm_atomic_helper_damage_iter_next - Advance the damage iterator.
+ * @iter: The iterator to advance.
+ * @rect: Return a rectangle in fb coordinate clipped to plane src.
+ *
+ * Since the plane src is in 16.16 fixed point and damage clips are whole
+ * numbers, this iterator rounds off the clips that intersect with the plane
+ * src: x1/y1 are rounded down and x2/y2 are rounded up for the intersected
+ * coordinates. The full plane src is rounded off the same way when it is
+ * returned as damage. This iterator will skip damage clips outside of the
+ * plane src.
+ *
+ * Return: True if the output is valid, false if the end was reached.
+ *
+ * If the first call to iter_next returns false, there is no need to update
+ * the plane.
+ */
+bool
+drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
+ struct drm_rect *rect)
+{
+ bool ret = false;
+
+ if (iter->full_update) {
+ *rect = iter->plane_src;
+ iter->full_update = false;
+ return true;
+ }
+
+ while (iter->curr_clip < iter->num_clips) {
+ *rect = iter->clips[iter->curr_clip];
+ iter->curr_clip++;
+
+ if (drm_rect_intersect(rect, &iter->plane_src)) {
+ ret = true;
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_damage_iter_next);
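A rough sketch of consuming the iterator from a plane's atomic_update hook; foo_upload_rect() stands in for whatever transfer mechanism the driver actually has.

static void foo_upload_rect(struct drm_plane *plane, struct drm_rect *clip)
{
	/* hypothetical: copy clip->x1..x2 / y1..y2 to the device */
}

static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	drm_atomic_helper_damage_iter_init(&iter, old_state, plane->state);
	while (drm_atomic_helper_damage_iter_next(&iter, &clip))
		foo_upload_rect(plane, &clip);
}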
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 373bd4c2b698..f8468eae0503 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -32,6 +32,8 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_edid.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_auth.h>
+#include <drm/drm_gem.h>
#include <drm/drmP.h>
#include "drm_internal.h"
@@ -43,6 +45,93 @@
* Initialization, etc.
**************************************************/
+static int drm_name_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_minor *minor = node->minor;
+ struct drm_device *dev = minor->dev;
+ struct drm_master *master;
+
+ mutex_lock(&dev->master_mutex);
+ master = dev->master;
+ seq_printf(m, "%s", dev->driver->name);
+ if (dev->dev)
+ seq_printf(m, " dev=%s", dev_name(dev->dev));
+ if (master && master->unique)
+ seq_printf(m, " master=%s", master->unique);
+ if (dev->unique)
+ seq_printf(m, " unique=%s", dev->unique);
+ seq_printf(m, "\n");
+ mutex_unlock(&dev->master_mutex);
+
+ return 0;
+}
+
+static int drm_clients_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_file *priv;
+ kuid_t uid;
+
+ seq_printf(m,
+ "%20s %5s %3s master a %5s %10s\n",
+ "command",
+ "pid",
+ "dev",
+ "uid",
+ "magic");
+
+ /* dev->filelist is sorted youngest first, but we want to present
+ * oldest first (i.e. kernel, servers, clients), so walk backwards.
+ */
+ mutex_lock(&dev->filelist_mutex);
+ list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
+ struct task_struct *task;
+
+ rcu_read_lock(); /* locks pid_task()->comm */
+ task = pid_task(priv->pid, PIDTYPE_PID);
+ uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
+ seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n",
+ task ? task->comm : "<unknown>",
+ pid_vnr(priv->pid),
+ priv->minor->index,
+ drm_is_current_master(priv) ? 'y' : 'n',
+ priv->authenticated ? 'y' : 'n',
+ from_kuid_munged(seq_user_ns(m), uid),
+ priv->magic);
+ rcu_read_unlock();
+ }
+ mutex_unlock(&dev->filelist_mutex);
+ return 0;
+}
+
+static int drm_gem_one_name_info(int id, void *ptr, void *data)
+{
+ struct drm_gem_object *obj = ptr;
+ struct seq_file *m = data;
+
+ seq_printf(m, "%6d %8zd %7d %8d\n",
+ obj->name, obj->size,
+ obj->handle_count,
+ kref_read(&obj->refcount));
+ return 0;
+}
+
+static int drm_gem_name_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+
+ seq_printf(m, " name size handles refcount\n");
+
+ mutex_lock(&dev->object_name_lock);
+ idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
+ mutex_unlock(&dev->object_name_lock);
+
+ return 0;
+}
+
static const struct drm_info_list drm_debugfs_list[] = {
{"name", drm_name_info, 0},
{"clients", drm_clients_info, 0},
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 37c01b6076ec..2d6c491a0542 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1352,3 +1352,95 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
return 0;
}
EXPORT_SYMBOL(drm_dp_read_desc);
+
+/**
+ * DRM DP Helpers for DSC
+ */
+u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ bool is_edp)
+{
+ u8 slice_cap1 = dsc_dpcd[DP_DSC_SLICE_CAP_1 - DP_DSC_SUPPORT];
+
+ if (is_edp) {
+ /* For eDP, register DSC_SLICE_CAPABILITIES_1 gives slice count */
+ if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
+ return 4;
+ if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
+ return 2;
+ if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
+ return 1;
+ } else {
+ /* For DP, use values from DSC_SLICE_CAP_1 and DSC_SLICE_CAP_2 */
+ u8 slice_cap2 = dsc_dpcd[DP_DSC_SLICE_CAP_2 - DP_DSC_SUPPORT];
+
+ if (slice_cap2 & DP_DSC_24_PER_DP_DSC_SINK)
+ return 24;
+ if (slice_cap2 & DP_DSC_20_PER_DP_DSC_SINK)
+ return 20;
+ if (slice_cap2 & DP_DSC_16_PER_DP_DSC_SINK)
+ return 16;
+ if (slice_cap1 & DP_DSC_12_PER_DP_DSC_SINK)
+ return 12;
+ if (slice_cap1 & DP_DSC_10_PER_DP_DSC_SINK)
+ return 10;
+ if (slice_cap1 & DP_DSC_8_PER_DP_DSC_SINK)
+ return 8;
+ if (slice_cap1 & DP_DSC_6_PER_DP_DSC_SINK)
+ return 6;
+ if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
+ return 4;
+ if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
+ return 2;
+ if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_count);
+
+u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ u8 line_buf_depth = dsc_dpcd[DP_DSC_LINE_BUF_BIT_DEPTH - DP_DSC_SUPPORT];
+
+ switch (line_buf_depth & DP_DSC_LINE_BUF_BIT_DEPTH_MASK) {
+ case DP_DSC_LINE_BUF_BIT_DEPTH_9:
+ return 9;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_10:
+ return 10;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_11:
+ return 11;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_12:
+ return 12;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_13:
+ return 13;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_14:
+ return 14;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_15:
+ return 15;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_16:
+ return 16;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_8:
+ return 8;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_line_buf_depth);
+
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ u8 dsc_bpc[3])
+{
+ int num_bpc = 0;
+ u8 color_depth = dsc_dpcd[DP_DSC_DEC_COLOR_DEPTH_CAP - DP_DSC_SUPPORT];
+
+ if (color_depth & DP_DSC_12_BPC)
+ dsc_bpc[num_bpc++] = 12;
+ if (color_depth & DP_DSC_10_BPC)
+ dsc_bpc[num_bpc++] = 10;
+ if (color_depth & DP_DSC_8_BPC)
+ dsc_bpc[num_bpc++] = 8;
+
+ return num_bpc;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
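A hedged sketch of how a source driver might read the DSC receiver capabilities over AUX and feed them to these helpers; error handling is trimmed and the foo_ name is hypothetical.

static void foo_query_dsc_caps(struct drm_dp_aux *aux, bool is_edp)
{
	u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
	u8 dsc_bpc[3];
	int num_bpc;

	if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
			     sizeof(dsc_dpcd)) < 0)
		return;

	DRM_DEBUG_KMS("max slices %u, line buf depth %u\n",
		      drm_dp_dsc_sink_max_slice_count(dsc_dpcd, is_edp),
		      drm_dp_dsc_sink_line_buf_depth(dsc_dpcd));

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(dsc_dpcd, dsc_bpc);
	DRM_DEBUG_KMS("%d supported input bpc values\n", num_bpc);
}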
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index e2ffecd5e453..12e5e2be7890 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -704,19 +704,6 @@ void drm_dev_put(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_put);
-/**
- * drm_dev_unref - Drop reference of a DRM device
- * @dev: device to drop reference of or NULL
- *
- * This is a compatibility alias for drm_dev_put() and should not be used by new
- * code.
- */
-void drm_dev_unref(struct drm_device *dev)
-{
- drm_dev_put(dev);
-}
-EXPORT_SYMBOL(drm_dev_unref);
-
static int create_compat_control_link(struct drm_device *dev)
{
struct drm_minor *minor;
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
new file mode 100644
index 000000000000..bc2b23adb072
--- /dev/null
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corp
+ *
+ * Author:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/byteorder/generic.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_dsc.h>
+
+/**
+ * DOC: dsc helpers
+ *
+ * These functions contain some common logic and helpers to deal with VESA
+ * Display Stream Compression standard required for DSC on Display Port/eDP or
+ * MIPI display interfaces.
+ */
+
+/**
+ * drm_dsc_dp_pps_header_init() - Initializes the PPS Header
+ * for DisplayPort as per the DP 1.4 spec.
+ * @pps_sdp: Secondary data packet for DSC Picture Parameter Set
+ */
+void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp)
+{
+ memset(&pps_sdp->pps_header, 0, sizeof(pps_sdp->pps_header));
+
+ pps_sdp->pps_header.HB1 = DP_SDP_PPS;
+ pps_sdp->pps_header.HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1;
+}
+EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
+
+/**
+ * drm_dsc_pps_infoframe_pack() - Populates the DSC PPS infoframe
+ * using the DSC configuration parameters in the order expected
+ * by the DSC Display Sink device. For the DSC, the sink device
+ * expects the PPS payload in the big endian format for the fields
+ * that span more than 1 byte.
+ *
+ * @pps_sdp:
+ * Secondary data packet for DSC Picture Parameter Set
+ * @dsc_cfg:
+ * DSC Configuration data filled by driver
+ */
+void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+ const struct drm_dsc_config *dsc_cfg)
+{
+ int i;
+
+ /* Protect against someone accidentally changing struct size */
+ BUILD_BUG_ON(sizeof(pps_sdp->pps_payload) !=
+ DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1);
+
+ memset(&pps_sdp->pps_payload, 0, sizeof(pps_sdp->pps_payload));
+
+ /* PPS 0 */
+ pps_sdp->pps_payload.dsc_version =
+ dsc_cfg->dsc_version_minor |
+ dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT;
+
+ /* PPS 1, 2 is 0 */
+
+ /* PPS 3 */
+ pps_sdp->pps_payload.pps_3 =
+ dsc_cfg->line_buf_depth |
+ dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT;
+
+ /* PPS 4 */
+ pps_sdp->pps_payload.pps_4 =
+ ((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >>
+ DSC_PPS_MSB_SHIFT) |
+ dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT |
+ dsc_cfg->enable422 << DSC_PPS_SIMPLE422_SHIFT |
+ dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT |
+ dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT;
+
+ /* PPS 5 */
+ pps_sdp->pps_payload.bits_per_pixel_low =
+ (dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK);
+
+ /*
+ * The DSC panel expects the PPS packet to have big endian format
+ * for data spanning 2 bytes. Use the cpu_to_be16() macro to convert
+ * to big endian: on a little endian host it swaps the bytes, on a
+ * big endian host the value is left unchanged.
+ */
+
+ /* PPS 6, 7 */
+ pps_sdp->pps_payload.pic_height = cpu_to_be16(dsc_cfg->pic_height);
+
+ /* PPS 8, 9 */
+ pps_sdp->pps_payload.pic_width = cpu_to_be16(dsc_cfg->pic_width);
+
+ /* PPS 10, 11 */
+ pps_sdp->pps_payload.slice_height = cpu_to_be16(dsc_cfg->slice_height);
+
+ /* PPS 12, 13 */
+ pps_sdp->pps_payload.slice_width = cpu_to_be16(dsc_cfg->slice_width);
+
+ /* PPS 14, 15 */
+ pps_sdp->pps_payload.chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
+
+ /* PPS 16 */
+ pps_sdp->pps_payload.initial_xmit_delay_high =
+ ((dsc_cfg->initial_xmit_delay &
+ DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >>
+ DSC_PPS_MSB_SHIFT);
+
+ /* PPS 17 */
+ pps_sdp->pps_payload.initial_xmit_delay_low =
+ (dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK);
+
+ /* PPS 18, 19 */
+ pps_sdp->pps_payload.initial_dec_delay =
+ cpu_to_be16(dsc_cfg->initial_dec_delay);
+
+ /* PPS 20 is 0 */
+
+ /* PPS 21 */
+ pps_sdp->pps_payload.initial_scale_value =
+ dsc_cfg->initial_scale_value;
+
+ /* PPS 22, 23 */
+ pps_sdp->pps_payload.scale_increment_interval =
+ cpu_to_be16(dsc_cfg->scale_increment_interval);
+
+ /* PPS 24 */
+ pps_sdp->pps_payload.scale_decrement_interval_high =
+ ((dsc_cfg->scale_decrement_interval &
+ DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >>
+ DSC_PPS_MSB_SHIFT);
+
+ /* PPS 25 */
+ pps_sdp->pps_payload.scale_decrement_interval_low =
+ (dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK);
+
+ /* PPS 26[7:0], PPS 27[7:5] RESERVED */
+
+ /* PPS 27 */
+ pps_sdp->pps_payload.first_line_bpg_offset =
+ dsc_cfg->first_line_bpg_offset;
+
+ /* PPS 28, 29 */
+ pps_sdp->pps_payload.nfl_bpg_offset =
+ cpu_to_be16(dsc_cfg->nfl_bpg_offset);
+
+ /* PPS 30, 31 */
+ pps_sdp->pps_payload.slice_bpg_offset =
+ cpu_to_be16(dsc_cfg->slice_bpg_offset);
+
+ /* PPS 32, 33 */
+ pps_sdp->pps_payload.initial_offset =
+ cpu_to_be16(dsc_cfg->initial_offset);
+
+ /* PPS 34, 35 */
+ pps_sdp->pps_payload.final_offset = cpu_to_be16(dsc_cfg->final_offset);
+
+ /* PPS 36 */
+ pps_sdp->pps_payload.flatness_min_qp = dsc_cfg->flatness_min_qp;
+
+ /* PPS 37 */
+ pps_sdp->pps_payload.flatness_max_qp = dsc_cfg->flatness_max_qp;
+
+ /* PPS 38, 39 */
+ pps_sdp->pps_payload.rc_model_size =
+ cpu_to_be16(DSC_RC_MODEL_SIZE_CONST);
+
+ /* PPS 40 */
+ pps_sdp->pps_payload.rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
+
+ /* PPS 41 */
+ pps_sdp->pps_payload.rc_quant_incr_limit0 =
+ dsc_cfg->rc_quant_incr_limit0;
+
+ /* PPS 42 */
+ pps_sdp->pps_payload.rc_quant_incr_limit1 =
+ dsc_cfg->rc_quant_incr_limit1;
+
+ /* PPS 43 */
+ pps_sdp->pps_payload.rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
+ DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT;
+
+ /* PPS 44 - 57 */
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
+ pps_sdp->pps_payload.rc_buf_thresh[i] =
+ dsc_cfg->rc_buf_thresh[i];
+
+ /* PPS 58 - 87 */
+ /*
+ * For DSC sink programming the RC Range parameter fields
+ * are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0]
+ */
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ pps_sdp->pps_payload.rc_range_parameters[i] =
+ ((dsc_cfg->rc_range_params[i].range_min_qp <<
+ DSC_PPS_RC_RANGE_MINQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_max_qp <<
+ DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_bpg_offset));
+ pps_sdp->pps_payload.rc_range_parameters[i] =
+ cpu_to_be16(pps_sdp->pps_payload.rc_range_parameters[i]);
+ }
+
+ /* PPS 88 */
+ pps_sdp->pps_payload.native_422_420 = dsc_cfg->native_422 |
+ dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT;
+
+ /* PPS 89 */
+ pps_sdp->pps_payload.second_line_bpg_offset =
+ dsc_cfg->second_line_bpg_offset;
+
+ /* PPS 90, 91 */
+ pps_sdp->pps_payload.nsl_bpg_offset =
+ cpu_to_be16(dsc_cfg->nsl_bpg_offset);
+
+ /* PPS 92, 93 */
+ pps_sdp->pps_payload.second_line_offset_adj =
+ cpu_to_be16(dsc_cfg->second_line_offset_adj);
+
+ /* PPS 94 - 127 are 0 */
+}
+EXPORT_SYMBOL(drm_dsc_pps_infoframe_pack);
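A minimal sketch of the intended call order, assuming the driver has already computed a drm_dsc_config elsewhere; the foo_ name is hypothetical.

static void foo_build_pps_sdp(struct drm_dsc_pps_infoframe *pps_sdp,
			      const struct drm_dsc_config *dsc_cfg)
{
	drm_dsc_dp_pps_header_init(pps_sdp);
	drm_dsc_pps_infoframe_pack(pps_sdp, dsc_cfg);
	/* the packed SDP can now be written to the hardware PPS registers */
}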
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 9a69ad7e9f3b..5e9ca6f96379 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -219,6 +219,9 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
mutex_lock(&fb_helper->lock);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
ret = __drm_fb_helper_add_one_connector(fb_helper, connector);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index f523948c82b1..d90ee03a84c6 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -224,6 +224,7 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_XYUV8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
{ .format = DRM_FORMAT_Y0L0, .depth = 0, .num_planes = 1,
.char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 512078ebd97b..8b55ece97967 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -257,7 +257,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
- if (dev->driver->gem_close_object)
+ if (obj->funcs && obj->funcs->close)
+ obj->funcs->close(obj, file_priv);
+ else if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
if (drm_core_check_feature(dev, DRIVER_PRIME))
@@ -410,7 +412,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
if (ret)
goto err_remove;
- if (dev->driver->gem_open_object) {
+ if (obj->funcs && obj->funcs->open) {
+ ret = obj->funcs->open(obj, file_priv);
+ if (ret)
+ goto err_revoke;
+ } else if (dev->driver->gem_open_object) {
ret = dev->driver->gem_open_object(obj, file_priv);
if (ret)
goto err_revoke;
@@ -835,7 +841,9 @@ drm_gem_object_free(struct kref *kref)
container_of(kref, struct drm_gem_object, refcount);
struct drm_device *dev = obj->dev;
- if (dev->driver->gem_free_object_unlocked) {
+ if (obj->funcs) {
+ obj->funcs->free(obj);
+ } else if (dev->driver->gem_free_object_unlocked) {
dev->driver->gem_free_object_unlocked(obj);
} else if (dev->driver->gem_free_object) {
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -864,13 +872,13 @@ drm_gem_object_put_unlocked(struct drm_gem_object *obj)
dev = obj->dev;
- if (dev->driver->gem_free_object_unlocked) {
- kref_put(&obj->refcount, drm_gem_object_free);
- } else {
+ if (dev->driver->gem_free_object) {
might_lock(&dev->struct_mutex);
if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
&dev->struct_mutex))
mutex_unlock(&dev->struct_mutex);
+ } else {
+ kref_put(&obj->refcount, drm_gem_object_free);
}
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);
@@ -960,11 +968,14 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
if (obj_size < vma->vm_end - vma->vm_start)
return -EINVAL;
- if (!dev->driver->gem_vm_ops)
+ if (obj->funcs && obj->funcs->vm_ops)
+ vma->vm_ops = obj->funcs->vm_ops;
+ else if (dev->driver->gem_vm_ops)
+ vma->vm_ops = dev->driver->gem_vm_ops;
+ else
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_ops = dev->driver->gem_vm_ops;
vma->vm_private_data = obj;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
@@ -1066,6 +1077,86 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
drm_printf_indent(p, indent, "imported=%s\n",
obj->import_attach ? "yes" : "no");
- if (obj->dev->driver->gem_print_info)
+ if (obj->funcs && obj->funcs->print_info)
+ obj->funcs->print_info(p, indent, obj);
+ else if (obj->dev->driver->gem_print_info)
obj->dev->driver->gem_print_info(p, indent, obj);
}
+
+/**
+ * drm_gem_pin - Pin backing buffer in memory
+ * @obj: GEM object
+ *
+ * Make sure the backing buffer is pinned in memory.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_pin(struct drm_gem_object *obj)
+{
+ if (obj->funcs && obj->funcs->pin)
+ return obj->funcs->pin(obj);
+ else if (obj->dev->driver->gem_prime_pin)
+ return obj->dev->driver->gem_prime_pin(obj);
+ else
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_pin);
+
+/**
+ * drm_gem_unpin - Unpin backing buffer from memory
+ * @obj: GEM object
+ *
+ * Relax the requirement that the backing buffer is pinned in memory.
+ */
+void drm_gem_unpin(struct drm_gem_object *obj)
+{
+ if (obj->funcs && obj->funcs->unpin)
+ obj->funcs->unpin(obj);
+ else if (obj->dev->driver->gem_prime_unpin)
+ obj->dev->driver->gem_prime_unpin(obj);
+}
+EXPORT_SYMBOL(drm_gem_unpin);
+
+/**
+ * drm_gem_vmap - Map buffer into kernel virtual address space
+ * @obj: GEM object
+ *
+ * Returns:
+ * A virtual pointer to the GEM object's backing storage or an ERR_PTR-encoded
+ * negative error code on failure.
+ */
+void *drm_gem_vmap(struct drm_gem_object *obj)
+{
+ void *vaddr;
+
+ if (obj->funcs && obj->funcs->vmap)
+ vaddr = obj->funcs->vmap(obj);
+ else if (obj->dev->driver->gem_prime_vmap)
+ vaddr = obj->dev->driver->gem_prime_vmap(obj);
+ else
+ vaddr = ERR_PTR(-EOPNOTSUPP);
+
+ if (!vaddr)
+ vaddr = ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+EXPORT_SYMBOL(drm_gem_vmap);
+
+/**
+ * drm_gem_vunmap - Remove buffer mapping from kernel virtual address space
+ * @obj: GEM object
+ * @vaddr: Virtual address (can be NULL)
+ */
+void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ if (!vaddr)
+ return;
+
+ if (obj->funcs && obj->funcs->vunmap)
+ obj->funcs->vunmap(obj, vaddr);
+ else if (obj->dev->driver->gem_prime_vunmap)
+ obj->dev->driver->gem_prime_vunmap(obj, vaddr);
+}
+EXPORT_SYMBOL(drm_gem_vunmap);
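A short sketch of a temporary CPU mapping built on the new wrappers; the foo_ name is hypothetical.

static int foo_cpu_fill(struct drm_gem_object *obj, u8 value)
{
	void *vaddr = drm_gem_vmap(obj);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, value, obj->size);
	drm_gem_vunmap(obj, vaddr);

	return 0;
}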
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 1d2ced882b66..cc26625b4b33 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -176,6 +176,7 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
*
* This function frees the backing memory of the CMA GEM object, cleans up the
* GEM object state and frees the memory used to store the object itself.
+ * If the buffer is imported and the virtual address is set, it is released.
* Drivers using the CMA helpers should set this as their
* &drm_driver.gem_free_object_unlocked callback.
*/
@@ -189,6 +190,8 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
cma_obj->vaddr, cma_obj->paddr);
} else if (gem_obj->import_attach) {
+ if (cma_obj->vaddr)
+ dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
}
@@ -575,3 +578,86 @@ void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
/* Nothing to do */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
+
+static const struct drm_gem_object_funcs drm_cma_gem_default_funcs = {
+ .free = drm_gem_cma_free_object,
+ .print_info = drm_gem_cma_print_info,
+ .get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .vmap = drm_gem_cma_prime_vmap,
+ .vm_ops = &drm_gem_cma_vm_ops,
+};
+
+/**
+ * drm_cma_gem_create_object_default_funcs - Create a CMA GEM object with a
+ * default function table
+ * @dev: DRM device
+ * @size: Size of the object to allocate
+ *
+ * This sets the GEM object functions to the default CMA helper functions.
+ * This function can be used as the &drm_driver.gem_create_object callback.
+ *
+ * Returns:
+ * A pointer to the allocated GEM object or NULL on failure.
+ */
+struct drm_gem_object *
+drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_cma_object *cma_obj;
+
+ cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+ if (!cma_obj)
+ return NULL;
+
+ cma_obj->base.funcs = &drm_cma_gem_default_funcs;
+
+ return &cma_obj->base;
+}
+EXPORT_SYMBOL(drm_cma_gem_create_object_default_funcs);
+
+/**
+ * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
+ * scatter/gather table and get the virtual address of the buffer
+ * @dev: DRM device
+ * @attach: DMA-BUF attachment
+ * @sgt: Scatter/gather table of pinned pages
+ *
+ * This function imports a scatter/gather table using
+ * drm_gem_cma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
+ * virtual address. This ensures that a CMA GEM object always has its virtual
+ * address set. This address is released when the object is freed.
+ *
+ * This function can be used as the &drm_driver.gem_prime_import_sg_table
+ * callback. The DRM_GEM_CMA_VMAP_DRIVER_OPS() macro provides a shortcut to set
+ * the necessary DRM driver operations.
+ *
+ * Returns:
+ * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_object *
+drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *obj;
+ void *vaddr;
+
+ vaddr = dma_buf_vmap(attach->dmabuf);
+ if (!vaddr) {
+ DRM_ERROR("Failed to vmap PRIME buffer\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(obj)) {
+ dma_buf_vunmap(attach->dmabuf, vaddr);
+ return obj;
+ }
+
+ cma_obj = to_drm_gem_cma_obj(obj);
+ cma_obj->vaddr = vaddr;
+
+ return obj;
+}
+EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);
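For illustration only, a CMA-based driver could opt in to both new helpers roughly like this; mandatory fields such as fops and name are omitted, and nothing here is required by the patch.

static struct drm_driver foo_driver = {
	.driver_features	   = DRIVER_GEM | DRIVER_MODESET |
				     DRIVER_PRIME | DRIVER_ATOMIC,
	.gem_create_object	   = drm_cma_gem_create_object_default_funcs,
	.dumb_create		   = drm_gem_cma_dumb_create,
	.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap,
	.gem_prime_mmap		   = drm_gem_prime_mmap,
};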
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
deleted file mode 100644
index 6b68e9088436..000000000000
--- a/drivers/gpu/drm/drm_info.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
- * \file drm_info.c
- * DRM info file implementations
- *
- * \author Ben Gamari <bgamari@gmail.com>
- */
-
-/*
- * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright 2008 Ben Gamari <bgamari@gmail.com>
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/seq_file.h>
-#include <drm/drmP.h>
-#include <drm/drm_gem.h>
-
-#include "drm_internal.h"
-#include "drm_legacy.h"
-
-/**
- * Called when "/proc/dri/.../name" is read.
- *
- * Prints the device name together with the bus id if available.
- */
-int drm_name_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_minor *minor = node->minor;
- struct drm_device *dev = minor->dev;
- struct drm_master *master;
-
- mutex_lock(&dev->master_mutex);
- master = dev->master;
- seq_printf(m, "%s", dev->driver->name);
- if (dev->dev)
- seq_printf(m, " dev=%s", dev_name(dev->dev));
- if (master && master->unique)
- seq_printf(m, " master=%s", master->unique);
- if (dev->unique)
- seq_printf(m, " unique=%s", dev->unique);
- seq_printf(m, "\n");
- mutex_unlock(&dev->master_mutex);
-
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../clients" is read.
- *
- */
-int drm_clients_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_file *priv;
- kuid_t uid;
-
- seq_printf(m,
- "%20s %5s %3s master a %5s %10s\n",
- "command",
- "pid",
- "dev",
- "uid",
- "magic");
-
- /* dev->filelist is sorted youngest first, but we want to present
- * oldest first (i.e. kernel, servers, clients), so walk backwardss.
- */
- mutex_lock(&dev->filelist_mutex);
- list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
- struct task_struct *task;
-
- rcu_read_lock(); /* locks pid_task()->comm */
- task = pid_task(priv->pid, PIDTYPE_PID);
- uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
- seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n",
- task ? task->comm : "<unknown>",
- pid_vnr(priv->pid),
- priv->minor->index,
- drm_is_current_master(priv) ? 'y' : 'n',
- priv->authenticated ? 'y' : 'n',
- from_kuid_munged(seq_user_ns(m), uid),
- priv->magic);
- rcu_read_unlock();
- }
- mutex_unlock(&dev->filelist_mutex);
- return 0;
-}
-
-static int drm_gem_one_name_info(int id, void *ptr, void *data)
-{
- struct drm_gem_object *obj = ptr;
- struct seq_file *m = data;
-
- seq_printf(m, "%6d %8zd %7d %8d\n",
- obj->name, obj->size,
- obj->handle_count,
- kref_read(&obj->refcount));
- return 0;
-}
-
-int drm_gem_name_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
-
- seq_printf(m, " name size handles refcount\n");
-
- mutex_lock(&dev->object_name_lock);
- idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
- mutex_unlock(&dev->object_name_lock);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 0c4eb4a9ab31..c7a7d7ce5d1c 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -56,11 +56,6 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
void drm_minor_release(struct drm_minor *minor);
-/* drm_info.c */
-int drm_name_info(struct seq_file *m, void *data);
-int drm_clients_info(struct seq_file *m, void* data);
-int drm_gem_name_info(struct seq_file *m, void *data);
-
/* drm_vblank.c */
void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
void drm_vblank_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 5670c67f28d4..703bfce975bb 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -297,6 +297,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.prop_crtc_id = prop;
+ prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "FB_DAMAGE_CLIPS",
+ 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_fb_damage_clips = prop;
+
prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
"ACTIVE");
if (!prop)
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 8a5100685875..51f534db9107 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -56,6 +56,10 @@
* drm_modeset_drop_locks(ctx);
* drm_modeset_acquire_fini(ctx);
*
+ * For convenience this control flow is implemented in
+ * DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END() for the case
+ * where all modeset locks need to be taken through drm_modeset_lock_all_ctx().
+ *
* If all that is needed is a single modeset lock, then the &struct
* drm_modeset_acquire_ctx is not needed and the locking can be simplified
* by passing a NULL instead of ctx in the drm_modeset_lock() call or
@@ -383,6 +387,8 @@ EXPORT_SYMBOL(drm_modeset_unlock);
* Locks acquired with this function should be released by calling the
* drm_modeset_drop_locks() function on @ctx.
*
+ * See also: DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END()
+ *
* Returns: 0 on success or a negative error-code on failure.
*/
int drm_modeset_lock_all_ctx(struct drm_device *dev,
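A minimal usage sketch of the new convenience macros (illustrative editorial example, not part of this patch; my_update_all() is a hypothetical driver helper, while the macro names and the acquire flag come from this series):

static int my_update_all(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	/* Takes every modeset lock, retrying internally on -EDEADLK */
	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
				   DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);

	/* ... touch state protected by the modeset locks here ... */

	/* Drops the locks and finishes the acquire context */
	DRM_MODESET_LOCK_ALL_END(ctx, ret);

	return ret;
}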
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index ab4e70e63f6e..52e445bb1aa5 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -63,7 +63,7 @@ static const struct drm_dmi_panel_orientation_data gpd_win2 = {
.width = 720,
.height = 1280,
.bios_dates = (const char * const []){
- "12/07/2017", "05/24/2018", NULL },
+ "12/07/2017", "05/24/2018", "06/29/2018", NULL },
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 679455e36829..5f650d8fc66b 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -767,11 +767,8 @@ static int setplane_internal(struct drm_plane *plane,
struct drm_modeset_acquire_ctx ctx;
int ret;
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-retry:
- ret = drm_modeset_lock_all_ctx(plane->dev, &ctx);
- if (ret)
- goto fail;
+ DRM_MODESET_LOCK_ALL_BEGIN(plane->dev, ctx,
+ DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
if (drm_drv_uses_atomic_modeset(plane->dev))
ret = __setplane_atomic(plane, crtc, fb,
@@ -782,14 +779,7 @@ retry:
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h, &ctx);
-fail:
- if (ret == -EDEADLK) {
- ret = drm_modeset_backoff(&ctx);
- if (!ret)
- goto retry;
- }
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
return ret;
}
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 8d54d51a6b6b..231e3f6d5f41 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -199,7 +199,6 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
{
struct drm_prime_attachment *prime_attach;
struct drm_gem_object *obj = dma_buf->priv;
- struct drm_device *dev = obj->dev;
prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
if (!prime_attach)
@@ -208,10 +207,7 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
prime_attach->dir = DMA_NONE;
attach->priv = prime_attach;
- if (!dev->driver->gem_prime_pin)
- return 0;
-
- return dev->driver->gem_prime_pin(obj);
+ return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);
@@ -228,7 +224,6 @@ void drm_gem_map_detach(struct dma_buf *dma_buf,
{
struct drm_prime_attachment *prime_attach = attach->priv;
struct drm_gem_object *obj = dma_buf->priv;
- struct drm_device *dev = obj->dev;
if (prime_attach) {
struct sg_table *sgt = prime_attach->sgt;
@@ -247,8 +242,7 @@ void drm_gem_map_detach(struct dma_buf *dma_buf,
attach->priv = NULL;
}
- if (dev->driver->gem_prime_unpin)
- dev->driver->gem_prime_unpin(obj);
+ drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);
@@ -310,7 +304,10 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
if (WARN_ON(prime_attach->dir != DMA_NONE))
return ERR_PTR(-EBUSY);
- sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+ if (obj->funcs)
+ sgt = obj->funcs->get_sg_table(obj);
+ else
+ sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
if (!IS_ERR(sgt)) {
if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
@@ -406,12 +403,13 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release);
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_gem_object *obj = dma_buf->priv;
- struct drm_device *dev = obj->dev;
+ void *vaddr;
- if (dev->driver->gem_prime_vmap)
- return dev->driver->gem_prime_vmap(obj);
- else
- return NULL;
+ vaddr = drm_gem_vmap(obj);
+ if (IS_ERR(vaddr))
+ vaddr = NULL;
+
+ return vaddr;
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
@@ -426,10 +424,8 @@ EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
struct drm_gem_object *obj = dma_buf->priv;
- struct drm_device *dev = obj->dev;
- if (dev->driver->gem_prime_vunmap)
- dev->driver->gem_prime_vunmap(obj, vaddr);
+ drm_gem_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
@@ -529,7 +525,12 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
return dmabuf;
}
- dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+ if (obj->funcs && obj->funcs->export)
+ dmabuf = obj->funcs->export(obj, flags);
+ else if (dev->driver->gem_prime_export)
+ dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+ else
+ dmabuf = drm_gem_prime_export(dev, obj, flags);
if (IS_ERR(dmabuf)) {
/* normally the created dma-buf takes ownership of the ref,
* but if that fails then drop the ref
@@ -649,6 +650,52 @@ out_unlock:
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
/**
+ * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
+ * @obj: GEM object
+ * @vma: Virtual address range
+ *
+ * This function sets up a userspace mapping for PRIME exported buffers using
+ * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
+ * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
+ * called to set up the mapping.
+ *
+ * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
+ */
+int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct drm_file *priv;
+ struct file *fil;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ fil = kzalloc(sizeof(*fil), GFP_KERNEL);
+ if (!priv || !fil) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Used by drm_gem_mmap() to lookup the GEM object */
+ priv->minor = obj->dev->primary;
+ fil->private_data = priv;
+
+ ret = drm_vma_node_allow(&obj->vma_node, priv);
+ if (ret)
+ goto out;
+
+ vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
+
+ ret = obj->dev->driver->fops->mmap(fil, vma);
+
+ drm_vma_node_revoke(&obj->vma_node, priv);
+out:
+ kfree(priv);
+ kfree(fil);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_prime_mmap);
+
+/**
* drm_gem_prime_import_dev - core implementation of the import callback
* @dev: drm_device to import into
* @dma_buf: dma-buf object to import
@@ -762,7 +809,10 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
/* never seen this one, need to import */
mutex_lock(&dev->object_name_lock);
- obj = dev->driver->gem_prime_import(dev, dma_buf);
+ if (dev->driver->gem_prime_import)
+ obj = dev->driver->gem_prime_import(dev, dma_buf);
+ else
+ obj = drm_gem_prime_import(dev, dma_buf);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto out_unlock;
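A sketch of how a driver might wire up the new PRIME mmap helper in its &drm_driver (illustrative only; my_fops and my_driver are hypothetical names, and the driver's fops->mmap must already route through drm_gem_mmap() as the kerneldoc above requires):

static const struct file_operations my_fops = {
	.owner = THIS_MODULE,
	.mmap  = drm_gem_mmap,
	/* remaining file operations elided */
};

static struct drm_driver my_driver = {
	.fops           = &my_fops,
	.gem_prime_mmap = drm_gem_prime_mmap,
	/* remaining driver callbacks elided */
};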
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index da8175d9c6ff..db30a0e89db8 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -56,47 +56,6 @@
#include "drm_internal.h"
#include <drm/drm_syncobj.h>
-/* merge normal syncobj to timeline syncobj, the point interval is 1 */
-#define DRM_SYNCOBJ_BINARY_POINT 1
-
-struct drm_syncobj_stub_fence {
- struct dma_fence base;
- spinlock_t lock;
-};
-
-static const char *drm_syncobj_stub_fence_get_name(struct dma_fence *fence)
-{
- return "syncobjstub";
-}
-
-static const struct dma_fence_ops drm_syncobj_stub_fence_ops = {
- .get_driver_name = drm_syncobj_stub_fence_get_name,
- .get_timeline_name = drm_syncobj_stub_fence_get_name,
-};
-
-struct drm_syncobj_signal_pt {
- struct dma_fence_array *fence_array;
- u64 value;
- struct list_head list;
-};
-
-static DEFINE_SPINLOCK(signaled_fence_lock);
-static struct dma_fence signaled_fence;
-
-static struct dma_fence *drm_syncobj_get_stub_fence(void)
-{
- spin_lock(&signaled_fence_lock);
- if (!signaled_fence.ops) {
- dma_fence_init(&signaled_fence,
- &drm_syncobj_stub_fence_ops,
- &signaled_fence_lock,
- 0, 0);
- dma_fence_signal_locked(&signaled_fence);
- }
- spin_unlock(&signaled_fence_lock);
-
- return dma_fence_get(&signaled_fence);
-}
/**
* drm_syncobj_find - lookup and reference a sync object.
* @file_private: drm file private pointer
@@ -123,27 +82,6 @@ struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
}
EXPORT_SYMBOL(drm_syncobj_find);
-static struct dma_fence *
-drm_syncobj_find_signal_pt_for_point(struct drm_syncobj *syncobj,
- uint64_t point)
-{
- struct drm_syncobj_signal_pt *signal_pt;
-
- if ((syncobj->type == DRM_SYNCOBJ_TYPE_TIMELINE) &&
- (point <= syncobj->timeline))
- return drm_syncobj_get_stub_fence();
-
- list_for_each_entry(signal_pt, &syncobj->signal_pt_list, list) {
- if (point > signal_pt->value)
- continue;
- if ((syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) &&
- (point != signal_pt->value))
- continue;
- return dma_fence_get(&signal_pt->fence_array->base);
- }
- return NULL;
-}
-
static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
struct drm_syncobj_cb *cb,
drm_syncobj_func_t func)
@@ -152,273 +90,101 @@ static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
list_add_tail(&cb->node, &syncobj->cb_list);
}
-static void drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
- struct dma_fence **fence,
- struct drm_syncobj_cb *cb,
- drm_syncobj_func_t func)
+static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
+ struct dma_fence **fence,
+ struct drm_syncobj_cb *cb,
+ drm_syncobj_func_t func)
{
- u64 pt_value = 0;
-
- WARN_ON(*fence);
-
- if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) {
- /*BINARY syncobj always wait on last pt */
- pt_value = syncobj->signal_point;
+ int ret;
- if (pt_value == 0)
- pt_value += DRM_SYNCOBJ_BINARY_POINT;
- }
+ *fence = drm_syncobj_fence_get(syncobj);
+ if (*fence)
+ return 1;
- mutex_lock(&syncobj->cb_mutex);
- spin_lock(&syncobj->pt_lock);
- *fence = drm_syncobj_find_signal_pt_for_point(syncobj, pt_value);
- spin_unlock(&syncobj->pt_lock);
- if (!*fence)
+ spin_lock(&syncobj->lock);
+ /* We've already tried once to get a fence and failed. Now that we
+ * have the lock, try one more time just to be sure we don't add a
+ * callback when a fence has already been set.
+ */
+ if (syncobj->fence) {
+ *fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
+ lockdep_is_held(&syncobj->lock)));
+ ret = 1;
+ } else {
+ *fence = NULL;
drm_syncobj_add_callback_locked(syncobj, cb, func);
- mutex_unlock(&syncobj->cb_mutex);
-}
-
-static void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
- struct drm_syncobj_cb *cb)
-{
- mutex_lock(&syncobj->cb_mutex);
- list_del_init(&cb->node);
- mutex_unlock(&syncobj->cb_mutex);
-}
+ ret = 0;
+ }
+ spin_unlock(&syncobj->lock);
-static void drm_syncobj_init(struct drm_syncobj *syncobj)
-{
- spin_lock(&syncobj->pt_lock);
- syncobj->timeline_context = dma_fence_context_alloc(1);
- syncobj->timeline = 0;
- syncobj->signal_point = 0;
- init_waitqueue_head(&syncobj->wq);
-
- INIT_LIST_HEAD(&syncobj->signal_pt_list);
- spin_unlock(&syncobj->pt_lock);
+ return ret;
}
-static void drm_syncobj_fini(struct drm_syncobj *syncobj)
+void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb,
+ drm_syncobj_func_t func)
{
- struct drm_syncobj_signal_pt *signal_pt = NULL, *tmp;
-
- spin_lock(&syncobj->pt_lock);
- list_for_each_entry_safe(signal_pt, tmp,
- &syncobj->signal_pt_list, list) {
- list_del(&signal_pt->list);
- dma_fence_put(&signal_pt->fence_array->base);
- kfree(signal_pt);
- }
- spin_unlock(&syncobj->pt_lock);
+ spin_lock(&syncobj->lock);
+ drm_syncobj_add_callback_locked(syncobj, cb, func);
+ spin_unlock(&syncobj->lock);
}
-static int drm_syncobj_create_signal_pt(struct drm_syncobj *syncobj,
- struct dma_fence *fence,
- u64 point)
+void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb)
{
- struct drm_syncobj_signal_pt *signal_pt =
- kzalloc(sizeof(struct drm_syncobj_signal_pt), GFP_KERNEL);
- struct drm_syncobj_signal_pt *tail_pt;
- struct dma_fence **fences;
- int num_fences = 0;
- int ret = 0, i;
-
- if (!signal_pt)
- return -ENOMEM;
- if (!fence)
- goto out;
-
- fences = kmalloc_array(sizeof(void *), 2, GFP_KERNEL);
- if (!fences) {
- ret = -ENOMEM;
- goto out;
- }
- fences[num_fences++] = dma_fence_get(fence);
- /* timeline syncobj must take this dependency */
- if (syncobj->type == DRM_SYNCOBJ_TYPE_TIMELINE) {
- spin_lock(&syncobj->pt_lock);
- if (!list_empty(&syncobj->signal_pt_list)) {
- tail_pt = list_last_entry(&syncobj->signal_pt_list,
- struct drm_syncobj_signal_pt, list);
- fences[num_fences++] =
- dma_fence_get(&tail_pt->fence_array->base);
- }
- spin_unlock(&syncobj->pt_lock);
- }
- signal_pt->fence_array = dma_fence_array_create(num_fences, fences,
- syncobj->timeline_context,
- point, false);
- if (!signal_pt->fence_array) {
- ret = -ENOMEM;
- goto fail;
- }
-
- spin_lock(&syncobj->pt_lock);
- if (syncobj->signal_point >= point) {
- DRM_WARN("A later signal is ready!");
- spin_unlock(&syncobj->pt_lock);
- goto exist;
- }
- signal_pt->value = point;
- list_add_tail(&signal_pt->list, &syncobj->signal_pt_list);
- syncobj->signal_point = point;
- spin_unlock(&syncobj->pt_lock);
- wake_up_all(&syncobj->wq);
-
- return 0;
-exist:
- dma_fence_put(&signal_pt->fence_array->base);
-fail:
- for (i = 0; i < num_fences; i++)
- dma_fence_put(fences[i]);
- kfree(fences);
-out:
- kfree(signal_pt);
- return ret;
+ spin_lock(&syncobj->lock);
+ list_del_init(&cb->node);
+ spin_unlock(&syncobj->lock);
}
-static void drm_syncobj_garbage_collection(struct drm_syncobj *syncobj)
-{
- struct drm_syncobj_signal_pt *signal_pt, *tmp, *tail_pt;
-
- spin_lock(&syncobj->pt_lock);
- tail_pt = list_last_entry(&syncobj->signal_pt_list,
- struct drm_syncobj_signal_pt,
- list);
- list_for_each_entry_safe(signal_pt, tmp,
- &syncobj->signal_pt_list, list) {
- if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY &&
- signal_pt == tail_pt)
- continue;
- if (dma_fence_is_signaled(&signal_pt->fence_array->base)) {
- syncobj->timeline = signal_pt->value;
- list_del(&signal_pt->list);
- dma_fence_put(&signal_pt->fence_array->base);
- kfree(signal_pt);
- } else {
- /*signal_pt is in order in list, from small to big, so
- * the later must not be signal either */
- break;
- }
- }
-
- spin_unlock(&syncobj->pt_lock);
-}
/**
* drm_syncobj_replace_fence - replace fence in a sync object.
* @syncobj: Sync object to replace fence in
- * @point: timeline point
* @fence: fence to install in sync file.
*
- * This replaces the fence on a sync object, or a timeline point fence.
+ * This replaces the fence on a sync object.
*/
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
- u64 point,
struct dma_fence *fence)
{
- u64 pt_value = point;
-
- drm_syncobj_garbage_collection(syncobj);
- if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) {
- if (!fence) {
- drm_syncobj_fini(syncobj);
- drm_syncobj_init(syncobj);
- return;
- }
- pt_value = syncobj->signal_point +
- DRM_SYNCOBJ_BINARY_POINT;
- }
- drm_syncobj_create_signal_pt(syncobj, fence, pt_value);
- if (fence) {
- struct drm_syncobj_cb *cur, *tmp;
- LIST_HEAD(cb_list);
+ struct dma_fence *old_fence;
+ struct drm_syncobj_cb *cur, *tmp;
- mutex_lock(&syncobj->cb_mutex);
+ if (fence)
+ dma_fence_get(fence);
+
+ spin_lock(&syncobj->lock);
+
+ old_fence = rcu_dereference_protected(syncobj->fence,
+ lockdep_is_held(&syncobj->lock));
+ rcu_assign_pointer(syncobj->fence, fence);
+
+ if (fence != old_fence) {
list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
list_del_init(&cur->node);
cur->func(syncobj, cur);
}
- mutex_unlock(&syncobj->cb_mutex);
}
-}
-EXPORT_SYMBOL(drm_syncobj_replace_fence);
-
-static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
-{
- struct drm_syncobj_stub_fence *fence;
- fence = kzalloc(sizeof(*fence), GFP_KERNEL);
- if (fence == NULL)
- return -ENOMEM;
- spin_lock_init(&fence->lock);
- dma_fence_init(&fence->base, &drm_syncobj_stub_fence_ops,
- &fence->lock, 0, 0);
- dma_fence_signal(&fence->base);
+ spin_unlock(&syncobj->lock);
- drm_syncobj_replace_fence(syncobj, 0, &fence->base);
-
- dma_fence_put(&fence->base);
-
- return 0;
-}
-
-static int
-drm_syncobj_point_get(struct drm_syncobj *syncobj, u64 point, u64 flags,
- struct dma_fence **fence)
-{
- int ret = 0;
-
- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
- ret = wait_event_interruptible(syncobj->wq,
- point <= syncobj->signal_point);
- if (ret < 0)
- return ret;
- }
- spin_lock(&syncobj->pt_lock);
- *fence = drm_syncobj_find_signal_pt_for_point(syncobj, point);
- if (!*fence)
- ret = -EINVAL;
- spin_unlock(&syncobj->pt_lock);
- return ret;
+ dma_fence_put(old_fence);
}
+EXPORT_SYMBOL(drm_syncobj_replace_fence);
/**
- * drm_syncobj_search_fence - lookup and reference the fence in a sync object or
- * in a timeline point
- * @syncobj: sync object pointer
- * @point: timeline point
- * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
- * @fence: out parameter for the fence
- *
- * if flags is DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, the function will block
- * here until specific timeline points is reached.
- * if not, you need a submit thread and block in userspace until all future
- * timeline points have materialized, only then you can submit to the kernel,
- * otherwise, function will fail to return fence.
+ * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
+ * @syncobj: sync object to assign the fence on
*
- * Returns 0 on success or a negative error value on failure. On success @fence
- * contains a reference to the fence, which must be released by calling
- * dma_fence_put().
+ * Assign an already signaled stub fence to the sync object.
*/
-int drm_syncobj_search_fence(struct drm_syncobj *syncobj, u64 point,
- u64 flags, struct dma_fence **fence)
+static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
- u64 pt_value = point;
+ struct dma_fence *fence = dma_fence_get_stub();
- if (!syncobj)
- return -ENOENT;
-
- drm_syncobj_garbage_collection(syncobj);
- if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) {
- /*BINARY syncobj always wait on last pt */
- pt_value = syncobj->signal_point;
-
- if (pt_value == 0)
- pt_value += DRM_SYNCOBJ_BINARY_POINT;
- }
- return drm_syncobj_point_get(syncobj, pt_value, flags, fence);
+ drm_syncobj_replace_fence(syncobj, fence);
+ dma_fence_put(fence);
}
-EXPORT_SYMBOL(drm_syncobj_search_fence);
/**
* drm_syncobj_find_fence - lookup and reference the fence in a sync object
@@ -429,7 +195,7 @@ EXPORT_SYMBOL(drm_syncobj_search_fence);
* @fence: out parameter for the fence
*
* This is just a convenience function that combines drm_syncobj_find() and
- * drm_syncobj_lookup_fence().
+ * drm_syncobj_fence_get().
*
* Returns 0 on success or a negative error value on failure. On success @fence
* contains a reference to the fence, which must be released by calling
@@ -440,11 +206,16 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
struct dma_fence **fence)
{
struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
- int ret;
+ int ret = 0;
- ret = drm_syncobj_search_fence(syncobj, point, flags, fence);
- if (syncobj)
- drm_syncobj_put(syncobj);
+ if (!syncobj)
+ return -ENOENT;
+
+ *fence = drm_syncobj_fence_get(syncobj);
+ if (!*fence) {
+ ret = -EINVAL;
+ }
+ drm_syncobj_put(syncobj);
return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
@@ -460,7 +231,7 @@ void drm_syncobj_free(struct kref *kref)
struct drm_syncobj *syncobj = container_of(kref,
struct drm_syncobj,
refcount);
- drm_syncobj_fini(syncobj);
+ drm_syncobj_replace_fence(syncobj, NULL);
kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);
@@ -480,7 +251,6 @@ EXPORT_SYMBOL(drm_syncobj_free);
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
struct dma_fence *fence)
{
- int ret;
struct drm_syncobj *syncobj;
syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
@@ -489,24 +259,13 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
kref_init(&syncobj->refcount);
INIT_LIST_HEAD(&syncobj->cb_list);
- spin_lock_init(&syncobj->pt_lock);
- mutex_init(&syncobj->cb_mutex);
- if (flags & DRM_SYNCOBJ_CREATE_TYPE_TIMELINE)
- syncobj->type = DRM_SYNCOBJ_TYPE_TIMELINE;
- else
- syncobj->type = DRM_SYNCOBJ_TYPE_BINARY;
- drm_syncobj_init(syncobj);
-
- if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
- ret = drm_syncobj_assign_null_handle(syncobj);
- if (ret < 0) {
- drm_syncobj_put(syncobj);
- return ret;
- }
- }
+ spin_lock_init(&syncobj->lock);
+
+ if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
+ drm_syncobj_assign_null_handle(syncobj);
if (fence)
- drm_syncobj_replace_fence(syncobj, 0, fence);
+ drm_syncobj_replace_fence(syncobj, fence);
*out_syncobj = syncobj;
return 0;
@@ -691,7 +450,7 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
return -ENOENT;
}
- drm_syncobj_replace_fence(syncobj, 0, fence);
+ drm_syncobj_replace_fence(syncobj, fence);
dma_fence_put(fence);
drm_syncobj_put(syncobj);
return 0;
@@ -778,8 +537,7 @@ drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
return -EOPNOTSUPP;
/* no valid flags yet */
- if (args->flags & ~(DRM_SYNCOBJ_CREATE_SIGNALED |
- DRM_SYNCOBJ_CREATE_TYPE_TIMELINE))
+ if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
return -EINVAL;
return drm_syncobj_create_as_handle(file_private,
@@ -872,8 +630,9 @@ static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
struct syncobj_wait_entry *wait =
container_of(cb, struct syncobj_wait_entry, syncobj_cb);
- drm_syncobj_search_fence(syncobj, 0, 0, &wait->fence);
-
+ /* This happens inside the syncobj lock */
+ wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
+ lockdep_is_held(&syncobj->lock)));
wake_up_process(wait->task);
}
@@ -899,8 +658,7 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
signaled_count = 0;
for (i = 0; i < count; ++i) {
entries[i].task = current;
- drm_syncobj_search_fence(syncobjs[i], 0, 0,
- &entries[i].fence);
+ entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
if (!entries[i].fence) {
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
continue;
@@ -931,9 +689,6 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
for (i = 0; i < count; ++i) {
- if (entries[i].fence)
- continue;
-
drm_syncobj_fence_get_or_add_callback(syncobjs[i],
&entries[i].fence,
&entries[i].syncobj_cb,
@@ -1165,13 +920,12 @@ drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
if (ret < 0)
return ret;
- for (i = 0; i < args->count_handles; i++) {
- drm_syncobj_fini(syncobjs[i]);
- drm_syncobj_init(syncobjs[i]);
- }
+ for (i = 0; i < args->count_handles; i++)
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
+
drm_syncobj_array_free(syncobjs, args->count_handles);
- return ret;
+ return 0;
}
int
@@ -1199,11 +953,8 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
if (ret < 0)
return ret;
- for (i = 0; i < args->count_handles; i++) {
- ret = drm_syncobj_assign_null_handle(syncobjs[i]);
- if (ret < 0)
- break;
- }
+ for (i = 0; i < args->count_handles; i++)
+ drm_syncobj_assign_null_handle(syncobjs[i]);
drm_syncobj_array_free(syncobjs, args->count_handles);
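With the timeline plumbing reverted, drivers install an out-fence through the two-argument drm_syncobj_replace_fence(); a hedged sketch of the usual pattern (my_attach_out_fence() and done_fence are hypothetical placeholders, not from this patch):

static int my_attach_out_fence(struct drm_file *file_priv, u32 handle,
			       struct dma_fence *done_fence)
{
	struct drm_syncobj *syncobj;

	syncobj = drm_syncobj_find(file_priv, handle);
	if (!syncobj)
		return -ENOENT;

	/* Installs the fence and drops any previously installed one */
	drm_syncobj_replace_fence(syncobj, done_fence);
	drm_syncobj_put(syncobj);

	return 0;
}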
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 83c1f46670bf..52802e6049e0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -550,7 +550,7 @@ out_register:
out_bind:
kfree(priv);
out_unref:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -567,7 +567,7 @@ static void etnaviv_unbind(struct device *dev)
drm->dev_private = NULL;
kfree(priv);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops etnaviv_master_ops = {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index fd6bad2100cf..3fbb4855396c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -135,13 +135,13 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
mmu_size + gpu->buffer.size;
/* Add in the active command buffers */
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
submit = to_etnaviv_submit(s_job);
file_size += submit->cmdbuf.size;
n_obj++;
}
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);
/* Add in the active buffer objects */
list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
@@ -183,14 +183,14 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
gpu->buffer.size,
etnaviv_cmdbuf_get_va(&gpu->buffer));
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
submit = to_etnaviv_submit(s_job);
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
submit->cmdbuf.vaddr, submit->cmdbuf.size,
etnaviv_cmdbuf_get_va(&submit->cmdbuf));
}
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);
/* Reserve space for the bomap */
if (n_bomap_pages) {
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 208bc27be3cc..3691a140c950 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,11 +10,6 @@ config DRM_EXYNOS
if DRM_EXYNOS
-config DRM_EXYNOS_IOMMU
- bool
- depends on EXYNOS_IOMMU
- default y
-
comment "CRTCs"
config DRM_EXYNOS_FIMD
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 2ad146bbf4f5..2fd2f3ee4fcf 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -4,10 +4,9 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
- exynos_drm_gem.o exynos_drm_plane.o
+ exynos_drm_gem.o exynos_drm_plane.o exynos_drm_dma.o
exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
-exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON) += exynos5433_drm_decon.o
exynosdrm-$(CONFIG_DRM_EXYNOS7_DECON) += exynos7_drm_decon.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index aef487dd8731..5b4e0e8b23bc 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -25,7 +25,6 @@
#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
#include "regs-decon5433.h"
#define DSD_CFG_MUX 0x1004
@@ -84,6 +83,14 @@ static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
[CURSON_WIN] = DRM_PLANE_TYPE_CURSOR,
};
+static const unsigned int capabilities[WINDOWS_NR] = {
+ 0,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+};
+
static inline void decon_set_bits(struct decon_context *ctx, u32 reg, u32 mask,
u32 val)
{
@@ -252,11 +259,76 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
}
+static void decon_win_set_bldeq(struct decon_context *ctx, unsigned int win,
+ unsigned int alpha, unsigned int pixel_alpha)
+{
+ u32 mask = BLENDERQ_A_FUNC_F(0xf) | BLENDERQ_B_FUNC_F(0xf);
+ u32 val = 0;
+
+ switch (pixel_alpha) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ case DRM_MODE_BLEND_COVERAGE:
+ val |= BLENDERQ_A_FUNC_F(BLENDERQ_ALPHA_A);
+ val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+ break;
+ case DRM_MODE_BLEND_PREMULTI:
+ default:
+ if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+ val |= BLENDERQ_A_FUNC_F(BLENDERQ_ALPHA0);
+ val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+ } else {
+ val |= BLENDERQ_A_FUNC_F(BLENDERQ_ONE);
+ val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+ }
+ break;
+ }
+ decon_set_bits(ctx, DECON_BLENDERQx(win), mask, val);
+}
+
+static void decon_win_set_bldmod(struct decon_context *ctx, unsigned int win,
+ unsigned int alpha, unsigned int pixel_alpha)
+{
+ u32 win_alpha = alpha >> 8;
+ u32 val = 0;
+
+ switch (pixel_alpha) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ break;
+ case DRM_MODE_BLEND_COVERAGE:
+ case DRM_MODE_BLEND_PREMULTI:
+ default:
+ val |= WINCONx_ALPHA_SEL_F;
+ val |= WINCONx_BLD_PIX_F;
+ val |= WINCONx_ALPHA_MUL_F;
+ break;
+ }
+ decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_BLEND_MODE_MASK, val);
+
+ if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+ val = VIDOSD_Wx_ALPHA_R_F(win_alpha) |
+ VIDOSD_Wx_ALPHA_G_F(win_alpha) |
+ VIDOSD_Wx_ALPHA_B_F(win_alpha);
+ decon_set_bits(ctx, DECON_VIDOSDxC(win),
+ VIDOSDxC_ALPHA0_RGB_MASK, val);
+ decon_set_bits(ctx, DECON_BLENDCON, BLEND_NEW, BLEND_NEW);
+ }
+}
+
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
struct drm_framebuffer *fb)
{
+ struct exynos_drm_plane plane = ctx->planes[win];
+ struct exynos_drm_plane_state *state =
+ to_exynos_plane_state(plane.base.state);
+ unsigned int alpha = state->base.alpha;
+ unsigned int pixel_alpha;
unsigned long val;
+ if (fb->format->has_alpha)
+ pixel_alpha = state->base.pixel_blend_mode;
+ else
+ pixel_alpha = DRM_MODE_BLEND_PIXEL_NONE;
+
val = readl(ctx->addr + DECON_WINCONx(win));
val &= WINCONx_ENWIN_F;
@@ -279,7 +351,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
case DRM_FORMAT_ARGB8888:
default:
val |= WINCONx_BPPMODE_32BPP_A8888;
- val |= WINCONx_WSWP_F | WINCONx_BLD_PIX_F | WINCONx_ALPHA_SEL_F;
+ val |= WINCONx_WSWP_F;
val |= WINCONx_BURSTLEN_16WORD;
break;
}
@@ -298,8 +370,12 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_8WORD;
}
+ decon_set_bits(ctx, DECON_WINCONx(win), ~WINCONx_BLEND_MODE_MASK, val);
- writel(val, ctx->addr + DECON_WINCONx(win));
+ if (win > 0) {
+ decon_win_set_bldmod(ctx, win, alpha, pixel_alpha);
+ decon_win_set_bldeq(ctx, win, alpha, pixel_alpha);
+ }
}
static void decon_shadow_protect(struct decon_context *ctx, bool protect)
@@ -552,6 +628,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
ctx->configs[win].num_pixel_formats = ARRAY_SIZE(decon_formats);
ctx->configs[win].zpos = win - ctx->first_win;
ctx->configs[win].type = decon_win_types[win];
+ ctx->configs[win].capabilities = capabilities[win];
ret = exynos_plane_init(drm_dev, &ctx->planes[win], win,
&ctx->configs[win]);
@@ -569,7 +646,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
decon_clear_channels(ctx->crtc);
- return drm_iommu_attach_device(drm_dev, dev);
+ return exynos_drm_register_dma(drm_dev, dev);
}
static void decon_unbind(struct device *dev, struct device *master, void *data)
@@ -579,7 +656,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
decon_disable(ctx->crtc);
/* detach this sub driver from iommu mapping if supported. */
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
}
static const struct component_ops decon_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 88cbd000eb09..381aa3d60e37 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -30,7 +30,6 @@
#include "exynos_drm_plane.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
-#include "exynos_drm_iommu.h"
#include "regs-decon7.h"
/*
@@ -133,13 +132,13 @@ static int decon_ctx_initialize(struct decon_context *ctx,
decon_clear_channels(ctx->crtc);
- return drm_iommu_attach_device(drm_dev, ctx->dev);
+ return exynos_drm_register_dma(drm_dev, ctx->dev);
}
static void decon_ctx_remove(struct decon_context *ctx)
{
/* detach this sub driver from iommu mapping if supported. */
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
}
static u32 decon_calc_clkdiv(struct decon_context *ctx,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
new file mode 100644
index 000000000000..3432c5ee9f0c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2012 Samsung Electronics Co., Ltd.
+// Author: Inki Dae <inki.dae@samsung.com>
+// Author: Andrzej Hajda <a.hajda@samsung.com>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+
+#include "exynos_drm_drv.h"
+
+#if defined(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#else
+#define arm_iommu_create_mapping(...) ({ NULL; })
+#define arm_iommu_attach_device(...) ({ -ENODEV; })
+#define arm_iommu_release_mapping(...) ({ })
+#define arm_iommu_detach_device(...) ({ })
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
+#if !defined(CONFIG_IOMMU_DMA)
+#define iommu_dma_init_domain(...) ({ -EINVAL; })
+#endif
+
+#define EXYNOS_DEV_ADDR_START 0x20000000
+#define EXYNOS_DEV_ADDR_SIZE 0x40000000
+
+static inline int configure_dma_max_seg_size(struct device *dev)
+{
+ if (!dev->dma_parms)
+ dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ return 0;
+}
+
+static inline void clear_dma_max_seg_size(struct device *dev)
+{
+ kfree(dev->dma_parms);
+ dev->dma_parms = NULL;
+}
+
+/*
+ * drm_iommu_attach_device - attach a device to the iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attached
+ *
+ * This function should be called by sub drivers to attach themselves to the
+ * iommu mapping.
+ */
+static int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ int ret;
+
+ if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
+ DRM_ERROR("Device %s lacks support for IOMMU\n",
+ dev_name(subdrv_dev));
+ return -EINVAL;
+ }
+
+ ret = configure_dma_max_seg_size(subdrv_dev);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
+ if (to_dma_iommu_mapping(subdrv_dev))
+ arm_iommu_detach_device(subdrv_dev);
+
+ ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
+ } else if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
+ ret = iommu_attach_device(priv->mapping, subdrv_dev);
+ }
+
+ if (ret)
+ clear_dma_max_seg_size(subdrv_dev);
+
+ return ret;
+}
+
+/*
+ * drm_iommu_detach_device - detach a device from the iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach themselves from the
+ * iommu mapping.
+ */
+static void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+ arm_iommu_detach_device(subdrv_dev);
+ else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+ iommu_detach_device(priv->mapping, subdrv_dev);
+
+ clear_dma_max_seg_size(subdrv_dev);
+}
+
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
+{
+ struct exynos_drm_private *priv = drm->dev_private;
+
+ if (!priv->dma_dev) {
+ priv->dma_dev = dev;
+ DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
+ dev_name(dev));
+ }
+
+ if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+ return 0;
+
+ if (!priv->mapping) {
+ void *mapping;
+
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+ mapping = arm_iommu_create_mapping(&platform_bus_type,
+ EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
+ else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+ mapping = iommu_get_domain_for_dev(priv->dma_dev);
+
+ if (IS_ERR(mapping))
+ return PTR_ERR(mapping);
+ priv->mapping = mapping;
+ }
+
+ return drm_iommu_attach_device(drm, dev);
+}
+
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev)
+{
+ if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+ drm_iommu_detach_device(drm, dev);
+}
+
+void exynos_drm_cleanup_dma(struct drm_device *drm)
+{
+ struct exynos_drm_private *priv = drm->dev_private;
+
+ if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+ return;
+
+ arm_iommu_release_mapping(priv->mapping);
+ priv->mapping = NULL;
+ priv->dma_dev = NULL;
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 6f76baf4550a..2c75e789b2a7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -30,7 +30,6 @@
#include "exynos_drm_ipp.h"
#include "exynos_drm_vidi.h"
#include "exynos_drm_g2d.h"
-#include "exynos_drm_iommu.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
@@ -175,8 +174,7 @@ struct exynos_drm_driver_info {
#define DRM_COMPONENT_DRIVER BIT(0) /* supports component framework */
#define DRM_VIRTUAL_DEVICE BIT(1) /* create virtual platform device */
-#define DRM_DMA_DEVICE BIT(2) /* can be used for dma allocations */
-#define DRM_FIMC_DEVICE BIT(3) /* devices shared with V4L2 subsystem */
+#define DRM_FIMC_DEVICE BIT(2) /* devices shared with V4L2 subsystem */
#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)
@@ -187,16 +185,16 @@ struct exynos_drm_driver_info {
static struct exynos_drm_driver_info exynos_drm_drivers[] = {
{
DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD),
- DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON),
- DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON),
- DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
- DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
DRM_COMPONENT_DRIVER
@@ -267,27 +265,6 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
return match ?: ERR_PTR(-ENODEV);
}
-static struct device *exynos_drm_get_dma_device(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
- struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
- struct device *dev;
-
- if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
- continue;
-
- while ((dev = bus_find_device(&platform_bus_type, NULL,
- &info->driver->driver,
- (void *)platform_bus_type.match))) {
- put_device(dev);
- return dev;
- }
- }
- return NULL;
-}
-
static int exynos_drm_bind(struct device *dev)
{
struct exynos_drm_private *private;
@@ -312,23 +289,6 @@ static int exynos_drm_bind(struct device *dev)
dev_set_drvdata(dev, drm);
drm->dev_private = (void *)private;
- /* the first real CRTC device is used for all dma mapping operations */
- private->dma_dev = exynos_drm_get_dma_device();
- if (!private->dma_dev) {
- DRM_ERROR("no device found for DMA mapping operations.\n");
- ret = -ENODEV;
- goto err_free_private;
- }
- DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
- dev_name(private->dma_dev));
-
- /* create common IOMMU mapping for all devices attached to Exynos DRM */
- ret = drm_create_iommu_mapping(drm);
- if (ret < 0) {
- DRM_ERROR("failed to create iommu mapping.\n");
- goto err_free_private;
- }
-
drm_mode_config_init(drm);
exynos_drm_mode_config_init(drm);
@@ -385,8 +345,7 @@ err_unbind_all:
component_unbind_all(drm->dev, drm);
err_mode_config_cleanup:
drm_mode_config_cleanup(drm);
- drm_release_iommu_mapping(drm);
-err_free_private:
+ exynos_drm_cleanup_dma(drm);
kfree(private);
err_free_drm:
drm_dev_put(drm);
@@ -405,7 +364,7 @@ static void exynos_drm_unbind(struct device *dev)
component_unbind_all(drm->dev, drm);
drm_mode_config_cleanup(drm);
- drm_release_iommu_mapping(drm);
+ exynos_drm_cleanup_dma(drm);
kfree(drm->dev_private);
drm->dev_private = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 5e61e707f955..71eb240bc1f4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -214,6 +214,17 @@ static inline struct device *to_dma_dev(struct drm_device *dev)
return priv->dma_dev;
}
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+
+ return priv->mapping ? true : false;
+}
+
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev);
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev);
+void exynos_drm_cleanup_dma(struct drm_device *drm);
+
#ifdef CONFIG_DRM_EXYNOS_DPI
struct drm_encoder *exynos_dpi_probe(struct device *dev);
int exynos_dpi_remove(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 9f52382e19ee..31eb538a44ae 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -24,7 +24,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_crtc.h"
static int check_fb_gem_memory_type(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 01d182289efa..ce9604ca8041 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -23,7 +23,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
-#include "exynos_drm_iommu.h"
#define MAX_CONNECTOR 4
#define PREFERRED_BPP 32
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index e8d0670bb5f8..90dfea0aec4d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -25,7 +25,6 @@
#include <drm/exynos_drm.h>
#include "regs-fimc.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
/*
@@ -1129,7 +1128,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
- drm_iommu_attach_device(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev);
exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1149,7 +1148,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(drm_dev, ipp);
- drm_iommu_detach_device(drm_dev, dev);
+ exynos_drm_unregister_dma(drm_dev, dev);
}
static const struct component_ops fimc_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index b7f56935a46b..e3d6a8584715 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -32,7 +32,6 @@
#include "exynos_drm_fb.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
/*
* FIMD stands for Fully Interactive Mobile Display and
@@ -1011,7 +1010,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
if (is_drm_iommu_supported(drm_dev))
fimd_clear_channels(ctx->crtc);
- return drm_iommu_attach_device(drm_dev, dev);
+ return exynos_drm_register_dma(drm_dev, dev);
}
static void fimd_unbind(struct device *dev, struct device *master,
@@ -1021,7 +1020,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
fimd_disable(ctx->crtc);
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
if (ctx->encoder)
exynos_dpi_remove(ctx->encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f2481a2014bb..24c536d6d9cf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -25,7 +25,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_gem.h"
-#include "exynos_drm_iommu.h"
#define G2D_HW_MAJOR_VER 4
#define G2D_HW_MINOR_VER 1
@@ -1405,7 +1404,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = drm_iommu_attach_device(drm_dev, dev);
+ ret = exynos_drm_register_dma(drm_dev, dev);
if (ret < 0) {
dev_err(dev, "failed to enable iommu.\n");
g2d_fini_cmdlist(g2d);
@@ -1430,7 +1429,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data)
priv->g2d_dev = NULL;
cancel_work_sync(&g2d->runqueue_work);
- drm_iommu_detach_device(g2d->drm_dev, dev);
+ exynos_drm_unregister_dma(g2d->drm_dev, dev);
}
static const struct component_ops g2d_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 34ace85feb68..df66c383a877 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -19,7 +19,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
-#include "exynos_drm_iommu.h"
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index ce15d46bfce8..f048d97fe9e2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -24,7 +24,6 @@
#include <drm/exynos_drm.h>
#include "regs-gsc.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
/*
@@ -1170,7 +1169,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
- drm_iommu_attach_device(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev);
exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1190,7 +1189,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(drm_dev, ipp);
- drm_iommu_detach_device(drm_dev, dev);
+ exynos_drm_unregister_dma(drm_dev, dev);
}
static const struct component_ops gsc_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
deleted file mode 100644
index 0f373702414e..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/* exynos_drm_iommu.c
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-
-#include <linux/dma-mapping.h>
-#include <linux/iommu.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
-
-static inline int configure_dma_max_seg_size(struct device *dev)
-{
- if (!dev->dma_parms)
- dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
- if (!dev->dma_parms)
- return -ENOMEM;
-
- dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
- return 0;
-}
-
-static inline void clear_dma_max_seg_size(struct device *dev)
-{
- kfree(dev->dma_parms);
- dev->dma_parms = NULL;
-}
-
-/*
- * drm_create_iommu_mapping - create a mapping structure
- *
- * @drm_dev: DRM device
- */
-int drm_create_iommu_mapping(struct drm_device *drm_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
-
- return __exynos_iommu_create_mapping(priv, EXYNOS_DEV_ADDR_START,
- EXYNOS_DEV_ADDR_SIZE);
-}
-
-/*
- * drm_release_iommu_mapping - release iommu mapping structure
- *
- * @drm_dev: DRM device
- */
-void drm_release_iommu_mapping(struct drm_device *drm_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
-
- __exynos_iommu_release_mapping(priv);
-}
-
-/*
- * drm_iommu_attach_device- attach device to iommu mapping
- *
- * @drm_dev: DRM device
- * @subdrv_dev: device to be attach
- *
- * This function should be called by sub drivers to attach it to iommu
- * mapping.
- */
-int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
- int ret;
-
- if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
- DRM_ERROR("Device %s lacks support for IOMMU\n",
- dev_name(subdrv_dev));
- return -EINVAL;
- }
-
- ret = configure_dma_max_seg_size(subdrv_dev);
- if (ret)
- return ret;
-
- ret = __exynos_iommu_attach(priv, subdrv_dev);
- if (ret)
- clear_dma_max_seg_size(subdrv_dev);
-
- return 0;
-}
-
-/*
- * drm_iommu_detach_device -detach device address space mapping from device
- *
- * @drm_dev: DRM device
- * @subdrv_dev: device to be detached
- *
- * This function should be called by sub drivers to detach it from iommu
- * mapping
- */
-void drm_iommu_detach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
-
- __exynos_iommu_detach(priv, subdrv_dev);
- clear_dma_max_seg_size(subdrv_dev);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
deleted file mode 100644
index 797d9ee5f15a..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* exynos_drm_iommu.h
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Authoer: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_IOMMU_H_
-#define _EXYNOS_DRM_IOMMU_H_
-
-#define EXYNOS_DEV_ADDR_START 0x20000000
-#define EXYNOS_DEV_ADDR_SIZE 0x40000000
-
-#ifdef CONFIG_DRM_EXYNOS_IOMMU
-
-#if defined(CONFIG_ARM_DMA_USE_IOMMU)
-#include <asm/dma-iommu.h>
-
-static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
- unsigned long start, unsigned long size)
-{
- priv->mapping = arm_iommu_create_mapping(&platform_bus_type, start,
- size);
- return IS_ERR(priv->mapping);
-}
-
-static inline void
-__exynos_iommu_release_mapping(struct exynos_drm_private *priv)
-{
- arm_iommu_release_mapping(priv->mapping);
-}
-
-static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
- struct device *dev)
-{
- if (dev->archdata.mapping)
- arm_iommu_detach_device(dev);
-
- return arm_iommu_attach_device(dev, priv->mapping);
-}
-
-static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
- struct device *dev)
-{
- arm_iommu_detach_device(dev);
-}
-
-#elif defined(CONFIG_IOMMU_DMA)
-#include <linux/dma-iommu.h>
-
-static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
- unsigned long start, unsigned long size)
-{
- priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
- return 0;
-}
-
-static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
-{
- priv->mapping = NULL;
-}
-
-static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
- struct device *dev)
-{
- struct iommu_domain *domain = priv->mapping;
-
- if (dev != priv->dma_dev)
- return iommu_attach_device(domain, dev);
- return 0;
-}
-
-static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
- struct device *dev)
-{
- struct iommu_domain *domain = priv->mapping;
-
- if (dev != priv->dma_dev)
- iommu_detach_device(domain, dev);
-}
-#else
-#error Unsupported architecture and IOMMU/DMA-mapping glue code
-#endif
-
-int drm_create_iommu_mapping(struct drm_device *drm_dev);
-
-void drm_release_iommu_mapping(struct drm_device *drm_dev);
-
-int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev);
-
-void drm_iommu_detach_device(struct drm_device *dev_dev,
- struct device *subdrv_dev);
-
-static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
-
- return priv->mapping ? true : false;
-}
-
-#else
-
-static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
-{
- return 0;
-}
-
-static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
-{
-}
-
-static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
- return 0;
-}
-
-static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
-}
-
-static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
-{
- return false;
-}
-
-#endif
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index a820a68429b9..8d67b2a54be3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -23,7 +23,6 @@
#include <drm/exynos_drm.h>
#include "regs-rotator.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
/*
@@ -244,7 +243,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &rot->ipp;
rot->drm_dev = drm_dev;
- drm_iommu_attach_device(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev);
exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
@@ -263,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &rot->ipp;
exynos_drm_ipp_unregister(drm_dev, ipp);
- drm_iommu_detach_device(rot->drm_dev, rot->dev);
+ exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
}
static const struct component_ops rotator_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index cd66774e817d..71270efa64f3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -23,7 +23,6 @@
#include "regs-scaler.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
#define scaler_read(offset) readl(scaler->regs + (offset))
@@ -452,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &scaler->ipp;
scaler->drm_dev = drm_dev;
- drm_iommu_attach_device(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev);
exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -473,7 +472,7 @@ static void scaler_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &scaler->ipp;
exynos_drm_ipp_unregister(drm_dev, ipp);
- drm_iommu_detach_device(scaler->drm_dev, scaler->dev);
+ exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
}
static const struct component_ops scaler_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e3a4ecbc503b..0573eab0e190 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -40,7 +40,6 @@
#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
#define MIXER_WIN_NR 3
#define VP_DEFAULT_WIN 2
@@ -381,19 +380,16 @@ static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_SCAN_MASK);
}
-static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
+static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, struct drm_display_mode *mode)
{
+ enum hdmi_quantization_range range = drm_default_rgb_quant_range(mode);
u32 val;
- switch (height) {
- case 480:
- case 576:
- val = MXR_CFG_RGB601_0_255;
- break;
- case 720:
- case 1080:
- default:
- val = MXR_CFG_RGB709_16_235;
+ if (mode->vdisplay < 720) {
+ val = MXR_CFG_RGB601;
+ } else {
+ val = MXR_CFG_RGB709;
+
/* Configure the BT.709 CSC matrix for full range RGB. */
mixer_reg_write(ctx, MXR_CM_COEFF_Y,
MXR_CSC_CT( 0.184, 0.614, 0.063) |
@@ -402,9 +398,13 @@ static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
MXR_CSC_CT(-0.102, -0.338, 0.440));
mixer_reg_write(ctx, MXR_CM_COEFF_CR,
MXR_CSC_CT( 0.440, -0.399, -0.040));
- break;
}
+ if (range == HDMI_QUANTIZATION_RANGE_FULL)
+ val |= MXR_CFG_QUANT_RANGE_FULL;
+ else
+ val |= MXR_CFG_QUANT_RANGE_LIMITED;
+
mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
}
@@ -461,7 +461,7 @@ static void mixer_commit(struct mixer_context *ctx)
struct drm_display_mode *mode = &ctx->crtc->base.state->adjusted_mode;
mixer_cfg_scan(ctx, mode->hdisplay, mode->vdisplay);
- mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
+ mixer_cfg_rgb_fmt(ctx, mode);
mixer_run(ctx);
}
@@ -878,12 +878,12 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
}
}
- return drm_iommu_attach_device(drm_dev, mixer_ctx->dev);
+ return exynos_drm_register_dma(drm_dev, mixer_ctx->dev);
}
static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
{
- drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+ exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev);
}
static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
diff --git a/drivers/gpu/drm/exynos/regs-decon5433.h b/drivers/gpu/drm/exynos/regs-decon5433.h
index 19ad9e47945e..63db6974bf14 100644
--- a/drivers/gpu/drm/exynos/regs-decon5433.h
+++ b/drivers/gpu/drm/exynos/regs-decon5433.h
@@ -104,6 +104,7 @@
#define WINCONx_BURSTLEN_16WORD (0x0 << 10)
#define WINCONx_BURSTLEN_8WORD (0x1 << 10)
#define WINCONx_BURSTLEN_4WORD (0x2 << 10)
+#define WINCONx_ALPHA_MUL_F (1 << 7)
#define WINCONx_BLD_PIX_F (1 << 6)
#define WINCONx_BPPMODE_MASK (0xf << 2)
#define WINCONx_BPPMODE_16BPP_565 (0x5 << 2)
@@ -116,11 +117,15 @@
#define WINCONx_BPPMODE_16BPP_A4444 (0xe << 2)
#define WINCONx_ALPHA_SEL_F (1 << 1)
#define WINCONx_ENWIN_F (1 << 0)
+#define WINCONx_BLEND_MODE_MASK (0xc2)
/* SHADOWCON */
#define SHADOWCON_PROTECT_MASK GENMASK(14, 10)
#define SHADOWCON_Wx_PROTECT(n) (1 << (10 + (n)))
+/* VIDOSDxC */
+#define VIDOSDxC_ALPHA0_RGB_MASK (0xffffff)
+
/* VIDOSDxD */
#define VIDOSD_Wx_ALPHA_R_F(n) (((n) & 0xff) << 16)
#define VIDOSD_Wx_ALPHA_G_F(n) (((n) & 0xff) << 8)
@@ -206,4 +211,21 @@
#define CRCCTRL_CRCEN (0x1 << 0)
#define CRCCTRL_MASK (0x7)
+/* BLENDCON */
+#define BLEND_NEW (1 << 0)
+
+/* BLENDERQx */
+#define BLENDERQ_ZERO 0x0
+#define BLENDERQ_ONE 0x1
+#define BLENDERQ_ALPHA_A 0x2
+#define BLENDERQ_ONE_MINUS_ALPHA_A 0x3
+#define BLENDERQ_ALPHA0 0x6
+#define BLENDERQ_Q_FUNC_F(n) ((n) << 18)
+#define BLENDERQ_P_FUNC_F(n) ((n) << 12)
+#define BLENDERQ_B_FUNC_F(n) ((n) << 6)
+#define BLENDERQ_A_FUNC_F(n) ((n) << 0)
+
#endif /* EXYNOS_REGS_DECON5433_H */
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index d2b8194a07bf..5ff095b0c1b3 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -85,10 +85,11 @@
/* bits for MXR_CFG */
#define MXR_CFG_LAYER_UPDATE (1 << 31)
#define MXR_CFG_LAYER_UPDATE_COUNT_MASK (3 << 29)
-#define MXR_CFG_RGB601_0_255 (0 << 9)
-#define MXR_CFG_RGB601_16_235 (1 << 9)
-#define MXR_CFG_RGB709_0_255 (2 << 9)
-#define MXR_CFG_RGB709_16_235 (3 << 9)
+#define MXR_CFG_QUANT_RANGE_FULL (0 << 9)
+#define MXR_CFG_QUANT_RANGE_LIMITED (1 << 9)
+#define MXR_CFG_RGB601 (0 << 10)
+#define MXR_CFG_RGB709 (1 << 10)
+
#define MXR_CFG_RGB_FMT_MASK 0x600
#define MXR_CFG_OUT_YUV444 (0 << 8)
#define MXR_CFG_OUT_RGB888 (1 << 8)
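The regs-mixer.h hunk above splits what used to be a combined colorimetry/range encoding: bit 9 now carries the quantization range and bit 10 the colorimetry, with MXR_CFG_RGB_FMT_MASK (0x600) still covering both bits. A minimal sketch of how mixer_cfg_rgb_fmt() earlier in this series combines them (mixer_rgb_fmt_bits() is a made-up helper for illustration, not driver code):

/* Sketch only: how the reworked MXR_CFG colorimetry/range bits combine. */
#define MXR_CFG_QUANT_RANGE_FULL	(0 << 9)
#define MXR_CFG_QUANT_RANGE_LIMITED	(1 << 9)
#define MXR_CFG_RGB601			(0 << 10)
#define MXR_CFG_RGB709			(1 << 10)
#define MXR_CFG_RGB_FMT_MASK		0x600	/* bits 10:9 */

static unsigned int mixer_rgb_fmt_bits(int vdisplay, int full_range)
{
	unsigned int val;

	/* Colorimetry follows the mode: SD (<720 lines) uses BT.601, HD uses BT.709 */
	val = (vdisplay < 720) ? MXR_CFG_RGB601 : MXR_CFG_RGB709;

	/* Quantization range is now signalled independently of colorimetry */
	val |= full_range ? MXR_CFG_QUANT_RANGE_FULL : MXR_CFG_QUANT_RANGE_LIMITED;

	return val & MXR_CFG_RGB_FMT_MASK;
}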
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 1c2857f13ad4..19b5fe5016bf 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -75,6 +75,7 @@ i915-y += i915_cmd_parser.o \
i915_gemfs.o \
i915_query.o \
i915_request.o \
+ i915_scheduler.o \
i915_timeline.o \
i915_trace_points.o \
i915_vma.o \
@@ -112,6 +113,8 @@ i915-y += intel_audio.o \
intel_bios.o \
intel_cdclk.o \
intel_color.o \
+ intel_combo_phy.o \
+ intel_connector.o \
intel_display.o \
intel_dpio_phy.o \
intel_dpll_mgr.o \
@@ -120,9 +123,9 @@ i915-y += intel_audio.o \
intel_frontbuffer.o \
intel_hdcp.o \
intel_hotplug.o \
- intel_modes.o \
intel_overlay.o \
intel_psr.o \
+ intel_quirks.o \
intel_sideband.o \
intel_sprite.o
i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
@@ -142,6 +145,7 @@ i915-y += dvo_ch7017.o \
intel_dp_link_training.o \
intel_dp_mst.o \
intel_dp.o \
+ intel_dsi.o \
intel_dsi_dcs_backlight.o \
intel_dsi_vbt.o \
intel_dvo.o \
@@ -153,14 +157,17 @@ i915-y += dvo_ch7017.o \
intel_sdvo.o \
intel_tv.o \
vlv_dsi.o \
- vlv_dsi_pll.o
+ vlv_dsi_pll.o \
+ intel_vdsc.o
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/i915_random.o \
selftests/i915_selftest.o \
- selftests/igt_flush_test.o
+ selftests/igt_flush_test.o \
+ selftests/igt_reset.o \
+ selftests/igt_spinner.o
# virtual gpu code
i915-y += i915_vgpu.o
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index ea34003d6dd2..b8fbe3fabea3 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -334,6 +334,28 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}
+static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+ struct i915_gem_context *ctx)
+{
+ struct intel_vgpu_mm *mm = workload->shadow_mm;
+ struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+ int i = 0;
+
+ if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
+ return -1;
+
+ if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+ px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
+ } else {
+ for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
+ px_dma(ppgtt->pdp.page_directory[i]) =
+ mm->ppgtt_mm.shadow_pdps[i];
+ }
+ }
+
+ return 0;
+}
+
/**
* intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
* shadow it as well, including the ring buffer, wa_ctx and ctx.
@@ -358,6 +380,12 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (workload->req)
return 0;
+ ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+ if (ret < 0) {
+ gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+ return ret;
+ }
+
/* Pin the shadow context by GVT even though the shadow context will be pinned
 * when i915 allocates the request. That is because GVT will update the guest
 * context from the shadow context when the workload is completed, and at that
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4f3ac0a12889..38dcee1ca062 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -943,30 +943,30 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
size_t count, loff_t *pos)
{
- struct i915_gpu_state *error = file->private_data;
- struct drm_i915_error_state_buf str;
+ struct i915_gpu_state *error;
ssize_t ret;
- loff_t tmp;
+ void *buf;
+ error = file->private_data;
if (!error)
return 0;
- ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
- if (ret)
- return ret;
+ /* Bounce buffer required because of kernfs __user API convenience. */
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- ret = i915_error_state_to_str(&str, error);
- if (ret)
+ ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
+ if (ret <= 0)
goto out;
- tmp = 0;
- ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
- if (ret < 0)
- goto out;
+ if (!copy_to_user(ubuf, buf, ret))
+ *pos += ret;
+ else
+ ret = -EFAULT;
- *pos = str.start + ret;
out:
- i915_error_state_buf_release(&str);
+ kfree(buf);
return ret;
}
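The rewritten gpu_state_read() above drops the error-state string buffer in favour of a plain bounce buffer: fill a kernel allocation via i915_gpu_state_copy_to_buffer(), copy it to user space, and only advance *pos on success. A minimal sketch of that read pattern with generic names (bounce_read() and the fill() callback are placeholders, not the i915 API):

/* Bounce-buffer read sketch; fill() reports bytes produced, 0 on EOF, <0 on error. */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static ssize_t bounce_read(char __user *ubuf, size_t count, loff_t *pos,
			   ssize_t (*fill)(void *dst, loff_t off, size_t len))
{
	void *buf;
	ssize_t ret;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = fill(buf, *pos, count);
	if (ret > 0) {
		if (copy_to_user(ubuf, buf, ret))
			ret = -EFAULT;
		else
			*pos += ret;	/* advance only on a successful copy */
	}

	kfree(buf);
	return ret;
}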
@@ -1788,6 +1788,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
if (!IS_GEN5(dev_priv))
return -ENODEV;
+ intel_runtime_pm_get(dev_priv);
+
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
@@ -1802,6 +1804,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
seq_printf(m, "GFX power: %ld\n", gfx);
seq_printf(m, "Total power: %ld\n", chipset + gfx);
+ intel_runtime_pm_put(dev_priv);
+
return 0;
}
@@ -2215,8 +2219,23 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ u32 act_freq = rps->cur_freq;
struct drm_file *file;
+ if (intel_runtime_pm_get_if_in_use(dev_priv)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ mutex_lock(&dev_priv->pcu_lock);
+ act_freq = vlv_punit_read(dev_priv,
+ PUNIT_REG_GPU_FREQ_STS);
+ act_freq = (act_freq >> 8) & 0xff;
+ mutex_unlock(&dev_priv->pcu_lock);
+ } else {
+ act_freq = intel_get_cagf(dev_priv,
+ I915_READ(GEN6_RPSTAT1));
+ }
+ intel_runtime_pm_put(dev_priv);
+ }
+
seq_printf(m, "RPS enabled? %d\n", rps->enabled);
seq_printf(m, "GPU busy? %s [%d requests]\n",
yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
@@ -2224,8 +2243,9 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
- seq_printf(m, "Frequency requested %d\n",
- intel_gpu_freq(dev_priv, rps->cur_freq));
+ seq_printf(m, "Frequency requested %d, actual %d\n",
+ intel_gpu_freq(dev_priv, rps->cur_freq),
+ intel_gpu_freq(dev_priv, act_freq));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
intel_gpu_freq(dev_priv, rps->min_freq),
intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
@@ -2900,16 +2920,15 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
- if (IS_KABYLAKE(dev_priv) ||
- (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
- seq_printf(m, "DC3 -> DC5 count: %d\n",
- I915_READ(SKL_CSR_DC3_DC5_COUNT));
+ if (WARN_ON(INTEL_GEN(dev_priv) > 11))
+ goto out;
+
+ seq_printf(m, "DC3 -> DC5 count: %d\n",
+ I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
+ SKL_CSR_DC3_DC5_COUNT));
+ if (!IS_GEN9_LP(dev_priv))
seq_printf(m, "DC5 -> DC6 count: %d\n",
I915_READ(SKL_CSR_DC5_DC6_COUNT));
- } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
- seq_printf(m, "DC3 -> DC5 count: %d\n",
- I915_READ(BXT_CSR_DC3_DC5_COUNT));
- }
out:
seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
@@ -3049,16 +3068,17 @@ static void intel_connector_info(struct seq_file *m,
seq_printf(m, "connector %d: type %s, status: %s\n",
connector->base.id, connector->name,
drm_get_connector_status_name(connector->status));
- if (connector->status == connector_status_connected) {
- seq_printf(m, "\tname: %s\n", connector->display_info.name);
- seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
- connector->display_info.width_mm,
- connector->display_info.height_mm);
- seq_printf(m, "\tsubpixel order: %s\n",
- drm_get_subpixel_order_name(connector->display_info.subpixel_order));
- seq_printf(m, "\tCEA rev: %d\n",
- connector->display_info.cea_rev);
- }
+
+ if (connector->status == connector_status_disconnected)
+ return;
+
+ seq_printf(m, "\tname: %s\n", connector->display_info.name);
+ seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
+ connector->display_info.width_mm,
+ connector->display_info.height_mm);
+ seq_printf(m, "\tsubpixel order: %s\n",
+ drm_get_subpixel_order_name(connector->display_info.subpixel_order));
+ seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
if (!intel_encoder)
return;
@@ -3355,13 +3375,15 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
- struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
- int i;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
+ struct i915_wa *wa;
+ unsigned int i;
- seq_printf(m, "Workarounds applied: %d\n", wa->count);
- for (i = 0; i < wa->count; ++i)
+ seq_printf(m, "Workarounds applied: %u\n", wal->count);
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
- wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
+ i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
return 0;
}
@@ -3421,31 +3443,32 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
- struct skl_ddb_allocation *ddb;
struct skl_ddb_entry *entry;
- enum pipe pipe;
- int plane;
+ struct intel_crtc *crtc;
if (INTEL_GEN(dev_priv) < 9)
return -ENODEV;
drm_modeset_lock_all(dev);
- ddb = &dev_priv->wm.skl_hw.ddb;
-
seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
- for_each_pipe(dev_priv, pipe) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+
seq_printf(m, "Pipe %c\n", pipe_name(pipe));
- for_each_universal_plane(dev_priv, pipe, plane) {
- entry = &ddb->plane[pipe][plane];
- seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
entry->start, entry->end,
skl_ddb_entry_size(entry));
}
- entry = &ddb->plane[pipe][PLANE_CURSOR];
+ entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
entry->end, skl_ddb_entry_size(entry));
}
@@ -4172,6 +4195,7 @@ i915_drop_caches_set(void *data, u64 val)
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
+ intel_runtime_pm_get(i915);
if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
i915_gem_set_wedged(i915);
@@ -4181,7 +4205,7 @@ i915_drop_caches_set(void *data, u64 val)
if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
- return ret;
+ goto out;
if (val & DROP_ACTIVE)
ret = i915_gem_wait_for_idle(i915,
@@ -4189,11 +4213,8 @@ i915_drop_caches_set(void *data, u64 val)
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- if (ret == 0 && val & DROP_RESET_SEQNO) {
- intel_runtime_pm_get(i915);
+ if (ret == 0 && val & DROP_RESET_SEQNO)
ret = i915_gem_set_global_seqno(&i915->drm, 1);
- intel_runtime_pm_put(i915);
- }
if (val & DROP_RETIRE)
i915_retire_requests(i915);
@@ -4231,6 +4252,9 @@ i915_drop_caches_set(void *data, u64 val)
if (val & DROP_FREED)
i915_gem_drain_freed_objects(i915);
+out:
+ intel_runtime_pm_put(i915);
+
return ret;
}
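The i915_drop_caches_set() hunks above hold the runtime-PM wakeref across the whole function and convert the early return into goto out so the wakeref is always released. A generic sketch of that idiom (the intel_runtime_pm_* calls are the real i915 helpers of this era; SOME_FLAG and the step_* functions are placeholders):

#include "i915_drv.h"

static int do_work_with_wakeref(struct drm_i915_private *i915, u64 val)
{
	int ret = 0;

	intel_runtime_pm_get(i915);

	if (val & SOME_FLAG) {
		ret = step_that_may_fail(i915);
		if (ret)
			goto out;	/* a bare "return ret" here would leak the wakeref */
	}

	step_that_cannot_fail(i915);
out:
	intel_runtime_pm_put(i915);
	return ret;
}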
@@ -4331,7 +4355,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
for (s = 0; s < info->sseu.max_slices; s++) {
/*
* FIXME: Valid SS Mask respects the spec and read
- * only valid bits for those registers, excluding reserverd
+ * only valid bits for those registers, excluding reserved
* although this seems wrong because it would leave many
* subslices without ACK.
*/
@@ -4571,6 +4595,13 @@ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = m->private;
struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ /* Synchronize with everything first in case there's been an HPD
+ * storm, but we haven't finished handling it in the kernel yet
+ */
+ synchronize_irq(dev_priv->drm.irq);
+ flush_work(&dev_priv->hotplug.dig_port_work);
+ flush_work(&dev_priv->hotplug.hotplug_work);
+
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
yesno(delayed_work_pending(&hotplug->reenable_work)));
@@ -4641,24 +4672,122 @@ static const struct file_operations i915_hpd_storm_ctl_fops = {
.write = i915_hpd_storm_ctl_write
};
+static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+
+ seq_printf(m, "Enabled: %s\n",
+ yesno(dev_priv->hotplug.hpd_short_storm_enabled));
+
+ return 0;
+}
+
+static int
+i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_hpd_short_storm_ctl_show,
+ inode->i_private);
+}
+
+static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ char *newline;
+ char tmp[16];
+ int i;
+ bool new_state;
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ /* Strip newline, if any */
+ newline = strchr(tmp, '\n');
+ if (newline)
+ *newline = '\0';
+
+ /* Reset to the "default" state for this system */
+ if (strcmp(tmp, "reset") == 0)
+ new_state = !HAS_DP_MST(dev_priv);
+ else if (kstrtobool(tmp, &new_state) != 0)
+ return -EINVAL;
+
+ DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
+ new_state ? "En" : "Dis");
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ hotplug->hpd_short_storm_enabled = new_state;
+ /* Reset the HPD storm stats so we don't accidentally trigger a storm */
+ for_each_hpd_pin(i)
+ hotplug->stats[i].count = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* Re-enable hpd immediately if we were in an irq storm */
+ flush_delayed_work(&dev_priv->hotplug.reenable_work);
+
+ return len;
+}
+
+static const struct file_operations i915_hpd_short_storm_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_hpd_short_storm_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_hpd_short_storm_ctl_write,
+};
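The new i915_hpd_short_storm_ctl debugfs file accepts kstrtobool-style input ("0"/"1"/"y"/"n") or the literal string "reset", which restores the platform default (!HAS_DP_MST). A small user-space sketch; the path assumes the default debugfs mount point and DRM minor 0, which may differ on a given system:

/* Equivalent to: echo reset > /sys/kernel/debug/dri/0/i915_hpd_short_storm_ctl */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/i915_hpd_short_storm_ctl";
	const char *val = "reset\n";	/* kstrtobool values such as "0"/"1"/"y"/"n" also work */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}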
+
static int i915_drrs_ctl_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *intel_crtc;
- struct intel_encoder *encoder;
- struct intel_dp *intel_dp;
+ struct intel_crtc *crtc;
if (INTEL_GEN(dev_priv) < 7)
return -ENODEV;
- drm_modeset_lock_all(dev);
- for_each_intel_crtc(dev, intel_crtc) {
- if (!intel_crtc->base.state->active ||
- !intel_crtc->config->has_drrs)
- continue;
+ for_each_intel_crtc(dev, crtc) {
+ struct drm_connector_list_iter conn_iter;
+ struct intel_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ struct drm_crtc_commit *commit;
+ int ret;
+
+ ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (!crtc_state->base.active ||
+ !crtc_state->has_drrs)
+ goto out;
- for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
+ commit = crtc_state->base.commit;
+ if (commit) {
+ ret = wait_for_completion_interruptible(&commit->hw_done);
+ if (ret)
+ goto out;
+ }
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+ struct intel_dp *intel_dp;
+
+ if (!(crtc_state->base.connector_mask &
+ drm_connector_mask(connector)))
+ continue;
+
+ encoder = intel_attached_encoder(connector);
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
@@ -4668,13 +4797,18 @@ static int i915_drrs_ctl_set(void *data, u64 val)
intel_dp = enc_to_intel_dp(&encoder->base);
if (val)
intel_edp_drrs_enable(intel_dp,
- intel_crtc->config);
+ crtc_state);
else
intel_edp_drrs_disable(intel_dp,
- intel_crtc->config);
+ crtc_state);
}
+ drm_connector_list_iter_end(&conn_iter);
+
+out:
+ drm_modeset_unlock(&crtc->base.mutex);
+ if (ret)
+ return ret;
}
- drm_modeset_unlock_all(dev);
return 0;
}
@@ -4818,6 +4952,7 @@ static const struct i915_debugfs_files {
{"i915_guc_log_level", &i915_guc_log_level_fops},
{"i915_guc_log_relay", &i915_guc_log_relay_fops},
{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
+ {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
{"i915_ipc_status", &i915_ipc_status_fops},
{"i915_drrs_ctl", &i915_drrs_ctl_fops},
{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
@@ -4899,13 +5034,10 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
continue;
err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
- if (err <= 0) {
- DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
- size, b->offset, err);
- continue;
- }
-
- seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
+ if (err < 0)
+ seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
+ else
+ seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
}
return 0;
@@ -4934,6 +5066,28 @@ static int i915_panel_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);
+static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ /* HDCP is supported only if the connector registered a shim */
+ if (!intel_connector->hdcp.shim)
+ return -EINVAL;
+
+ seq_printf(m, "%s:%d HDCP version: ", connector->name,
+ connector->base.id);
+ seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
+ "None" : "HDCP1.4");
+ seq_puts(m, "\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+
/**
* i915_debugfs_connector_add - add i915 specific connector debugfs files
* @connector: pointer to a registered drm_connector
@@ -4963,5 +5117,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
connector, &i915_psr_sink_status_fops);
}
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
+ connector, &i915_hdcp_sink_capability_fops);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ffdbbac4400e..b310a897a4ad 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -53,6 +53,7 @@
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"
+#include "intel_workarounds.h"
static struct drm_driver driver;
@@ -287,7 +288,7 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
* Use PCH_NOP (PCH but no South Display) for PCH platforms without
* display.
*/
- if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) {
+ if (pch && !HAS_DISPLAY(dev_priv)) {
DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
dev_priv->pch_type = PCH_NOP;
dev_priv->pch_id = 0;
@@ -345,7 +346,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = HAS_WT(dev_priv);
break;
case I915_PARAM_HAS_ALIASING_PPGTT:
- value = USES_PPGTT(dev_priv);
+ value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
break;
case I915_PARAM_HAS_SEMAPHORES:
value = HAS_LEGACY_SEMAPHORES(dev_priv);
@@ -645,6 +646,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (i915_inject_load_failure())
return -ENODEV;
+ if (HAS_DISPLAY(dev_priv)) {
+ ret = drm_vblank_init(&dev_priv->drm,
+ INTEL_INFO(dev_priv)->num_pipes);
+ if (ret)
+ goto out;
+ }
+
intel_bios_init(dev_priv);
/* If we have > 1 VGA cards, then we need to arbitrate access
@@ -687,9 +695,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_modeset;
- intel_setup_overlay(dev_priv);
+ intel_overlay_setup(dev_priv);
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return 0;
ret = intel_fbdev_init(dev);
@@ -699,6 +707,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(dev_priv);
+ intel_init_ipc(dev_priv);
+
return 0;
cleanup_gem:
@@ -859,6 +869,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
pre |= IS_HSW_EARLY_SDV(dev_priv);
pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
+ pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
if (pre) {
DRM_ERROR("This is a pre-production stepping. "
@@ -1030,6 +1041,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
err_uncore:
intel_uncore_fini(dev_priv);
+ i915_mmio_cleanup(dev_priv);
err_bridge:
pci_dev_put(dev_priv->bridge_dev);
@@ -1049,17 +1061,6 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
- /*
- * i915.enable_ppgtt is read-only, so do an early pass to validate the
- * user's requested state against the hardware/driver capabilities. We
- * do this now so that we can print out any log messages once rather
- * than every time we check intel_enable_ppgtt().
- */
- i915_modparams.enable_ppgtt =
- intel_sanitize_enable_ppgtt(dev_priv,
- i915_modparams.enable_ppgtt);
- DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
-
intel_gvt_sanitize_options(dev_priv);
}
@@ -1340,7 +1341,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
/* Need to calculate bandwidth only for Gen9 */
if (IS_BROXTON(dev_priv))
ret = bxt_get_dram_info(dev_priv);
- else if (INTEL_GEN(dev_priv) == 9)
+ else if (IS_GEN9(dev_priv))
ret = skl_get_dram_info(dev_priv);
else
ret = skl_dram_get_channels_info(dev_priv);
@@ -1375,6 +1376,29 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
+ if (HAS_PPGTT(dev_priv)) {
+ if (intel_vgpu_active(dev_priv) &&
+ !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) {
+ i915_report_error(dev_priv,
+ "incompatible vGPU found, support for isolated ppGTT required\n");
+ return -ENXIO;
+ }
+ }
+
+ if (HAS_EXECLISTS(dev_priv)) {
+ /*
+ * Older GVT emulation depends upon intercepting CSB mmio,
+ * which we no longer use, preferring to use the HWSP cache
+ * instead.
+ */
+ if (intel_vgpu_active(dev_priv) &&
+ !intel_vgpu_has_hwsp_emulation(dev_priv)) {
+ i915_report_error(dev_priv,
+ "old vGPU host found, support for HWSP emulation required\n");
+ return -ENXIO;
+ }
+ }
+
intel_sanitize_options(dev_priv);
i915_perf_init(dev_priv);
@@ -1444,6 +1468,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_uncore_sanitize(dev_priv);
+ intel_gt_init_workarounds(dev_priv);
i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1543,7 +1568,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
} else
DRM_ERROR("Failed to register driver for userspace access!\n");
- if (INTEL_INFO(dev_priv)->num_pipes) {
+ if (HAS_DISPLAY(dev_priv)) {
/* Must be done after probing outputs */
intel_opregion_register(dev_priv);
acpi_video_register();
@@ -1567,7 +1592,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
* We need to coordinate the hotplugs with the asynchronous fbdev
* configuration, for which we use the fbdev->async_cookie.
*/
- if (INTEL_INFO(dev_priv)->num_pipes)
+ if (HAS_DISPLAY(dev_priv))
drm_kms_helper_poll_init(dev);
intel_power_domains_enable(dev_priv);
@@ -1630,14 +1655,16 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
(struct intel_device_info *)ent->driver_data;
struct intel_device_info *device_info;
struct drm_i915_private *i915;
+ int err;
i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
if (!i915)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- if (drm_dev_init(&i915->drm, &driver, &pdev->dev)) {
+ err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
+ if (err) {
kfree(i915);
- return NULL;
+ return ERR_PTR(err);
}
i915->drm.pdev = pdev;
@@ -1650,8 +1677,8 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
device_info->device_id = pdev->device;
BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
- sizeof(device_info->platform_mask) * BITS_PER_BYTE);
- BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
+ BITS_PER_TYPE(device_info->platform_mask));
+ BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
return i915;
}
@@ -1686,8 +1713,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
dev_priv = i915_driver_create(pdev, ent);
- if (!dev_priv)
- return -ENOMEM;
+ if (IS_ERR(dev_priv))
+ return PTR_ERR(dev_priv);
/* Disable nuclear pageflip by default on pre-ILK */
if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
@@ -1711,26 +1738,12 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto out_cleanup_mmio;
- /*
- * TODO: move the vblank init and parts of modeset init steps into one
- * of the i915_driver_init_/i915_driver_register functions according
- * to the role/effect of the given init step.
- */
- if (INTEL_INFO(dev_priv)->num_pipes) {
- ret = drm_vblank_init(&dev_priv->drm,
- INTEL_INFO(dev_priv)->num_pipes);
- if (ret)
- goto out_cleanup_hw;
- }
-
ret = i915_load_modeset_init(&dev_priv->drm);
if (ret < 0)
goto out_cleanup_hw;
i915_driver_register(dev_priv);
- intel_init_ipc(dev_priv);
-
enable_rpm_wakeref_asserts(dev_priv);
i915_welcome_messages(dev_priv);
@@ -1782,7 +1795,6 @@ void i915_driver_unload(struct drm_device *dev)
i915_reset_error_state(dev_priv);
i915_gem_fini(dev_priv);
- intel_fbc_cleanup_cfb(dev_priv);
intel_power_domains_fini_hw(dev_priv);
@@ -1920,9 +1932,7 @@ static int i915_drm_suspend(struct drm_device *dev)
i915_save_state(dev_priv);
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
- intel_opregion_notify_adapter(dev_priv, opregion_target_state);
-
- intel_opregion_unregister(dev_priv);
+ intel_opregion_suspend(dev_priv, opregion_target_state);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -1963,7 +1973,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
get_suspend_mode(dev_priv, hibernation));
ret = 0;
- if (IS_GEN9_LP(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
bxt_enable_dc9(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_enable_pc8(dev_priv);
@@ -2041,7 +2051,6 @@ static int i915_drm_resume(struct drm_device *dev)
i915_restore_state(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
- intel_opregion_setup(dev_priv);
intel_init_pch_refclk(dev_priv);
@@ -2083,12 +2092,10 @@ static int i915_drm_resume(struct drm_device *dev)
* */
intel_hpd_init(dev_priv);
- intel_opregion_register(dev_priv);
+ intel_opregion_resume(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
- intel_opregion_notify_adapter(dev_priv, PCI_D0);
-
intel_power_domains_enable(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
@@ -2156,7 +2163,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_resume_early(dev_priv);
- if (IS_GEN9_LP(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
gen9_sanitize_dc_state(dev_priv);
bxt_disable_dc9(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -2923,7 +2930,10 @@ static int intel_runtime_suspend(struct device *kdev)
intel_uncore_suspend(dev_priv);
ret = 0;
- if (IS_GEN9_LP(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ icl_display_core_uninit(dev_priv);
+ bxt_enable_dc9(dev_priv);
+ } else if (IS_GEN9_LP(dev_priv)) {
bxt_display_core_uninit(dev_priv);
bxt_enable_dc9(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3008,7 +3018,18 @@ static int intel_runtime_resume(struct device *kdev)
if (intel_uncore_unclaimed_mmio(dev_priv))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
- if (IS_GEN9_LP(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ bxt_disable_dc9(dev_priv);
+ icl_display_core_init(dev_priv, true);
+ if (dev_priv->csr.dmc_payload) {
+ if (dev_priv->csr.allowed_dc_mask &
+ DC_STATE_EN_UPTO_DC6)
+ skl_enable_dc6(dev_priv);
+ else if (dev_priv->csr.allowed_dc_mask &
+ DC_STATE_EN_UPTO_DC5)
+ gen9_enable_dc5(dev_priv);
+ }
+ } else if (IS_GEN9_LP(dev_priv)) {
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);
if (dev_priv->csr.dmc_payload &&
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9102571e9692..b1c31967194b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -53,7 +53,9 @@
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
+#include <drm/drm_dsc.h>
+#include "i915_fixed.h"
#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"
@@ -67,6 +69,7 @@
#include "intel_ringbuffer.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"
+#include "intel_workarounds.h"
#include "intel_uc.h"
#include "i915_gem.h"
@@ -87,8 +90,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20180921"
-#define DRIVER_TIMESTAMP 1537521997
+#define DRIVER_DATE "20181204"
+#define DRIVER_TIMESTAMP 1543944377
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -127,144 +130,6 @@ bool i915_error_injected(void);
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
-typedef struct {
- uint32_t val;
-} uint_fixed_16_16_t;
-
-#define FP_16_16_MAX ({ \
- uint_fixed_16_16_t fp; \
- fp.val = UINT_MAX; \
- fp; \
-})
-
-static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
-{
- if (val.val == 0)
- return true;
- return false;
-}
-
-static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
-{
- uint_fixed_16_16_t fp;
-
- WARN_ON(val > U16_MAX);
-
- fp.val = val << 16;
- return fp;
-}
-
-static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
-{
- return DIV_ROUND_UP(fp.val, 1 << 16);
-}
-
-static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
-{
- return fp.val >> 16;
-}
-
-static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
- uint_fixed_16_16_t min2)
-{
- uint_fixed_16_16_t min;
-
- min.val = min(min1.val, min2.val);
- return min;
-}
-
-static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
- uint_fixed_16_16_t max2)
-{
- uint_fixed_16_16_t max;
-
- max.val = max(max1.val, max2.val);
- return max;
-}
-
-static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
-{
- uint_fixed_16_16_t fp;
- WARN_ON(val > U32_MAX);
- fp.val = (uint32_t) val;
- return fp;
-}
-
-static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
- uint_fixed_16_16_t d)
-{
- return DIV_ROUND_UP(val.val, d.val);
-}
-
-static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
- uint_fixed_16_16_t mul)
-{
- uint64_t intermediate_val;
-
- intermediate_val = (uint64_t) val * mul.val;
- intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
- WARN_ON(intermediate_val > U32_MAX);
- return (uint32_t) intermediate_val;
-}
-
-static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
- uint_fixed_16_16_t mul)
-{
- uint64_t intermediate_val;
-
- intermediate_val = (uint64_t) val.val * mul.val;
- intermediate_val = intermediate_val >> 16;
- return clamp_u64_to_fixed16(intermediate_val);
-}
-
-static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
-{
- uint64_t interm_val;
-
- interm_val = (uint64_t)val << 16;
- interm_val = DIV_ROUND_UP_ULL(interm_val, d);
- return clamp_u64_to_fixed16(interm_val);
-}
-
-static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
- uint_fixed_16_16_t d)
-{
- uint64_t interm_val;
-
- interm_val = (uint64_t)val << 16;
- interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
- WARN_ON(interm_val > U32_MAX);
- return (uint32_t) interm_val;
-}
-
-static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
- uint_fixed_16_16_t mul)
-{
- uint64_t intermediate_val;
-
- intermediate_val = (uint64_t) val * mul.val;
- return clamp_u64_to_fixed16(intermediate_val);
-}
-
-static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
- uint_fixed_16_16_t add2)
-{
- uint64_t interm_sum;
-
- interm_sum = (uint64_t) add1.val + add2.val;
- return clamp_u64_to_fixed16(interm_sum);
-}
-
-static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
- uint32_t add2)
-{
- uint64_t interm_sum;
- uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);
-
- interm_sum = (uint64_t) add1.val + interm_add2.val;
- return clamp_u64_to_fixed16(interm_sum);
-}
-
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
@@ -283,7 +148,8 @@ enum hpd_pin {
#define for_each_hpd_pin(__pin) \
for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
-#define HPD_STORM_DEFAULT_THRESHOLD 5
+/* Threshold == 5 for long IRQs, 50 for short */
+#define HPD_STORM_DEFAULT_THRESHOLD 50
struct i915_hotplug {
struct work_struct hotplug_work;
@@ -308,6 +174,8 @@ struct i915_hotplug {
bool poll_enabled;
unsigned int hpd_storm_threshold;
+ /* Whether or not to count short HPD IRQs in HPD storms */
+ u8 hpd_short_storm_enabled;
/*
* if we get a HPD irq from DP and a HPD irq from non-DP
@@ -465,8 +333,10 @@ struct drm_i915_display_funcs {
struct intel_csr {
struct work_struct work;
const char *fw_path;
+ uint32_t required_version;
+ uint32_t max_fw_size; /* bytes */
uint32_t *dmc_payload;
- uint32_t dmc_fw_size;
+ uint32_t dmc_fw_size; /* dwords */
uint32_t version;
uint32_t mmio_count;
i915_reg_t mmioaddr[8];
@@ -546,6 +416,8 @@ struct intel_fbc {
int adjusted_y;
int y;
+
+ uint16_t pixel_blend_mode;
} plane;
struct {
@@ -624,17 +496,19 @@ struct i915_psr {
bool sink_support;
bool prepared, enabled;
struct intel_dp *dp;
+ enum pipe pipe;
bool active;
struct work_struct work;
unsigned busy_frontbuffer_bits;
bool sink_psr2_support;
bool link_standby;
bool colorimetry_support;
- bool alpm;
bool psr2_enabled;
u8 sink_sync_latency;
ktime_t last_entry_attempt;
ktime_t last_exit;
+ bool sink_not_reliable;
+ bool irq_aux_error;
};
enum intel_pch {
@@ -918,6 +792,11 @@ struct i915_power_well_desc {
/* The pw is backing the VGA functionality */
bool has_vga:1;
bool has_fuses:1;
+ /*
+ * The pw is for an ICL+ TypeC PHY port in
+ * Thunderbolt mode.
+ */
+ bool is_tc_tbt:1;
} hsw;
};
const struct i915_power_well_ops *ops;
@@ -1042,17 +921,6 @@ struct i915_gem_mm {
#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
-#define DP_AUX_A 0x40
-#define DP_AUX_B 0x10
-#define DP_AUX_C 0x20
-#define DP_AUX_D 0x30
-#define DP_AUX_E 0x50
-#define DP_AUX_F 0x60
-
-#define DDC_PIN_B 0x05
-#define DDC_PIN_C 0x04
-#define DDC_PIN_D 0x06
-
struct ddi_vbt_port_info {
int max_tmds_clock;
@@ -1099,6 +967,7 @@ struct intel_vbt_data {
unsigned int panel_type:4;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ enum drm_panel_orientation orientation;
enum drrs_support_type drrs_type;
@@ -1144,6 +1013,7 @@ struct intel_vbt_data {
u8 *data;
const u8 *sequence[MIPI_SEQ_MAX];
u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
+ enum drm_panel_orientation orientation;
} dsi;
int crt_ddc_pin;
@@ -1228,9 +1098,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
}
struct skl_ddb_allocation {
- /* packed/y */
- struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
- struct skl_ddb_entry uv_plane[I915_MAX_PIPES][I915_MAX_PLANES];
u8 enabled_slices; /* GEN11 has configurable 2 slices */
};
@@ -1240,9 +1107,9 @@ struct skl_ddb_values {
};
struct skl_wm_level {
- bool plane_en;
uint16_t plane_res_b;
uint8_t plane_res_l;
+ bool plane_en;
};
/* Stores plane specific WM parameters */
@@ -1323,20 +1190,6 @@ struct i915_frontbuffer_tracking {
unsigned flip_bits;
};
-struct i915_wa_reg {
- u32 addr;
- u32 value;
- /* bitmask representing WA bits */
- u32 mask;
-};
-
-#define I915_MAX_WA_REGS 16
-
-struct i915_workarounds {
- struct i915_wa_reg reg[I915_MAX_WA_REGS];
- u32 count;
-};
-
struct i915_virtual_gpu {
bool active;
u32 caps;
@@ -1520,30 +1373,12 @@ struct i915_oa_ops {
bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
/**
- * @init_oa_buffer: Resets the head and tail pointers of the
- * circular buffer for periodic OA reports.
- *
- * Called when first opening a stream for OA metrics, but also may be
- * called in response to an OA buffer overflow or other error
- * condition.
- *
- * Note it may be necessary to clear the full OA buffer here as part of
- * maintaining the invariable that new reports must be written to
- * zeroed memory for us to be able to reliable detect if an expected
- * report has not yet landed in memory. (At least on Haswell the OA
- * buffer tail pointer is not synchronized with reports being visible
- * to the CPU)
- */
- void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
-
- /**
* @enable_metric_set: Selects and applies any MUX configuration to set
* up the Boolean and Custom (B/C) counters that are part of the
* counter reports being sampled. May apply system constraints such as
* disabling EU clock gating as required.
*/
- int (*enable_metric_set)(struct drm_i915_private *dev_priv,
- const struct i915_oa_config *oa_config);
+ int (*enable_metric_set)(struct i915_perf_stream *stream);
/**
* @disable_metric_set: Remove system constraints associated with using
@@ -1554,12 +1389,12 @@ struct i915_oa_ops {
/**
* @oa_enable: Enable periodic sampling
*/
- void (*oa_enable)(struct drm_i915_private *dev_priv);
+ void (*oa_enable)(struct i915_perf_stream *stream);
/**
* @oa_disable: Disable periodic sampling
*/
- void (*oa_disable)(struct drm_i915_private *dev_priv);
+ void (*oa_disable)(struct i915_perf_stream *stream);
/**
* @read: Copy data from the circular OA buffer into a given userspace
@@ -1804,7 +1639,7 @@ struct drm_i915_private {
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
- struct i915_workarounds workarounds;
+ struct i915_wa_list gt_wa_list;
struct i915_frontbuffer_tracking fb_tracking;
@@ -2148,6 +1983,8 @@ struct drm_i915_private {
struct delayed_work idle_work;
ktime_t last_init_time;
+
+ struct i915_vma *scratch;
} gt;
/* perform PHY state sanity checks? */
@@ -2322,6 +2159,8 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
(((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
(__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
+bool i915_sg_trim(struct sg_table *orig_st);
+
static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
{
unsigned int page_sizes;
@@ -2367,20 +2206,12 @@ intel_info(const struct drm_i915_private *dev_priv)
#define REVID_FOREVER 0xff
#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
-#define GEN_FOREVER (0)
-
#define INTEL_GEN_MASK(s, e) ( \
BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
- GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \
- (s) != GEN_FOREVER ? (s) - 1 : 0) \
-)
+ GENMASK((e) - 1, (s) - 1))
-/*
- * Returns true if Gen is in inclusive range [Start, End].
- *
- * Use GEN_FOREVER for unbound start and or end.
- */
+/* Returns true if Gen is in inclusive range [Start, End] */
#define IS_GEN(dev_priv, s, e) \
(!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
@@ -2461,6 +2292,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
INTEL_DEVID(dev_priv) == 0x5915 || \
INTEL_DEVID(dev_priv) == 0x591E)
+#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
+ INTEL_DEVID(dev_priv) == 0x87C0)
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
(dev_priv)->info.gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
@@ -2592,17 +2425,22 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
-#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt)
-#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2)
-#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3)
+#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt)
+#define HAS_PPGTT(dev_priv) \
+ (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
+#define HAS_FULL_PPGTT(dev_priv) \
+ (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
+#define HAS_FULL_48BIT_PPGTT(dev_priv) \
+ (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL_4LVL)
+
#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
GEM_BUG_ON((sizes) == 0); \
((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
})
-#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay)
+#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.display.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
- ((dev_priv)->info.overlay_needs_physical)
+ ((dev_priv)->info.display.overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
@@ -2623,31 +2461,31 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
!(IS_I915G(dev_priv) || \
IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv)
-#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug)
+#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.display.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.display.has_hotplug)
#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
-#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc)
+#define HAS_FBC(dev_priv) ((dev_priv)->info.display.has_fbc)
#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
-#define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst)
+#define HAS_DP_MST(dev_priv) ((dev_priv)->info.display.has_dp_mst)
-#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
+#define HAS_DDI(dev_priv) ((dev_priv)->info.display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
-#define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr)
+#define HAS_PSR(dev_priv) ((dev_priv)->info.display.has_psr)
#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6)
#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p)
#define HAS_RC6pp(dev_priv) (false) /* HW was never validated */
-#define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr)
+#define HAS_CSR(dev_priv) ((dev_priv)->info.display.has_csr)
#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
-#define HAS_IPC(dev_priv) ((dev_priv)->info.has_ipc)
+#define HAS_IPC(dev_priv) ((dev_priv)->info.display.has_ipc)
/*
* For now, anything with a GuC requires uCode loading, and then supports
@@ -2708,7 +2546,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
-#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
+#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.display.has_gmch_display)
#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
@@ -2720,6 +2558,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
+#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
+
#include "i915_trace.h"
static inline bool intel_vtd_active(void)
@@ -2742,9 +2582,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
return IS_BROXTON(dev_priv) && intel_vtd_active();
}
-int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
- int enable_ppgtt);
-
/* i915_drv.c */
void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
@@ -3229,7 +3066,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
const struct i915_sched_attr *attr);
-#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
+#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
@@ -3461,6 +3298,7 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port);
bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
enum port port);
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
/* intel_acpi.c */
#ifdef CONFIG_ACPI
@@ -3482,8 +3320,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
-extern int intel_connector_register(struct drm_connector *);
-extern void intel_connector_unregister(struct drm_connector *);
extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
bool state);
extern void intel_display_resume(struct drm_device *dev);
@@ -3496,6 +3332,9 @@ extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
bool interactive);
extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
+void intel_dsc_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -3583,6 +3422,12 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state);
+/* intel_combo_phy.c */
+void icl_combo_phys_init(struct drm_i915_private *dev_priv);
+void icl_combo_phys_uninit(struct drm_i915_private *dev_priv);
+void cnl_combo_phys_init(struct drm_i915_private *dev_priv);
+void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv);
+
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
@@ -3870,4 +3715,9 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
return I915_HWS_CSB_WRITE_INDEX;
}
+static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
+{
+ return i915_ggtt_offset(i915->gt.scratch);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h
new file mode 100644
index 000000000000..591dd89ba7af
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_fixed.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_FIXED_H_
+#define _I915_FIXED_H_
+
+typedef struct {
+ u32 val;
+} uint_fixed_16_16_t;
+
+#define FP_16_16_MAX ((uint_fixed_16_16_t){ .val = UINT_MAX })
+
+static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
+{
+ return val.val == 0;
+}
+
+static inline uint_fixed_16_16_t u32_to_fixed16(u32 val)
+{
+ uint_fixed_16_16_t fp = { .val = val << 16 };
+
+ WARN_ON(val > U16_MAX);
+
+ return fp;
+}
+
+static inline u32 fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
+{
+ return DIV_ROUND_UP(fp.val, 1 << 16);
+}
+
+static inline u32 fixed16_to_u32(uint_fixed_16_16_t fp)
+{
+ return fp.val >> 16;
+}
+
+static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
+ uint_fixed_16_16_t min2)
+{
+ uint_fixed_16_16_t min = { .val = min(min1.val, min2.val) };
+
+ return min;
+}
+
+static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
+ uint_fixed_16_16_t max2)
+{
+ uint_fixed_16_16_t max = { .val = max(max1.val, max2.val) };
+
+ return max;
+}
+
+static inline uint_fixed_16_16_t clamp_u64_to_fixed16(u64 val)
+{
+ uint_fixed_16_16_t fp = { .val = (u32)val };
+
+ WARN_ON(val > U32_MAX);
+
+ return fp;
+}
+
+static inline u32 div_round_up_fixed16(uint_fixed_16_16_t val,
+ uint_fixed_16_16_t d)
+{
+ return DIV_ROUND_UP(val.val, d.val);
+}
+
+static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
+{
+ u64 tmp;
+
+ tmp = (u64)val * mul.val;
+ tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16);
+ WARN_ON(tmp > U32_MAX);
+
+ return (u32)tmp;
+}
+
+static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
+ uint_fixed_16_16_t mul)
+{
+ u64 tmp;
+
+ tmp = (u64)val.val * mul.val;
+ tmp = tmp >> 16;
+
+ return clamp_u64_to_fixed16(tmp);
+}
+
+static inline uint_fixed_16_16_t div_fixed16(u32 val, u32 d)
+{
+ u64 tmp;
+
+ tmp = (u64)val << 16;
+ tmp = DIV_ROUND_UP_ULL(tmp, d);
+
+ return clamp_u64_to_fixed16(tmp);
+}
+
+static inline u32 div_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t d)
+{
+ u64 tmp;
+
+ tmp = (u64)val << 16;
+ tmp = DIV_ROUND_UP_ULL(tmp, d.val);
+ WARN_ON(tmp > U32_MAX);
+
+ return (u32)tmp;
+}
+
+static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
+{
+ u64 tmp;
+
+ tmp = (u64)val * mul.val;
+
+ return clamp_u64_to_fixed16(tmp);
+}
+
+static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
+ uint_fixed_16_16_t add2)
+{
+ u64 tmp;
+
+ tmp = (u64)add1.val + add2.val;
+
+ return clamp_u64_to_fixed16(tmp);
+}
+
+static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
+ u32 add2)
+{
+ uint_fixed_16_16_t tmp_add2 = u32_to_fixed16(add2);
+ u64 tmp;
+
+ tmp = (u64)add1.val + tmp_add2.val;
+
+ return clamp_u64_to_fixed16(tmp);
+}
+
+#endif /* _I915_FIXED_H_ */
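The helpers moved into i915_fixed.h operate on 16.16 fixed point, i.e. 1.0 is stored as 1 << 16. A small worked example of the arithmetic (illustrative only; fixed16_example() is not part of the header):

#include <linux/kernel.h>
#include "i915_fixed.h"

static void fixed16_example(void)
{
	uint_fixed_16_16_t a = div_fixed16(5, 2);	/* 2.5   == 0x00028000 */
	uint_fixed_16_16_t b = div_fixed16(5, 4);	/* 1.25  == 0x00014000 */
	uint_fixed_16_16_t c = mul_fixed16(a, b);	/* 3.125 == 0x00032000 */

	WARN_ON(fixed16_to_u32(c) != 3);		/* truncates toward zero */
	WARN_ON(fixed16_to_u32_round_up(c) != 4);	/* rounds up */
}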
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c8aa57ce83b..d36a9755ad91 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1740,6 +1740,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
*/
err = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_PRIORITY |
(write_domain ? I915_WAIT_ALL : 0),
MAX_SCHEDULE_TIMEOUT,
to_rps_client(file));
@@ -2381,11 +2382,23 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
+/*
+ * Move pages to the appropriate LRU list and release the pagevec, decrementing
+ * the reference count of those pages.
+ */
+static void check_release_pagevec(struct pagevec *pvec)
+{
+ check_move_unevictable_pages(pvec);
+ __pagevec_release(pvec);
+ cond_resched();
+}
+
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
struct sgt_iter sgt_iter;
+ struct pagevec pvec;
struct page *page;
__i915_gem_object_release_shmem(obj, pages, true);
@@ -2395,6 +2408,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj, pages);
+ mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
+
+ pagevec_init(&pvec);
for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty)
set_page_dirty(page);
@@ -2402,8 +2418,11 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
- put_page(page);
+ if (!pagevec_add(&pvec, page))
+ check_release_pagevec(&pvec);
}
+ if (pagevec_count(&pvec))
+ check_release_pagevec(&pvec);
obj->mm.dirty = false;
sg_free_table(pages);
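The shmem release path above switches from per-page put_page() to batching through a pagevec: pages are queued, and whenever the pagevec fills up (or at the end) they are moved off the unevictable LRU and released in one go. A standalone sketch of that pattern with a generic helper name (release_pages_batched() is an illustration, not the driver function):

#include <linux/pagevec.h>
#include <linux/swap.h>

static void release_pages_batched(struct page **pages, unsigned long count)
{
	struct pagevec pvec;
	unsigned long i;

	pagevec_init(&pvec);
	for (i = 0; i < count; i++) {
		/* pagevec_add() returns the space left; 0 means the pagevec is full */
		if (!pagevec_add(&pvec, pages[i])) {
			check_move_unevictable_pages(&pvec);
			__pagevec_release(&pvec);
		}
	}
	if (pagevec_count(&pvec)) {
		check_move_unevictable_pages(&pvec);
		__pagevec_release(&pvec);
	}
}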
@@ -2483,7 +2502,7 @@ unlock:
mutex_unlock(&obj->mm.lock);
}
-static bool i915_sg_trim(struct sg_table *orig_st)
+bool i915_sg_trim(struct sg_table *orig_st)
{
struct sg_table new_st;
struct scatterlist *sg, *new_sg;
@@ -2524,6 +2543,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
unsigned long last_pfn = 0; /* suppress gcc warning */
unsigned int max_segment = i915_sg_segment_size();
unsigned int sg_page_sizes;
+ struct pagevec pvec;
gfp_t noreclaim;
int ret;
@@ -2559,6 +2579,7 @@ rebuild_st:
* Fail silently without starting the shrinker
*/
mapping = obj->base.filp->f_mapping;
+ mapping_set_unevictable(mapping);
noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
@@ -2573,6 +2594,7 @@ rebuild_st:
gfp_t gfp = noreclaim;
do {
+ cond_resched();
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (likely(!IS_ERR(page)))
break;
@@ -2583,7 +2605,6 @@ rebuild_st:
}
i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
- cond_resched();
/*
* We've tried hard to allocate the memory by reaping
@@ -2673,8 +2694,14 @@ rebuild_st:
err_sg:
sg_mark_end(sg);
err_pages:
- for_each_sgt_page(page, sgt_iter, st)
- put_page(page);
+ mapping_clear_unevictable(mapping);
+ pagevec_init(&pvec);
+ for_each_sgt_page(page, sgt_iter, st) {
+ if (!pagevec_add(&pvec, page))
+ check_release_pagevec(&pvec);
+ }
+ if (pagevec_count(&pvec))
+ check_release_pagevec(&pvec);
sg_free_table(st);
kfree(st);
@@ -3282,16 +3309,6 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct i915_request *request)
{
- GEM_TRACE("%s fence %llx:%d -> -EIO\n",
- request->engine->name,
- request->fence.context, request->fence.seqno);
- dma_fence_set_error(&request->fence, -EIO);
-
- i915_request_submit(request);
-}
-
-static void nop_complete_submit_request(struct i915_request *request)
-{
unsigned long flags;
GEM_TRACE("%s fence %llx:%d -> -EIO\n",
@@ -3327,57 +3344,33 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, i915, id)
i915_gem_reset_prepare_engine(engine);
- engine->submit_request = nop_submit_request;
- engine->schedule = NULL;
- }
- i915->caps.scheduler = 0;
-
/* Even if the GPU reset fails, it should still stop the engines */
if (INTEL_GEN(i915) >= 5)
intel_gpu_reset(i915, ALL_ENGINES);
- /*
- * Make sure no one is running the old callback before we proceed with
- * cancelling requests and resetting the completion tracking. Otherwise
- * we might submit a request to the hardware which never completes.
- */
- synchronize_rcu();
-
for_each_engine(engine, i915, id) {
- /* Mark all executing requests as skipped */
- engine->cancel_requests(engine);
-
- /*
- * Only once we've force-cancelled all in-flight requests can we
- * start to complete all requests.
- */
- engine->submit_request = nop_complete_submit_request;
+ engine->submit_request = nop_submit_request;
+ engine->schedule = NULL;
}
+ i915->caps.scheduler = 0;
/*
* Make sure no request can slip through without getting completed by
* either this call here to intel_engine_init_global_seqno, or the one
- * in nop_complete_submit_request.
+ * in nop_submit_request.
*/
synchronize_rcu();
- for_each_engine(engine, i915, id) {
- unsigned long flags;
-
- /*
- * Mark all pending requests as complete so that any concurrent
- * (lockless) lookup doesn't try and wait upon the request as we
- * reset it.
- */
- spin_lock_irqsave(&engine->timeline.lock, flags);
- intel_engine_init_global_seqno(engine,
- intel_engine_last_submit(engine));
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ /* Mark all executing requests as skipped */
+ for_each_engine(engine, i915, id)
+ engine->cancel_requests(engine);
+ for_each_engine(engine, i915, id) {
i915_gem_reset_finish_engine(engine);
+ intel_engine_wakeup(engine);
}
out:
@@ -3530,6 +3523,8 @@ static void __sleep_rcu(struct rcu_head *rcu)
struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
struct drm_i915_private *i915 = s->i915;
+ destroy_rcu_head(&s->rcu);
+
if (same_epoch(i915, s->epoch)) {
INIT_WORK(&s->work, __sleep_work);
queue_work(i915->wq, &s->work);
@@ -3646,6 +3641,7 @@ out_rearm:
if (same_epoch(dev_priv, epoch)) {
struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s) {
+ init_rcu_head(&s->rcu);
s->i915 = dev_priv;
s->epoch = epoch;
call_rcu(&s->rcu, __sleep_rcu);
@@ -3743,7 +3739,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
start = ktime_get();
ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_PRIORITY |
+ I915_WAIT_ALL,
to_wait_timeout(args->timeout_ns),
to_rps_client(file));
@@ -4710,6 +4708,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->lut_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
+ init_rcu_head(&obj->rcu);
+
obj->ops = ops;
reservation_object_init(&obj->__builtin_resv);
@@ -4977,6 +4977,13 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
/*
+ * We reuse obj->rcu for the freed list, so we had better not treat
+ * it like a rcu_head from this point forwards. And we expect all
+ * objects to be freed via this path.
+ */
+ destroy_rcu_head(&obj->rcu);
+
+ /*
* Since we require blocking on struct_mutex to unbind the freed
* object from the GPU before releasing resources back to the
* system, we can not do that directly from the RCU callback (which may
@@ -5293,19 +5300,10 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
- if (HAS_PCH_NOP(dev_priv)) {
- if (IS_IVYBRIDGE(dev_priv)) {
- u32 temp = I915_READ(GEN7_MSG_CTL);
- temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
- I915_WRITE(GEN7_MSG_CTL, temp);
- } else if (INTEL_GEN(dev_priv) >= 7) {
- u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
- temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
- I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
- }
- }
-
- intel_gt_workarounds_apply(dev_priv);
+ /* Apply the GT workarounds... */
+ intel_gt_apply_workarounds(dev_priv);
+ /* ...and determine whether they are sticking. */
+ intel_gt_verify_workarounds(dev_priv, "init");
i915_gem_init_swizzling(dev_priv);
@@ -5500,6 +5498,44 @@ err_active:
goto out_ctx;
}
+static int
+i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ obj = i915_gem_object_create_stolen(i915, size);
+ if (!obj)
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate scratch page\n");
+ return PTR_ERR(obj);
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+
+ ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (ret)
+ goto err_unref;
+
+ i915->gt.scratch = vma;
+ return 0;
+
+err_unref:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+static void i915_gem_fini_scratch(struct drm_i915_private *i915)
+{
+ i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+}
+
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
@@ -5546,12 +5582,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_unlock;
}
- ret = i915_gem_contexts_init(dev_priv);
+ ret = i915_gem_init_scratch(dev_priv,
+ IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_ggtt;
}
+ ret = i915_gem_contexts_init(dev_priv);
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_scratch;
+ }
+
ret = intel_engines_init(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
@@ -5624,6 +5667,8 @@ err_pm:
err_context:
if (ret != -EIO)
i915_gem_contexts_fini(dev_priv);
+err_scratch:
+ i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5675,8 +5720,11 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
intel_uc_fini(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_contexts_fini(dev_priv);
+ i915_gem_fini_scratch(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_wa_list_free(&dev_priv->gt_wa_list);
+
intel_cleanup_gt_powersave(dev_priv);
intel_uc_fini_misc(dev_priv);
@@ -5951,7 +5999,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
* the bits.
*/
BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
- sizeof(atomic_t) * BITS_PER_BYTE);
+ BITS_PER_TYPE(atomic_t));
if (old) {
WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
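The i915_gem.c changes above replace per-page put_page() calls with a pagevec that is flushed through check_release_pagevec() whenever pagevec_add() reports it full, plus a final flush for the partial tail. A standalone sketch of that fill-and-flush idiom, with a made-up batch structure standing in for the pagevec:

#include <stdio.h>

#define BATCH_SIZE 15	/* similar in spirit to PAGEVEC_SIZE */

struct batch {
	int nr;
	int items[BATCH_SIZE];
};

/* returns 0 when the batch is full, mirroring pagevec_add() */
static int batch_add(struct batch *b, int item)
{
	b->items[b->nr++] = item;
	return BATCH_SIZE - b->nr;
}

static void batch_release(struct batch *b)
{
	printf("releasing %d items\n", b->nr);	/* check_release_pagevec() would move the
						 * pages to the right LRU and drop their
						 * references at this point */
	b->nr = 0;
}

int main(void)
{
	struct batch b = { 0 };

	for (int i = 0; i < 40; i++)		/* stands in for for_each_sgt_page() */
		if (!batch_add(&b, i))
			batch_release(&b);

	if (b.nr)				/* flush the partial tail */
		batch_release(&b);
	return 0;
}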
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 599c4f6eb1ea..b0e4b976880c 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -47,17 +47,19 @@ struct drm_i915_private;
#define GEM_DEBUG_DECL(var) var
#define GEM_DEBUG_EXEC(expr) expr
#define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr)
+#define GEM_DEBUG_WARN_ON(expr) GEM_WARN_ON(expr)
#else
#define GEM_SHOW_DEBUG() (0)
#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
-#define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0)
+#define GEM_WARN_ON(expr) ({ unlikely(!!(expr)); })
#define GEM_DEBUG_DECL(var)
#define GEM_DEBUG_EXEC(expr) do { } while (0)
#define GEM_DEBUG_BUG_ON(expr)
+#define GEM_DEBUG_WARN_ON(expr) ({ BUILD_BUG_ON_INVALID(expr); 0; })
#endif
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f772593b99ab..371c07087095 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -337,7 +337,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->contexts.list);
ctx->i915 = dev_priv;
- ctx->sched.priority = I915_PRIORITY_NORMAL;
+ ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
struct intel_context *ce = &ctx->__engine[n];
@@ -414,7 +414,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
if (IS_ERR(ctx))
return ctx;
- if (USES_FULL_PPGTT(dev_priv)) {
+ if (HAS_FULL_PPGTT(dev_priv)) {
struct i915_hw_ppgtt *ppgtt;
ppgtt = i915_ppgtt_create(dev_priv, file_priv);
@@ -457,7 +457,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
if (ret)
return ERR_PTR(ret);
- ctx = __create_hw_context(to_i915(dev), NULL);
+ ctx = i915_gem_create_context(to_i915(dev), NULL);
if (IS_ERR(ctx))
goto out;
@@ -504,7 +504,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
}
i915_gem_context_clear_bannable(ctx);
- ctx->sched.priority = prio;
+ ctx->sched.priority = I915_USER_PRIORITY(prio);
ctx->ring_size = PAGE_SIZE;
GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -535,16 +535,12 @@ static bool needs_preempt_context(struct drm_i915_private *i915)
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
- int ret;
/* Reassure ourselves we are only called once */
GEM_BUG_ON(dev_priv->kernel_context);
GEM_BUG_ON(dev_priv->preempt_context);
- ret = intel_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
-
+ intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
init_contexts(dev_priv);
/* lowest priority; idle task */
@@ -879,7 +875,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->value = i915_gem_context_is_bannable(ctx);
break;
case I915_CONTEXT_PARAM_PRIORITY:
- args->value = ctx->sched.priority;
+ args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
break;
default:
ret = -EINVAL;
@@ -948,7 +944,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
!capable(CAP_SYS_NICE))
ret = -EPERM;
else
- ctx->sched.priority = priority;
+ ctx->sched.priority =
+ I915_USER_PRIORITY(priority);
}
break;
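The context-param hunks above wrap the user-supplied priority with I915_USER_PRIORITY() when storing it and shift it back by I915_USER_PRIORITY_SHIFT in getparam, which presumably leaves the low bits of ctx->sched.priority free for internal use. The sketch below illustrates that packing; the shift width of 2 is an assumption made for the example, not the value used by the driver:

#include <stdio.h>

/* hypothetical: low bits reserved for internal priority adjustments */
#define USER_PRIORITY_SHIFT 2
#define USER_PRIORITY(x)    ((x) << USER_PRIORITY_SHIFT)

int main(void)
{
	int user_prio = 512;				/* what the ioctl caller passes in */
	int sched_prio = USER_PRIORITY(user_prio);	/* what the scheduler stores */

	/* getparam reverses the packing so userspace sees its own value again */
	printf("%d\n", sched_prio >> USER_PRIORITY_SHIFT);	/* prints 512 */
	return 0;
}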
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 08165f6a0a84..f6d870b1f73e 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -163,6 +163,7 @@ struct i915_gem_context {
/** engine: per-engine logical HW state */
struct intel_context {
struct i915_gem_context *gem_context;
+ struct intel_engine_cs *active;
struct i915_vma *state;
struct intel_ring *ring;
u32 *lrc_reg_state;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1a1c04db6c80..10a4afb4f235 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
else if (gen >= 4)
len = 4;
else
- len = 3;
+ len = 6;
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
@@ -1309,6 +1309,11 @@ relocate_entry(struct i915_vma *vma,
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = addr;
*batch++ = target_offset;
+
+ /* And again for good measure (blb/pnv) */
+ *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *batch++ = addr;
+ *batch++ = target_offset;
}
goto out;
@@ -2157,7 +2162,7 @@ await_fence_array(struct i915_execbuffer *eb,
if (!(flags & I915_EXEC_FENCE_WAIT))
continue;
- drm_syncobj_search_fence(syncobj, 0, 0, &fence);
+ fence = drm_syncobj_fence_get(syncobj);
if (!fence)
return -EINVAL;
@@ -2186,7 +2191,7 @@ signal_fence_array(struct i915_execbuffer *eb,
if (!(flags & I915_EXEC_FENCE_SIGNAL))
continue;
- drm_syncobj_replace_fence(syncobj, 0, fence);
+ drm_syncobj_replace_fence(syncobj, fence);
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 47c302543799..add1fe7aeb93 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -133,55 +133,6 @@ static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
i915->ggtt.invalidate(i915);
}
-int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
- int enable_ppgtt)
-{
- bool has_full_ppgtt;
- bool has_full_48bit_ppgtt;
-
- if (!dev_priv->info.has_aliasing_ppgtt)
- return 0;
-
- has_full_ppgtt = dev_priv->info.has_full_ppgtt;
- has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
-
- if (intel_vgpu_active(dev_priv)) {
- /* GVT-g has no support for 32bit ppgtt */
- has_full_ppgtt = false;
- has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
- }
-
- /*
- * We don't allow disabling PPGTT for gen9+ as it's a requirement for
- * execlists, the sole mechanism available to submit work.
- */
- if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
- return 0;
-
- if (enable_ppgtt == 1)
- return 1;
-
- if (enable_ppgtt == 2 && has_full_ppgtt)
- return 2;
-
- if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
- return 3;
-
- /* Disable ppgtt on SNB if VT-d is on. */
- if (IS_GEN6(dev_priv) && intel_vtd_active()) {
- DRM_INFO("Disabling PPGTT because VT-d is on\n");
- return 0;
- }
-
- if (has_full_48bit_ppgtt)
- return 3;
-
- if (has_full_ppgtt)
- return 2;
-
- return 1;
-}
-
static int ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
@@ -235,9 +186,9 @@ static void clear_pages(struct i915_vma *vma)
memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}
-static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
+static u64 gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
@@ -274,9 +225,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode
-static gen6_pte_t snb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 unused)
+static u64 snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -296,9 +247,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 unused)
+static u64 ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -320,9 +271,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_pte_t byt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
+static u64 byt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -336,9 +287,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 unused)
+static u64 hsw_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -349,9 +300,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_pte_t iris_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 unused)
+static u64 iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -629,10 +580,9 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
* region, including any PTEs which happen to point to scratch.
*
* This is only relevant for the 48b PPGTT where we support
- * huge-gtt-pages, see also i915_vma_insert().
- *
- * TODO: we should really consider write-protecting the scratch-page and
- * sharing between ppgtt
+ * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
+ * scratch (read-only) between all vm, we create one 64k scratch page
+ * for all.
*/
size = I915_GTT_PAGE_SIZE_4K;
if (i915_vm_is_48bit(vm) &&
@@ -715,14 +665,13 @@ static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
static void gen8_initialize_pt(struct i915_address_space *vm,
struct i915_page_table *pt)
{
- fill_px(vm, pt,
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
+ fill_px(vm, pt, vm->scratch_pte);
}
-static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
+static void gen6_initialize_pt(struct i915_address_space *vm,
struct i915_page_table *pt)
{
- fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
+ fill32_px(vm, pt, vm->scratch_pte);
}
static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
@@ -856,15 +805,13 @@ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
/* Removes entries from a single page table, releasing it if it's empty.
* Caller can use the return value to update higher-level entries.
*/
-static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
+static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
struct i915_page_table *pt,
u64 start, u64 length)
{
unsigned int num_entries = gen8_pte_count(start, length);
unsigned int pte = gen8_pte_index(start);
unsigned int pte_end = pte + num_entries;
- const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
gen8_pte_t *vaddr;
GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -875,7 +822,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
vaddr = kmap_atomic_px(pt);
while (pte < pte_end)
- vaddr[pte++] = scratch_pte;
+ vaddr[pte++] = vm->scratch_pte;
kunmap_atomic(vaddr);
return false;
@@ -1208,7 +1155,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
u16 i;
- encode = pte_encode | vma->vm->scratch_page.daddr;
+ encode = vma->vm->scratch_pte;
vaddr = kmap_atomic_px(pd->page_table[idx.pde]);
for (i = 1; i < index; i += 16)
@@ -1261,10 +1208,35 @@ static int gen8_init_scratch(struct i915_address_space *vm)
{
int ret;
+ /*
+ * If everybody agrees not to write into the scratch page,
+ * we can reuse it for all vm, keeping contexts and processes separate.
+ */
+ if (vm->has_read_only &&
+ vm->i915->kernel_context &&
+ vm->i915->kernel_context->ppgtt) {
+ struct i915_address_space *clone =
+ &vm->i915->kernel_context->ppgtt->vm;
+
+ GEM_BUG_ON(!clone->has_read_only);
+
+ vm->scratch_page.order = clone->scratch_page.order;
+ vm->scratch_pte = clone->scratch_pte;
+ vm->scratch_pt = clone->scratch_pt;
+ vm->scratch_pd = clone->scratch_pd;
+ vm->scratch_pdp = clone->scratch_pdp;
+ return 0;
+ }
+
ret = setup_scratch_page(vm, __GFP_HIGHMEM);
if (ret)
return ret;
+ vm->scratch_pte =
+ gen8_pte_encode(vm->scratch_page.daddr,
+ I915_CACHE_LLC,
+ PTE_READ_ONLY);
+
vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
ret = PTR_ERR(vm->scratch_pt);
@@ -1336,6 +1308,9 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
static void gen8_free_scratch(struct i915_address_space *vm)
{
+ if (!vm->scratch_page.daddr)
+ return;
+
if (use_4lvl(vm))
free_pdp(vm, vm->scratch_pdp);
free_pd(vm, vm->scratch_pd);
@@ -1573,8 +1548,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
struct i915_address_space *vm = &ppgtt->vm;
- const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+ const gen8_pte_t scratch_pte = vm->scratch_pte;
u64 start = 0, length = ppgtt->vm.total;
if (use_4lvl(vm)) {
@@ -1647,16 +1621,12 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
ppgtt->vm.i915 = i915;
ppgtt->vm.dma = &i915->drm.pdev->dev;
- ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ?
+ ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ?
1ULL << 48 :
1ULL << 32;
- /*
- * From bdw, there is support for read-only pages in the PPGTT.
- *
- * XXX GVT is not honouring the lack of RW in the PTE bits.
- */
- ppgtt->vm.has_read_only = !intel_vgpu_active(i915);
+ /* From bdw, there is support for read-only pages in the PPGTT. */
+ ppgtt->vm.has_read_only = true;
i915_address_space_init(&ppgtt->vm, i915);
@@ -1721,7 +1691,7 @@ err_free:
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
- const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+ const gen6_pte_t scratch_pte = base->vm.scratch_pte;
struct i915_page_table *pt;
u32 pte, pde;
@@ -1782,19 +1752,6 @@ static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
ppgtt->pd_addr + pde);
}
-static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id) {
- u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
- GEN8_GFX_PPGTT_48B : 0;
- I915_WRITE(RING_MODE_GEN7(engine),
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
- }
-}
-
static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
@@ -1834,7 +1791,8 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
- I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
+ I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
/* PPGTT support for Sandybdrige/Gen6 and later */
@@ -1846,7 +1804,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned int pde = first_entry / GEN6_PTES;
unsigned int pte = first_entry % GEN6_PTES;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
- const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+ const gen6_pte_t scratch_pte = vm->scratch_pte;
while (num_entries) {
struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
@@ -1937,7 +1895,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
if (IS_ERR(pt))
goto unwind_out;
- gen6_initialize_pt(ppgtt, pt);
+ gen6_initialize_pt(vm, pt);
ppgtt->base.pd.page_table[pde] = pt;
if (i915_vma_is_bound(ppgtt->vma,
@@ -1975,9 +1933,9 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
if (ret)
return ret;
- ppgtt->scratch_pte =
- vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_NONE, PTE_READ_ONLY);
+ vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
+ I915_CACHE_NONE,
+ PTE_READ_ONLY);
vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
@@ -1985,7 +1943,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
return PTR_ERR(vm->scratch_pt);
}
- gen6_initialize_pt(ppgtt, vm->scratch_pt);
+ gen6_initialize_pt(vm, vm->scratch_pt);
gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
@@ -2237,23 +2195,10 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
gtt_write_workarounds(dev_priv);
- /* In the case of execlists, PPGTT is enabled by the context descriptor
- * and the PDPs are contained within the context itself. We don't
- * need to do anything here. */
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
- return 0;
-
- if (!USES_PPGTT(dev_priv))
- return 0;
-
if (IS_GEN6(dev_priv))
gen6_ppgtt_enable(dev_priv);
else if (IS_GEN7(dev_priv))
gen7_ppgtt_enable(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 8)
- gen8_ppgtt_enable(dev_priv);
- else
- MISSING_CASE(INTEL_GEN(dev_priv));
return 0;
}
@@ -2543,8 +2488,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start / I915_GTT_PAGE_SIZE;
unsigned num_entries = length / I915_GTT_PAGE_SIZE;
- const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+ const gen8_pte_t scratch_pte = vm->scratch_pte;
gen8_pte_t __iomem *gtt_base =
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -2669,8 +2613,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, 0);
+ scratch_pte = vm->scratch_pte;
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
@@ -2952,7 +2895,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
/* And finally clear the reserved guard page */
ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
- if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
+ if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
ret = i915_gem_init_aliasing_ppgtt(dev_priv);
if (ret)
goto err;
@@ -3076,6 +3019,10 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return ret;
}
+ ggtt->vm.scratch_pte =
+ ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
+ I915_CACHE_NONE, 0);
+
return 0;
}
@@ -3275,7 +3222,7 @@ static void bdw_setup_private_ppat(struct intel_ppat *ppat)
ppat->match = bdw_private_pat_match;
ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
- if (!USES_PPGTT(ppat->i915)) {
+ if (!HAS_PPGTT(ppat->i915)) {
/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
* so RTL will always use the value corresponding to
* pat_sel = 000".
@@ -3402,7 +3349,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->vm.insert_page = gen8_ggtt_insert_page;
ggtt->vm.clear_range = nop_clear_range;
- if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
+ if (intel_scanout_needs_vtd_wa(dev_priv))
ggtt->vm.clear_range = gen8_ggtt_clear_range;
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
@@ -3413,6 +3360,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
if (ggtt->vm.clear_range != nop_clear_range)
ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+
+ /* Prevent recursive calls to stop_machine() and the resulting deadlocks. */
+ dev_info(dev_priv->drm.dev,
+ "Disabling error capture for VT-d workaround\n");
+ i915_disable_error_state(dev_priv, -ENODEV);
}
ggtt->invalidate = gen6_ggtt_invalidate;
@@ -3422,6 +3374,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
ggtt->vm.vma_ops.clear_pages = clear_pages;
+ ggtt->vm.pte_encode = gen8_pte_encode;
+
setup_private_pat(dev_priv);
return ggtt_probe_common(ggtt, size);
@@ -3609,7 +3563,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
/* Only VLV supports read-only GGTT mappings */
ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
- if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
+ if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3711,7 +3665,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
}
static struct scatterlist *
-rotate_pages(const dma_addr_t *in, unsigned int offset,
+rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
unsigned int width, unsigned int height,
unsigned int stride,
struct sg_table *st, struct scatterlist *sg)
@@ -3720,7 +3674,7 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
unsigned int src_idx;
for (column = 0; column < width; column++) {
- src_idx = stride * (height - 1) + column;
+ src_idx = stride * (height - 1) + column + offset;
for (row = 0; row < height; row++) {
st->nents++;
/* We don't need the pages, but need to initialize
@@ -3728,7 +3682,8 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
* The only thing we need are DMA addresses.
*/
sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
- sg_dma_address(sg) = in[offset + src_idx];
+ sg_dma_address(sg) =
+ i915_gem_object_get_dma_address(obj, src_idx);
sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
sg = sg_next(sg);
src_idx -= stride;
@@ -3742,22 +3697,11 @@ static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
- const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE;
unsigned int size = intel_rotation_info_size(rot_info);
- struct sgt_iter sgt_iter;
- dma_addr_t dma_addr;
- unsigned long i;
- dma_addr_t *page_addr_list;
struct sg_table *st;
struct scatterlist *sg;
int ret = -ENOMEM;
-
- /* Allocate a temporary list of source pages for random access. */
- page_addr_list = kvmalloc_array(n_pages,
- sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!page_addr_list)
- return ERR_PTR(ret);
+ int i;
/* Allocate target SG list. */
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -3768,29 +3712,20 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
if (ret)
goto err_sg_alloc;
- /* Populate source page list from the object. */
- i = 0;
- for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
- page_addr_list[i++] = dma_addr;
-
- GEM_BUG_ON(i != n_pages);
st->nents = 0;
sg = st->sgl;
for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
- sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
+ sg = rotate_pages(obj, rot_info->plane[i].offset,
rot_info->plane[i].width, rot_info->plane[i].height,
rot_info->plane[i].stride, st, sg);
}
- kvfree(page_addr_list);
-
return st;
err_sg_alloc:
kfree(st);
err_st_alloc:
- kvfree(page_addr_list);
DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
@@ -3835,6 +3770,8 @@ intel_partial_pages(const struct i915_ggtt_view *view,
count -= len >> PAGE_SHIFT;
if (count == 0) {
sg_mark_end(sg);
+ i915_sg_trim(st); /* Drop any unused tail entries. */
+
return st;
}
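rotate_pages() above now looks up each DMA address directly via i915_gem_object_get_dma_address() instead of first copying every address into a temporary array; it walks the source column by column, starting each column at the bottom row (stride * (height - 1) + column + offset) and stepping upwards by subtracting the stride. The index order can be checked with a small standalone program; the width, height and stride values below are arbitrary:

#include <stdio.h>

int main(void)
{
	const unsigned int width = 3, height = 2, stride = 4, offset = 0;

	/* emit source page indices in the rotated (column-major, bottom-up) order */
	for (unsigned int column = 0; column < width; column++) {
		unsigned int src_idx = stride * (height - 1) + column + offset;

		for (unsigned int row = 0; row < height; row++) {
			printf("%u ", src_idx);
			src_idx -= stride;
		}
	}
	printf("\n");	/* prints: 4 0 5 1 6 2 */
	return 0;
}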
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 28039290655c..4874da09a3c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -289,6 +289,7 @@ struct i915_address_space {
struct mutex mutex; /* protects vma and our lists */
+ u64 scratch_pte;
struct i915_page_dma scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
@@ -335,12 +336,11 @@ struct i915_address_space {
/* Some systems support read-only mappings for GGTT and/or PPGTT */
bool has_read_only:1;
- /* FIXME: Need a more generic return type */
- gen6_pte_t (*pte_encode)(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags); /* Create a valid PTE */
- /* flags for pte_encode */
+ u64 (*pte_encode)(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY (1<<0)
+
int (*allocate_va_range)(struct i915_address_space *vm,
u64 start, u64 length);
void (*clear_range)(struct i915_address_space *vm,
@@ -422,7 +422,6 @@ struct gen6_hw_ppgtt {
struct i915_vma *vma;
gen6_pte_t __iomem *pd_addr;
- gen6_pte_t scratch_pte;
unsigned int pin_count;
bool scan_for_unused_pt;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 8762d17b6659..07465123c166 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -27,11 +27,14 @@
*
*/
-#include <generated/utsrelease.h>
+#include <linux/ascii85.h>
+#include <linux/nmi.h>
+#include <linux/scatterlist.h>
#include <linux/stop_machine.h>
+#include <linux/utsname.h>
#include <linux/zlib.h>
+
#include <drm/drm_print.h>
-#include <linux/ascii85.h>
#include "i915_gpu_error.h"
#include "i915_drv.h"
@@ -77,112 +80,110 @@ static const char *purgeable_flag(int purgeable)
return purgeable ? " purgeable" : "";
}
-static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
+static void __sg_set_buf(struct scatterlist *sg,
+ void *addr, unsigned int len, loff_t it)
{
-
- if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
- e->err = -ENOSPC;
- return false;
- }
-
- if (e->bytes == e->size - 1 || e->err)
- return false;
-
- return true;
+ sg->page_link = (unsigned long)virt_to_page(addr);
+ sg->offset = offset_in_page(addr);
+ sg->length = len;
+ sg->dma_address = it;
}
-static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
- unsigned len)
+static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
- if (e->pos + len <= e->start) {
- e->pos += len;
+ if (!len)
return false;
- }
- /* First vsnprintf needs to fit in its entirety for memmove */
- if (len >= e->size) {
- e->err = -EIO;
- return false;
- }
+ if (e->bytes + len + 1 <= e->size)
+ return true;
- return true;
-}
+ if (e->bytes) {
+ __sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
+ e->iter += e->bytes;
+ e->buf = NULL;
+ e->bytes = 0;
+ }
-static void __i915_error_advance(struct drm_i915_error_state_buf *e,
- unsigned len)
-{
- /* If this is first printf in this window, adjust it so that
- * start position matches start of the buffer
- */
+ if (e->cur == e->end) {
+ struct scatterlist *sgl;
- if (e->pos < e->start) {
- const size_t off = e->start - e->pos;
+ sgl = (typeof(sgl))__get_free_page(GFP_KERNEL);
+ if (!sgl) {
+ e->err = -ENOMEM;
+ return false;
+ }
- /* Should not happen but be paranoid */
- if (off > len || e->bytes) {
- e->err = -EIO;
- return;
+ if (e->cur) {
+ e->cur->offset = 0;
+ e->cur->length = 0;
+ e->cur->page_link =
+ (unsigned long)sgl | SG_CHAIN;
+ } else {
+ e->sgl = sgl;
}
- memmove(e->buf, e->buf + off, len - off);
- e->bytes = len - off;
- e->pos = e->start;
- return;
+ e->cur = sgl;
+ e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
}
- e->bytes += len;
- e->pos += len;
+ e->size = ALIGN(len + 1, SZ_64K);
+ e->buf = kmalloc(e->size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!e->buf) {
+ e->size = PAGE_ALIGN(len + 1);
+ e->buf = kmalloc(e->size, GFP_KERNEL);
+ }
+ if (!e->buf) {
+ e->err = -ENOMEM;
+ return false;
+ }
+
+ return true;
}
__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
- const char *f, va_list args)
+ const char *fmt, va_list args)
{
- unsigned len;
+ va_list ap;
+ int len;
- if (!__i915_error_ok(e))
+ if (e->err)
return;
- /* Seek the first printf which is hits start position */
- if (e->pos < e->start) {
- va_list tmp;
-
- va_copy(tmp, args);
- len = vsnprintf(NULL, 0, f, tmp);
- va_end(tmp);
-
- if (!__i915_error_seek(e, len))
- return;
+ va_copy(ap, args);
+ len = vsnprintf(NULL, 0, fmt, ap);
+ va_end(ap);
+ if (len <= 0) {
+ e->err = len;
+ return;
}
- len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
- if (len >= e->size - e->bytes)
- len = e->size - e->bytes - 1;
+ if (!__i915_error_grow(e, len))
+ return;
- __i915_error_advance(e, len);
+ GEM_BUG_ON(e->bytes >= e->size);
+ len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
+ if (len < 0) {
+ e->err = len;
+ return;
+ }
+ e->bytes += len;
}
-static void i915_error_puts(struct drm_i915_error_state_buf *e,
- const char *str)
+static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
unsigned len;
- if (!__i915_error_ok(e))
+ if (e->err || !str)
return;
len = strlen(str);
+ if (!__i915_error_grow(e, len))
+ return;
- /* Seek the first printf which is hits start position */
- if (e->pos < e->start) {
- if (!__i915_error_seek(e, len))
- return;
- }
-
- if (len >= e->size - e->bytes)
- len = e->size - e->bytes - 1;
+ GEM_BUG_ON(e->bytes + len > e->size);
memcpy(e->buf + e->bytes, str, len);
-
- __i915_error_advance(e, len);
+ e->bytes += len;
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
@@ -268,6 +269,8 @@ static int compress_page(struct compress *c,
if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
return -EIO;
+
+ touch_nmi_watchdog();
} while (zstream->avail_in);
/* Fallback to uncompressed if we increase size? */
@@ -512,7 +515,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " SYNC_2: 0x%08x\n",
ee->semaphore_mboxes[2]);
}
- if (USES_PPGTT(m->i915)) {
+ if (HAS_PPGTT(m->i915)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
if (INTEL_GEN(m->i915) >= 8) {
@@ -635,22 +638,33 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
}
-int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
- const struct i915_gpu_state *error)
+static void err_free_sgl(struct scatterlist *sgl)
+{
+ while (sgl) {
+ struct scatterlist *sg;
+
+ for (sg = sgl; !sg_is_chain(sg); sg++) {
+ kfree(sg_virt(sg));
+ if (sg_is_last(sg))
+ break;
+ }
+
+ sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
+ free_page((unsigned long)sgl);
+ sgl = sg;
+ }
+}
+
+static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
+ struct i915_gpu_state *error)
{
- struct drm_i915_private *dev_priv = m->i915;
struct drm_i915_error_object *obj;
struct timespec64 ts;
int i, j;
- if (!error) {
- err_printf(m, "No error state collected\n");
- return 0;
- }
-
if (*error->error_msg)
err_printf(m, "%s\n", error->error_msg);
- err_printf(m, "Kernel: " UTS_RELEASE "\n");
+ err_printf(m, "Kernel: %s\n", init_utsname()->release);
ts = ktime_to_timespec64(error->time);
err_printf(m, "Time: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
@@ -680,12 +694,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
- err_print_pciid(m, error->i915);
+ err_print_pciid(m, m->i915);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
- if (HAS_CSR(dev_priv)) {
- struct intel_csr *csr = &dev_priv->csr;
+ if (HAS_CSR(m->i915)) {
+ struct intel_csr *csr = &m->i915->csr;
err_printf(m, "DMC loaded: %s\n",
yesno(csr->dmc_payload != NULL));
@@ -705,22 +719,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
err_printf(m, "CCID: 0x%08x\n", error->ccid);
- err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
+ err_printf(m, "Missed interrupts: 0x%08lx\n",
+ m->i915->gpu_error.missed_irq_rings);
for (i = 0; i < error->nfence; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
- if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(m->i915) >= 6) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
- if (INTEL_GEN(dev_priv) >= 8)
+ if (INTEL_GEN(m->i915) >= 8)
err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
error->fault_data1, error->fault_data0);
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- if (IS_GEN7(dev_priv))
+ if (IS_GEN7(m->i915))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -742,7 +757,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
len += scnprintf(buf + len, sizeof(buf), "%s%s",
first ? "" : ", ",
- dev_priv->engine[j]->name);
+ m->i915->engine[j]->name);
first = 0;
}
scnprintf(buf + len, sizeof(buf), ")");
@@ -760,7 +775,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
obj = ee->batchbuffer;
if (obj) {
- err_puts(m, dev_priv->engine[i]->name);
+ err_puts(m, m->i915->engine[i]->name);
if (ee->context.pid)
err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)",
ee->context.comm,
@@ -772,16 +787,16 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
- print_error_obj(m, dev_priv->engine[i], NULL, obj);
+ print_error_obj(m, m->i915->engine[i], NULL, obj);
}
for (j = 0; j < ee->user_bo_count; j++)
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"user", ee->user_bo[j]);
if (ee->num_requests) {
err_printf(m, "%s --- %d requests\n",
- dev_priv->engine[i]->name,
+ m->i915->engine[i]->name,
ee->num_requests);
for (j = 0; j < ee->num_requests; j++)
error_print_request(m, " ",
@@ -791,10 +806,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (IS_ERR(ee->waiters)) {
err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
- dev_priv->engine[i]->name);
+ m->i915->engine[i]->name);
} else if (ee->num_waiters) {
err_printf(m, "%s --- %d waiters\n",
- dev_priv->engine[i]->name,
+ m->i915->engine[i]->name,
ee->num_waiters);
for (j = 0; j < ee->num_waiters; j++) {
err_printf(m, " seqno 0x%08x for %s [%d]\n",
@@ -804,22 +819,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"ringbuffer", ee->ringbuffer);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"HW Status", ee->hws_page);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"HW context", ee->ctx);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"WA context", ee->wa_ctx);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"WA batchbuffer", ee->wa_batchbuffer);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"NULL context", ee->default_state);
}
@@ -832,43 +847,107 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_print_capabilities(m, &error->device_info, &error->driver_caps);
err_print_params(m, &error->params);
err_print_uc(m, &error->uc);
+}
+
+static int err_print_to_sgl(struct i915_gpu_state *error)
+{
+ struct drm_i915_error_state_buf m;
+
+ if (IS_ERR(error))
+ return PTR_ERR(error);
+
+ if (READ_ONCE(error->sgl))
+ return 0;
+
+ memset(&m, 0, sizeof(m));
+ m.i915 = error->i915;
+
+ __err_print_to_sgl(&m, error);
- if (m->bytes == 0 && m->err)
- return m->err;
+ if (m.buf) {
+ __sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
+ m.bytes = 0;
+ m.buf = NULL;
+ }
+ if (m.cur) {
+ GEM_BUG_ON(m.end < m.cur);
+ sg_mark_end(m.cur - 1);
+ }
+ GEM_BUG_ON(m.sgl && !m.cur);
+
+ if (m.err) {
+ err_free_sgl(m.sgl);
+ return m.err;
+ }
+
+ if (cmpxchg(&error->sgl, NULL, m.sgl))
+ err_free_sgl(m.sgl);
return 0;
}
-int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
- struct drm_i915_private *i915,
- size_t count, loff_t pos)
+ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
+ char *buf, loff_t off, size_t rem)
{
- memset(ebuf, 0, sizeof(*ebuf));
- ebuf->i915 = i915;
+ struct scatterlist *sg;
+ size_t count;
+ loff_t pos;
+ int err;
- /* We need to have enough room to store any i915_error_state printf
- * so that we can move it to start position.
- */
- ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
- ebuf->buf = kmalloc(ebuf->size,
- GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+ if (!error || !rem)
+ return 0;
- if (ebuf->buf == NULL) {
- ebuf->size = PAGE_SIZE;
- ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
- }
+ err = err_print_to_sgl(error);
+ if (err)
+ return err;
- if (ebuf->buf == NULL) {
- ebuf->size = 128;
- ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
- }
+ sg = READ_ONCE(error->fit);
+ if (!sg || off < sg->dma_address)
+ sg = error->sgl;
+ if (!sg)
+ return 0;
- if (ebuf->buf == NULL)
- return -ENOMEM;
+ pos = sg->dma_address;
+ count = 0;
+ do {
+ size_t len, start;
- ebuf->start = pos;
+ if (sg_is_chain(sg)) {
+ sg = sg_chain_ptr(sg);
+ GEM_BUG_ON(sg_is_chain(sg));
+ }
- return 0;
+ len = sg->length;
+ if (pos + len <= off) {
+ pos += len;
+ continue;
+ }
+
+ start = sg->offset;
+ if (pos < off) {
+ GEM_BUG_ON(off - pos > len);
+ len -= off - pos;
+ start += off - pos;
+ pos = off;
+ }
+
+ len = min(len, rem);
+ GEM_BUG_ON(!len || len > sg->length);
+
+ memcpy(buf, page_address(sg_page(sg)) + start, len);
+
+ count += len;
+ pos += len;
+
+ buf += len;
+ rem -= len;
+ if (!rem) {
+ WRITE_ONCE(error->fit, sg);
+ break;
+ }
+ } while (!sg_is_last(sg++));
+
+ return count;
}
static void i915_error_object_free(struct drm_i915_error_object *obj)
@@ -941,6 +1020,7 @@ void __i915_gpu_state_free(struct kref *error_ref)
cleanup_params(error);
cleanup_uc_state(error);
+ err_free_sgl(error->sgl);
kfree(error);
}
@@ -999,7 +1079,6 @@ i915_error_object_create(struct drm_i915_private *i915,
}
compress_fini(&compress, dst);
- ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
return dst;
}
@@ -1268,7 +1347,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
engine);
- if (USES_PPGTT(dev_priv)) {
+ if (HAS_PPGTT(dev_priv)) {
int i;
ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
@@ -1492,7 +1571,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
if (HAS_BROKEN_CS_TLB(i915))
ee->wa_batchbuffer =
i915_error_object_create(i915,
- engine->scratch);
+ i915->gt.scratch);
request_record_user_bo(request, ee);
ee->ctx =
@@ -1785,6 +1864,14 @@ static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
return epoch;
}
+static void capture_finish(struct i915_gpu_state *error)
+{
+ struct i915_ggtt *ggtt = &error->i915->ggtt;
+ const u64 slot = ggtt->error_capture.start;
+
+ ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+}
+
static int capture(void *data)
{
struct i915_gpu_state *error = data;
@@ -1809,6 +1896,7 @@ static int capture(void *data)
error->epoch = capture_find_epoch(error);
+ capture_finish(error);
return 0;
}
@@ -1859,6 +1947,7 @@ void i915_capture_error_state(struct drm_i915_private *i915,
error = i915_capture_gpu_state(i915);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+ i915_disable_error_state(i915, -ENOMEM);
return;
}
@@ -1914,5 +2003,14 @@ void i915_reset_error_state(struct drm_i915_private *i915)
i915->gpu_error.first_error = NULL;
spin_unlock_irq(&i915->gpu_error.lock);
- i915_gpu_state_put(error);
+ if (!IS_ERR(error))
+ i915_gpu_state_put(error);
+}
+
+void i915_disable_error_state(struct drm_i915_private *i915, int err)
+{
+ spin_lock_irq(&i915->gpu_error.lock);
+ if (!i915->gpu_error.first_error)
+ i915->gpu_error.first_error = ERR_PTR(err);
+ spin_unlock_irq(&i915->gpu_error.lock);
}
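With the error-state rework above, the formatted report is accumulated in kmalloc'ed chunks tracked by a chained scatterlist, and i915_gpu_state_copy_to_buffer() walks those chunks to service a read at an arbitrary offset and length. The walk can be modelled in plain C with an array of chunks standing in for the chained sg entries; the strings and sizes are invented for the example:

#include <stdio.h>
#include <string.h>

struct chunk { const char *data; size_t len, pos; };	/* pos plays the role of sg->dma_address */

/* copy up to rem bytes starting at absolute offset off */
static size_t copy_to_buffer(const struct chunk *c, int nchunks,
			     char *buf, size_t off, size_t rem)
{
	size_t count = 0;

	for (int i = 0; i < nchunks && rem; i++) {
		if (c[i].pos + c[i].len <= off)
			continue;			/* chunk lies entirely before the window */

		size_t start = off > c[i].pos ? off - c[i].pos : 0;
		size_t len = c[i].len - start;

		if (len > rem)
			len = rem;
		memcpy(buf + count, c[i].data + start, len);
		count += len;
		off += len;
		rem -= len;
	}
	return count;
}

int main(void)
{
	struct chunk chunks[] = {
		{ "GPU HANG: ", 10, 0 },
		{ "ecode 9:0, reason: hang", 23, 10 },
	};
	char buf[16] = { 0 };
	size_t n = copy_to_buffer(chunks, 2, buf, 4, sizeof(buf) - 1);

	printf("%zu '%s'\n", n, buf);	/* prints: 15 'HANG: ecode 9:0' */
	return 0;
}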
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 8710fb18ed74..ff2652bbb0b0 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -192,6 +192,8 @@ struct i915_gpu_state {
} *active_bo[I915_NUM_ENGINES], *pinned_bo;
u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
struct i915_address_space *active_vm[I915_NUM_ENGINES];
+
+ struct scatterlist *sgl, *fit;
};
struct i915_gpu_error {
@@ -298,29 +300,20 @@ struct i915_gpu_error {
struct drm_i915_error_state_buf {
struct drm_i915_private *i915;
- unsigned int bytes;
- unsigned int size;
+ struct scatterlist *sgl, *cur, *end;
+
+ char *buf;
+ size_t bytes;
+ size_t size;
+ loff_t iter;
+
int err;
- u8 *buf;
- loff_t start;
- loff_t pos;
};
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
-int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
- const struct i915_gpu_state *gpu);
-int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
- struct drm_i915_private *i915,
- size_t count, loff_t pos);
-
-static inline void
-i915_error_state_buf_release(struct drm_i915_error_state_buf *eb)
-{
- kfree(eb->buf);
-}
struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
@@ -334,6 +327,9 @@ i915_gpu_state_get(struct i915_gpu_state *gpu)
return gpu;
}
+ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
+ char *buf, loff_t offset, size_t count);
+
void __i915_gpu_state_free(struct kref *kref);
static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
{
@@ -343,6 +339,7 @@ static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
+void i915_disable_error_state(struct drm_i915_private *i915, int err);
#else
@@ -355,13 +352,18 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
static inline struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}
+static inline void i915_disable_error_state(struct drm_i915_private *i915,
+ int err)
+{
+}
+
#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
#endif /* _I915_GPU_ERROR_H_ */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2e242270e270..d447d7d508f4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2887,21 +2887,39 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
return ret;
}
+static inline u32 gen8_master_intr_disable(void __iomem * const regs)
+{
+ raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
+
+ /*
+ * Now with master disabled, get a sample of level indications
+ * for this interrupt. Indications will be cleared on related acks.
+ * New indications can and will light up during processing,
+ * and will generate a new interrupt after the master is re-enabled.
+ */
+ return raw_reg_read(regs, GEN8_MASTER_IRQ);
+}
+
+static inline void gen8_master_intr_enable(void __iomem * const regs)
+{
+ raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+}
+
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = to_i915(arg);
+ void __iomem * const regs = dev_priv->regs;
u32 master_ctl;
u32 gt_iir[4];
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
- master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
- master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
- if (!master_ctl)
+ master_ctl = gen8_master_intr_disable(regs);
+ if (!master_ctl) {
+ gen8_master_intr_enable(regs);
return IRQ_NONE;
-
- I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
+ }
/* Find, clear, then process each source of interrupt */
gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
@@ -2913,7 +2931,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
enable_rpm_wakeref_asserts(dev_priv);
}
- I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+ gen8_master_intr_enable(regs);
gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
@@ -3111,6 +3129,24 @@ gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
intel_opregion_asle_intr(dev_priv);
}
+static inline u32 gen11_master_intr_disable(void __iomem * const regs)
+{
+ raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
+
+ /*
+ * Now with master disabled, get a sample of level indications
+ * for this interrupt. Indications will be cleared on related acks.
+ * New indications can and will light up during processing,
+ * and will generate a new interrupt after the master is re-enabled.
+ */
+ return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
+}
+
+static inline void gen11_master_intr_enable(void __iomem * const regs)
+{
+ raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
+}
+
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = to_i915(arg);
@@ -3121,13 +3157,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
if (!intel_irqs_enabled(i915))
return IRQ_NONE;
- master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
- master_ctl &= ~GEN11_MASTER_IRQ;
- if (!master_ctl)
+ master_ctl = gen11_master_intr_disable(regs);
+ if (!master_ctl) {
+ gen11_master_intr_enable(regs);
return IRQ_NONE;
-
- /* Disable interrupts. */
- raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
+ }
/* Find, clear, then process each source of interrupt. */
gen11_gt_irq_handler(i915, master_ctl);
@@ -3147,8 +3181,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
- /* Acknowledge and enable interrupts. */
- raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
+ gen11_master_intr_enable(regs);
gen11_gu_misc_irq_handler(i915, gu_misc_iir);
@@ -3598,8 +3631,7 @@ static void gen8_irq_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
- I915_WRITE(GEN8_MASTER_IRQ, 0);
- POSTING_READ(GEN8_MASTER_IRQ);
+ gen8_master_intr_disable(dev_priv->regs);
gen8_gt_irq_reset(dev_priv);
@@ -3641,13 +3673,15 @@ static void gen11_irq_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- I915_WRITE(GEN11_GFX_MSTR_IRQ, 0);
- POSTING_READ(GEN11_GFX_MSTR_IRQ);
+ gen11_master_intr_disable(dev_priv->regs);
gen11_gt_irq_reset(dev_priv);
I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
+ I915_WRITE(EDP_PSR_IMR, 0xffffffff);
+ I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
@@ -4244,8 +4278,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev);
- I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- POSTING_READ(GEN8_MASTER_IRQ);
+ gen8_master_intr_enable(dev_priv->regs);
return 0;
}
@@ -4307,8 +4340,7 @@ static int gen11_irq_postinstall(struct drm_device *dev)
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
- POSTING_READ(GEN11_GFX_MSTR_IRQ);
+ gen11_master_intr_enable(dev_priv->regs);
return 0;
}
@@ -4834,6 +4866,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev_priv->display_irqs_enabled = false;
dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ /* If we have MST support, we want to avoid doing short HPD IRQ storm
+ * detection, as short HPD storms will occur as a natural part of
+ * sideband messaging with MST.
+ * On older platforms however, IRQ storms can occur with both long and
+ * short pulses, as seen on some G4x systems.
+ */
+ dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
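The gen8 and gen11 interrupt handlers above now share a disable / sample / handle / re-enable pattern: the master control register is written to zero, read back to snapshot the pending level indications, the individual sources are processed, and only then is the master bit re-armed. A stub-register sketch of that flow, with an entirely fake register model invented for the example:

#include <stdio.h>
#include <stdint.h>

static uint32_t pending = 0x5;		/* fake: two pending interrupt sources */
static uint32_t master_enable = 1;	/* fake: bit 31 models the master control */

static uint32_t reg_read(void)        { return pending | (master_enable << 31); }
static void     reg_write(uint32_t v) { master_enable = v >> 31; }

static uint32_t master_intr_disable(void)
{
	reg_write(0);		/* mask the master interrupt... */
	return reg_read();	/* ...then sample the level indications */
}

static void master_intr_enable(void)
{
	reg_write(1u << 31);	/* re-arm the master control bit */
}

int main(void)
{
	uint32_t master_ctl = master_intr_disable();

	if (!master_ctl) {	/* spurious: nothing pending, re-enable and bail */
		master_intr_enable();
		return 0;
	}

	printf("handling sources 0x%x\n", master_ctl & ~(1u << 31));
	master_intr_enable();
	return 0;
}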
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/i915_oa_bdw.c
index 4abd2e8b5083..4acdb94555b7 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.c
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.h b/drivers/gpu/drm/i915/i915_oa_bdw.h
index b812d16162ac..0e667f1a8aa1 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.h
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_BDW_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/i915_oa_bxt.c
index cb6f304ec16a..a44195c39923 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.c
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.h b/drivers/gpu/drm/i915/i915_oa_bxt.h
index 690b963a2383..679e92cf4f1d 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.h
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_BXT_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
index 8641ae30e343..7f60d51b8761 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
index 1f3268ef2ea2..4d6025559bbe 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_CFLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
index 792facdb6702..a92c38e3a0ce 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
index c13b5aac01b9..0697f4077402 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_CFLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/i915_oa_chv.c
index 556febb2c3c8..71ec889a0114 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.c
+++ b/drivers/gpu/drm/i915/i915_oa_chv.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.h b/drivers/gpu/drm/i915/i915_oa_chv.h
index b9622496979e..0986eae3135f 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.h
+++ b/drivers/gpu/drm/i915/i915_oa_chv.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_CHV_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c
index ba9140c87cc0..5c23d883d6c9 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.c
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.h b/drivers/gpu/drm/i915/i915_oa_cnl.h
index fb918b131105..e830a406aff2 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.h
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_CNL_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/i915_oa_glk.c
index 971db587957c..4bdda66df7d2 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.c
+++ b/drivers/gpu/drm/i915/i915_oa_glk.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.h b/drivers/gpu/drm/i915/i915_oa_glk.h
index 63bd113f4bc9..06dedf991edb 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.h
+++ b/drivers/gpu/drm/i915/i915_oa_glk.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_GLK_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c
index 434a9b96d7ab..cc6526fdd2bd 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.c
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h
index 74d03439c157..3d0c870cd0bd 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.h
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_HSW_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/i915_oa_icl.c
index a5667926e3de..baa51427a543 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.c
+++ b/drivers/gpu/drm/i915/i915_oa_icl.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/i915_oa_icl.h
index ae1c24aafe4f..24eaa97d61ba 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.h
+++ b/drivers/gpu/drm/i915/i915_oa_icl.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_ICL_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
index 2fa98a40bbc8..168e49ab0d4d 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
index 25b803546dc1..a55398a904de 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_KBLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
index f3cb6679a1bc..6ffa553c388e 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
index d5b5b5c1923e..3ddd3483b7cc 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_KBLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
index bf8b8cd8a50d..7ce6ee851d43 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
index fe1aa2c03958..be6256037239 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_SKLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
index ae534c7c8135..086ca2631e1c 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
index 06746b2616c8..650beb068e56 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_SKLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
index 817fba2d82df..b291a6eb8a87 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
index 944fd525c8b1..8dcf849d131e 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_SKLGT4_H__
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 295e981e4a39..2e0356561839 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -82,10 +82,6 @@ i915_param_named_unsafe(enable_hangcheck, bool, 0644,
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
-i915_param_named_unsafe(enable_ppgtt, int, 0400,
- "Override PPGTT usage. "
- "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
-
i915_param_named_unsafe(enable_psr, int, 0600,
"Enable PSR "
"(0=disabled, 1=enabled) "
@@ -171,8 +167,10 @@ i915_param_named_unsafe(inject_load_failure, uint, 0400,
i915_param_named(enable_dpcd_backlight, bool, 0600,
"Enable support for DPCD backlight control (default:false)");
+#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
+#endif
static __always_inline void _print_param(struct drm_printer *p,
const char *name,
@@ -188,7 +186,8 @@ static __always_inline void _print_param(struct drm_printer *p,
else if (!__builtin_strcmp(type, "char *"))
drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
else
- BUILD_BUG();
+ WARN_ONCE(1, "no printer defined for param type %s (i915.%s)\n",
+ type, name);
}
/**
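
Note: _print_param() dispatches on the stringified C type of each module parameter, and the hunk above turns the fallback for an unhandled type from a build failure (BUILD_BUG()) into a one-time runtime warning. A stand-alone illustration of the same dispatch-by-type-name idea, outside the kernel's macro plumbing (all names here are invented):

        #include <stdbool.h>
        #include <stdio.h>
        #include <string.h>

        /* Print "i915.<name>=<value>" based on a stringified type name,
         * warning instead of failing when no printer is defined. */
        static void example_print_param(const char *name, const char *type,
                                        const void *x)
        {
                if (!strcmp(type, "bool"))
                        printf("i915.%s=%s\n", name, *(const bool *)x ? "yes" : "no");
                else if (!strcmp(type, "int"))
                        printf("i915.%s=%d\n", name, *(const int *)x);
                else if (!strcmp(type, "unsigned int"))
                        printf("i915.%s=%u\n", name, *(const unsigned int *)x);
                else if (!strcmp(type, "char *"))
                        printf("i915.%s=%s\n", name, *(const char *const *)x);
                else
                        fprintf(stderr, "no printer defined for param type %s (i915.%s)\n",
                                type, name);
        }

        int main(void)
        {
                int enable_psr = -1;

                example_print_param("enable_psr", "int", &enable_psr);
                return 0;
        }
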
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 6c4d4a21474b..7e56c516c815 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -41,7 +41,6 @@ struct drm_printer;
param(int, vbt_sdvo_panel_type, -1) \
param(int, enable_dc, -1) \
param(int, enable_fbc, -1) \
- param(int, enable_ppgtt, -1) \
param(int, enable_psr, -1) \
param(int, disable_power_well, -1) \
param(int, enable_ips, 1) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index d6f7b9fe1d26..6350db5503cd 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -33,19 +33,30 @@
#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
#define GEN_DEFAULT_PIPEOFFSETS \
- .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
- PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
- .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
- TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
- .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ [TRANSCODER_C] = PIPE_C_OFFSET, \
+ [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+ [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
+ }
#define GEN_CHV_PIPEOFFSETS \
- .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
- CHV_PIPE_C_OFFSET }, \
- .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
- CHV_TRANSCODER_C_OFFSET, }, \
- .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
- CHV_PALETTE_C_OFFSET }
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ [TRANSCODER_C] = CHV_PIPE_C_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ [TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \
+ }
#define CURSOR_OFFSETS \
.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
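
Note: the pipe/transcoder offset macros above switch from positional array initializers to designated initializers indexed by transcoder, so each offset stays attached to its enum slot even if the enum gains or reorders entries, and unmentioned slots default to zero. A small illustration with invented names and placeholder offset values:

        enum example_transcoder { TRANS_A, TRANS_B, TRANS_C, TRANS_EDP, TRANS_MAX };

        /* Designated initializers: the index names the slot explicitly. */
        static const unsigned int example_pipe_offsets[TRANS_MAX] = {
                [TRANS_A]   = 0x70000,  /* placeholder values */
                [TRANS_B]   = 0x71000,
                [TRANS_C]   = 0x72000,
                [TRANS_EDP] = 0x7f000,
        };
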
@@ -68,8 +79,9 @@
#define GEN2_FEATURES \
GEN(2), \
.num_pipes = 1, \
- .has_overlay = 1, .overlay_needs_physical = 1, \
- .has_gmch_display = 1, \
+ .display.has_overlay = 1, \
+ .display.overlay_needs_physical = 1, \
+ .display.has_gmch_display = 1, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
.ring_mask = RENDER_RING, \
@@ -82,7 +94,8 @@
static const struct intel_device_info intel_i830_info = {
GEN2_FEATURES,
PLATFORM(INTEL_I830),
- .is_mobile = 1, .cursor_needs_physical = 1,
+ .is_mobile = 1,
+ .display.cursor_needs_physical = 1,
.num_pipes = 2, /* legal, last one wins */
};
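
Note: from GEN2_FEATURES onward, display-only capability bits move into a nested .display sub-structure of the device info, so initializers spell out .display.has_fbc and friends instead of flat flags, and the display block can be inspected or compared as a unit. A sketch of the pattern with invented names:

        /* Illustrative only: grouping display capability bits in a nested
         * struct so designated initializers read .display.<flag>. */
        struct example_display_caps {
                unsigned int has_hotplug:1;
                unsigned int has_fbc:1;
                unsigned int has_psr:1;
        };

        struct example_device_info {
                unsigned int is_mobile:1;
                struct example_display_caps display;
        };

        static const struct example_device_info example_i915gm_info = {
                .is_mobile = 1,
                .display.has_fbc = 1,
        };
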
@@ -96,8 +109,8 @@ static const struct intel_device_info intel_i85x_info = {
PLATFORM(INTEL_I85X),
.is_mobile = 1,
.num_pipes = 2, /* legal, last one wins */
- .cursor_needs_physical = 1,
- .has_fbc = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_fbc = 1,
};
static const struct intel_device_info intel_i865g_info = {
@@ -108,7 +121,7 @@ static const struct intel_device_info intel_i865g_info = {
#define GEN3_FEATURES \
GEN(3), \
.num_pipes = 2, \
- .has_gmch_display = 1, \
+ .display.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -120,8 +133,9 @@ static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I915G),
.has_coherent_ggtt = false,
- .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -130,10 +144,11 @@ static const struct intel_device_info intel_i915gm_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I915GM),
.is_mobile = 1,
- .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .supports_tv = 1,
- .has_fbc = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
+ .display.supports_tv = 1,
+ .display.has_fbc = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -141,8 +156,10 @@ static const struct intel_device_info intel_i915gm_info = {
static const struct intel_device_info intel_i945g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I945G),
- .has_hotplug = 1, .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
+ .display.has_hotplug = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -151,10 +168,12 @@ static const struct intel_device_info intel_i945gm_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I945GM),
.is_mobile = 1,
- .has_hotplug = 1, .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .supports_tv = 1,
- .has_fbc = 1,
+ .display.has_hotplug = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
+ .display.supports_tv = 1,
+ .display.has_fbc = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -162,23 +181,23 @@ static const struct intel_device_info intel_i945gm_info = {
static const struct intel_device_info intel_g33_info = {
GEN3_FEATURES,
PLATFORM(INTEL_G33),
- .has_hotplug = 1,
- .has_overlay = 1,
+ .display.has_hotplug = 1,
+ .display.has_overlay = 1,
};
static const struct intel_device_info intel_pineview_info = {
GEN3_FEATURES,
PLATFORM(INTEL_PINEVIEW),
.is_mobile = 1,
- .has_hotplug = 1,
- .has_overlay = 1,
+ .display.has_hotplug = 1,
+ .display.has_overlay = 1,
};
#define GEN4_FEATURES \
GEN(4), \
.num_pipes = 2, \
- .has_hotplug = 1, \
- .has_gmch_display = 1, \
+ .display.has_hotplug = 1, \
+ .display.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -189,7 +208,7 @@ static const struct intel_device_info intel_pineview_info = {
static const struct intel_device_info intel_i965g_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965G),
- .has_overlay = 1,
+ .display.has_overlay = 1,
.hws_needs_physical = 1,
.has_snoop = false,
};
@@ -197,9 +216,10 @@ static const struct intel_device_info intel_i965g_info = {
static const struct intel_device_info intel_i965gm_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965GM),
- .is_mobile = 1, .has_fbc = 1,
- .has_overlay = 1,
- .supports_tv = 1,
+ .is_mobile = 1,
+ .display.has_fbc = 1,
+ .display.has_overlay = 1,
+ .display.supports_tv = 1,
.hws_needs_physical = 1,
.has_snoop = false,
};
@@ -213,15 +233,16 @@ static const struct intel_device_info intel_g45_info = {
static const struct intel_device_info intel_gm45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_GM45),
- .is_mobile = 1, .has_fbc = 1,
- .supports_tv = 1,
+ .is_mobile = 1,
+ .display.has_fbc = 1,
+ .display.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
#define GEN5_FEATURES \
GEN(5), \
.num_pipes = 2, \
- .has_hotplug = 1, \
+ .display.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -239,20 +260,21 @@ static const struct intel_device_info intel_ironlake_d_info = {
static const struct intel_device_info intel_ironlake_m_info = {
GEN5_FEATURES,
PLATFORM(INTEL_IRONLAKE),
- .is_mobile = 1, .has_fbc = 1,
+ .is_mobile = 1,
+ .display.has_fbc = 1,
};
#define GEN6_FEATURES \
GEN(6), \
.num_pipes = 2, \
- .has_hotplug = 1, \
- .has_fbc = 1, \
+ .display.has_hotplug = 1, \
+ .display.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
- .has_aliasing_ppgtt = 1, \
+ .ppgtt = INTEL_PPGTT_ALIASING, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
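
Note: here and in the following hunks, the mutually exclusive has_aliasing_ppgtt / has_full_ppgtt / has_full_48bit_ppgtt booleans collapse into a single .ppgtt level (INTEL_PPGTT_ALIASING, INTEL_PPGTT_FULL, INTEL_PPGTT_FULL_4LVL). A hedged sketch of how such an enum can replace the flag tests; names prefixed example_ are invented for illustration:

        enum example_ppgtt_type {
                EXAMPLE_PPGTT_NONE,
                EXAMPLE_PPGTT_ALIASING,
                EXAMPLE_PPGTT_FULL,
                EXAMPLE_PPGTT_FULL_4LVL,
        };

        /* Ordering the levels lets "at least full PPGTT" be a single compare. */
        static inline bool example_has_full_ppgtt(enum example_ppgtt_type ppgtt)
        {
                return ppgtt >= EXAMPLE_PPGTT_FULL;
        }
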
@@ -290,15 +312,14 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
#define GEN7_FEATURES \
GEN(7), \
.num_pipes = 3, \
- .has_hotplug = 1, \
- .has_fbc = 1, \
+ .display.has_hotplug = 1, \
+ .display.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
- .has_aliasing_ppgtt = 1, \
- .has_full_ppgtt = 1, \
+ .ppgtt = INTEL_PPGTT_FULL, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
IVB_CURSOR_OFFSETS
@@ -349,10 +370,9 @@ static const struct intel_device_info intel_valleyview_info = {
.num_pipes = 2,
.has_runtime_pm = 1,
.has_rc6 = 1,
- .has_gmch_display = 1,
- .has_hotplug = 1,
- .has_aliasing_ppgtt = 1,
- .has_full_ppgtt = 1,
+ .display.has_gmch_display = 1,
+ .display.has_hotplug = 1,
+ .ppgtt = INTEL_PPGTT_FULL,
.has_snoop = true,
.has_coherent_ggtt = false,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
@@ -365,10 +385,10 @@ static const struct intel_device_info intel_valleyview_info = {
#define G75_FEATURES \
GEN7_FEATURES, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
- .has_ddi = 1, \
+ .display.has_ddi = 1, \
.has_fpga_dbg = 1, \
- .has_psr = 1, \
- .has_dp_mst = 1, \
+ .display.has_psr = 1, \
+ .display.has_dp_mst = 1, \
.has_rc6p = 0 /* RC6p removed-by HSW */, \
.has_runtime_pm = 1
@@ -399,7 +419,7 @@ static const struct intel_device_info intel_haswell_gt3_info = {
.page_sizes = I915_GTT_PAGE_SIZE_4K | \
I915_GTT_PAGE_SIZE_2M, \
.has_logical_ring_contexts = 1, \
- .has_full_48bit_ppgtt = 1, \
+ .ppgtt = INTEL_PPGTT_FULL_4LVL, \
.has_64bit_reloc = 1, \
.has_reset_engine = 1
@@ -435,16 +455,15 @@ static const struct intel_device_info intel_cherryview_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
.num_pipes = 3,
- .has_hotplug = 1,
+ .display.has_hotplug = 1,
.is_lp = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_logical_ring_contexts = 1,
- .has_gmch_display = 1,
- .has_aliasing_ppgtt = 1,
- .has_full_ppgtt = 1,
+ .display.has_gmch_display = 1,
+ .ppgtt = INTEL_PPGTT_FULL,
.has_reset_engine = 1,
.has_snoop = true,
.has_coherent_ggtt = false,
@@ -465,13 +484,15 @@ static const struct intel_device_info intel_cherryview_info = {
GEN(9), \
GEN9_DEFAULT_PAGE_SIZES, \
.has_logical_ring_preemption = 1, \
- .has_csr = 1, \
+ .display.has_csr = 1, \
.has_guc = 1, \
- .has_ipc = 1, \
+ .display.has_ipc = 1, \
.ddb_size = 896
#define SKL_PLATFORM \
GEN9_FEATURES, \
+ /* Display WA #0477 WaDisableIPC: skl */ \
+ .display.has_ipc = 0, \
PLATFORM(INTEL_SKYLAKE)
static const struct intel_device_info intel_skylake_gt1_info = {
@@ -502,29 +523,27 @@ static const struct intel_device_info intel_skylake_gt4_info = {
#define GEN9_LP_FEATURES \
GEN(9), \
.is_lp = 1, \
- .has_hotplug = 1, \
+ .display.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
.num_pipes = 3, \
.has_64bit_reloc = 1, \
- .has_ddi = 1, \
+ .display.has_ddi = 1, \
.has_fpga_dbg = 1, \
- .has_fbc = 1, \
- .has_psr = 1, \
+ .display.has_fbc = 1, \
+ .display.has_psr = 1, \
.has_runtime_pm = 1, \
.has_pooled_eu = 0, \
- .has_csr = 1, \
+ .display.has_csr = 1, \
.has_rc6 = 1, \
- .has_dp_mst = 1, \
+ .display.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
.has_guc = 1, \
- .has_aliasing_ppgtt = 1, \
- .has_full_ppgtt = 1, \
- .has_full_48bit_ppgtt = 1, \
+ .ppgtt = INTEL_PPGTT_FULL_4LVL, \
.has_reset_engine = 1, \
.has_snoop = true, \
.has_coherent_ggtt = false, \
- .has_ipc = 1, \
+ .display.has_ipc = 1, \
GEN9_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS, \
@@ -598,6 +617,22 @@ static const struct intel_device_info intel_cannonlake_info = {
#define GEN11_FEATURES \
GEN10_FEATURES, \
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ [TRANSCODER_C] = PIPE_C_OFFSET, \
+ [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
+ [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+ [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
+ [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
+ }, \
GEN(11), \
.ddb_size = 2048, \
.has_logical_ring_elsq = 1
@@ -663,7 +698,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
- INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info),
+ INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
@@ -671,6 +706,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
+ INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_ICL_11_IDS(&intel_icelake_11_info),
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 664b96bb65a3..4529edfdcfc8 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -890,8 +890,8 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
dev_priv->perf.oa.period_exponent);
- dev_priv->perf.oa.ops.oa_disable(dev_priv);
- dev_priv->perf.oa.ops.oa_enable(dev_priv);
+ dev_priv->perf.oa.ops.oa_disable(stream);
+ dev_priv->perf.oa.ops.oa_enable(stream);
/*
* Note: .oa_enable() is expected to re-init the oabuffer and
@@ -1114,8 +1114,8 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
dev_priv->perf.oa.period_exponent);
- dev_priv->perf.oa.ops.oa_disable(dev_priv);
- dev_priv->perf.oa.ops.oa_enable(dev_priv);
+ dev_priv->perf.oa.ops.oa_disable(stream);
+ dev_priv->perf.oa.ops.oa_enable(stream);
oastatus1 = I915_READ(GEN7_OASTATUS1);
}
@@ -1528,8 +1528,6 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
goto err_unpin;
}
- dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);
-
DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
dev_priv->perf.oa.oa_buffer.vaddr);
@@ -1563,9 +1561,11 @@ static void config_oa_regs(struct drm_i915_private *dev_priv,
}
}
-static int hsw_enable_metric_set(struct drm_i915_private *dev_priv,
- const struct i915_oa_config *oa_config)
+static int hsw_enable_metric_set(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ const struct i915_oa_config *oa_config = stream->oa_config;
+
/* PRM:
*
* OA unit is using “crclk” for its functionality. When trunk
@@ -1767,9 +1767,10 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
return 0;
}
-static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
- const struct i915_oa_config *oa_config)
+static int gen8_enable_metric_set(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ const struct i915_oa_config *oa_config = stream->oa_config;
int ret;
/*
@@ -1837,10 +1838,10 @@ static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
}
-static void gen7_oa_enable(struct drm_i915_private *dev_priv)
+static void gen7_oa_enable(struct i915_perf_stream *stream)
{
- struct i915_gem_context *ctx =
- dev_priv->perf.oa.exclusive_stream->ctx;
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_gem_context *ctx = stream->ctx;
u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
bool periodic = dev_priv->perf.oa.periodic;
u32 period_exponent = dev_priv->perf.oa.period_exponent;
@@ -1867,8 +1868,9 @@ static void gen7_oa_enable(struct drm_i915_private *dev_priv)
GEN7_OACONTROL_ENABLE);
}
-static void gen8_oa_enable(struct drm_i915_private *dev_priv)
+static void gen8_oa_enable(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
u32 report_format = dev_priv->perf.oa.oa_buffer.format;
/*
@@ -1905,7 +1907,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- dev_priv->perf.oa.ops.oa_enable(dev_priv);
+ dev_priv->perf.oa.ops.oa_enable(stream);
if (dev_priv->perf.oa.periodic)
hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
@@ -1913,8 +1915,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
HRTIMER_MODE_REL_PINNED);
}
-static void gen7_oa_disable(struct drm_i915_private *dev_priv)
+static void gen7_oa_disable(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
I915_WRITE(GEN7_OACONTROL, 0);
if (intel_wait_for_register(dev_priv,
GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
@@ -1922,8 +1926,10 @@ static void gen7_oa_disable(struct drm_i915_private *dev_priv)
DRM_ERROR("wait for OA to be disabled timed out\n");
}
-static void gen8_oa_disable(struct drm_i915_private *dev_priv)
+static void gen8_oa_disable(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
I915_WRITE(GEN8_OACONTROL, 0);
if (intel_wait_for_register(dev_priv,
GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
@@ -1943,7 +1949,7 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- dev_priv->perf.oa.ops.oa_disable(dev_priv);
+ dev_priv->perf.oa.ops.oa_disable(stream);
if (dev_priv->perf.oa.periodic)
hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
@@ -1998,7 +2004,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EINVAL;
}
- if (!dev_priv->perf.oa.ops.init_oa_buffer) {
+ if (!dev_priv->perf.oa.ops.enable_metric_set) {
DRM_DEBUG("OA unit not supported\n");
return -ENODEV;
}
@@ -2092,8 +2098,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
if (ret)
goto err_lock;
- ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
- stream->oa_config);
+ ret = dev_priv->perf.oa.ops.enable_metric_set(stream);
if (ret) {
DRM_DEBUG("Unable to enable metric set\n");
goto err_enable;
@@ -3387,7 +3392,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ops.is_valid_mux_reg =
hsw_is_valid_mux_addr;
dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
- dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
@@ -3406,7 +3410,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
*/
dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
- dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
dev_priv->perf.oa.ops.read = gen8_oa_read;
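The i915_perf hunks above follow one pattern: the OA callbacks now take the i915_perf_stream and derive dev_priv (and the active OA config) from it, and with init_oa_buffer dropped, enable_metric_set becomes the "OA supported" sentinel checked in i915_oa_stream_init(). A rough sketch of the resulting callback shapes, assuming the usual ops table layout (the real struct i915_oa_ops lives in i915_drv.h and is not part of these hunks):

	struct i915_oa_ops_sketch {
		int (*enable_metric_set)(struct i915_perf_stream *stream);
		void (*oa_enable)(struct i915_perf_stream *stream);
		void (*oa_disable)(struct i915_perf_stream *stream);
		/* disable_metric_set and friends keep taking dev_priv */
		void (*disable_metric_set)(struct drm_i915_private *dev_priv);
	};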
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 3f502eef2431..6fc4b8eeab42 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -27,8 +27,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
slice_length = sizeof(sseu->slice_mask);
subslice_length = sseu->max_slices *
- DIV_ROUND_UP(sseu->max_subslices,
- sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE);
+ DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
eu_length = sseu->max_slices * sseu->max_subslices *
DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e31c27e45734..0a7d60509ca7 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -157,20 +157,37 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/*
* Named helper wrappers around _PICK_EVEN() and _PICK().
*/
-#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
-#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
-#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
-#define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b)
-#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b)
-#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
-#define _PORT(port, a, b) _PICK_EVEN(port, a, b)
-#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
-#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b)
-#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
-#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
-#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
+#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
+#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
+#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b)
+#define _PORT(port, a, b) _PICK_EVEN(port, a, b)
+#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b)
+
+#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
+#define _MMIO_PLANE(plane, a, b) _MMIO(_PLANE(plane, a, b))
+#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
+#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
+#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
+
+#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
+
+#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
+#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
+#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
+
+/*
+ * Device info offset array based helpers for groups of registers with unevenly
+ * spaced base offsets.
+ */
+#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \
+ dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
+ dev_priv->info.display_mmio_offset)
+#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
+ dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
+ dev_priv->info.display_mmio_offset)
+#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
+ dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
+ dev_priv->info.display_mmio_offset)
#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
#define _MASKED_FIELD(mask, value) ({ \
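Worked example for the relocated _MMIO_PIPE2()/_MMIO_TRANS2()/_CURSOR2() helpers in the hunk above, assuming the conventional offsets (pipe_offsets[PIPE_A] = 0x70000, pipe_offsets[PIPE_B] = 0x71000, _PIPEACONF = 0x70008, display_mmio_offset = 0 -- illustrative values, not restated in this hunk):

	/*
	 *   _MMIO_PIPE2(PIPE_B, _PIPEACONF)
	 *     = _MMIO(pipe_offsets[PIPE_B] - pipe_offsets[PIPE_A] + _PIPEACONF + 0)
	 *     = _MMIO(0x71000 - 0x70000 + 0x70008)
	 *     = _MMIO(0x71008)
	 *
	 * The PIPE_A-relative register is rebased through the per-device offset
	 * table, which is what lets GEN11_FEATURES extend the same tables with
	 * DSI transcoder entries instead of adding new register macros.
	 */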
@@ -1631,35 +1648,6 @@ enum i915_power_well_id {
#define PHY_RESERVED (1 << 7)
#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
-#define CNL_PORT_CL1CM_DW5 _MMIO(0x162014)
-#define CL_POWER_DOWN_ENABLE (1 << 4)
-#define SUS_CLOCK_CONFIG (3 << 0)
-
-#define _ICL_PORT_CL_DW5_A 0x162014
-#define _ICL_PORT_CL_DW5_B 0x6C014
-#define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \
- _ICL_PORT_CL_DW5_B)
-
-#define _CNL_PORT_CL_DW10_A 0x162028
-#define _ICL_PORT_CL_DW10_B 0x6c028
-#define ICL_PORT_CL_DW10(port) _MMIO_PORT(port, \
- _CNL_PORT_CL_DW10_A, \
- _ICL_PORT_CL_DW10_B)
-#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
-#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
-#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
-#define PWR_UP_ALL_LANES (0x0 << 4)
-#define PWR_DOWN_LN_3_2_1 (0xe << 4)
-#define PWR_DOWN_LN_3_2 (0xc << 4)
-#define PWR_DOWN_LN_3 (0x8 << 4)
-#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
-#define PWR_DOWN_LN_1_0 (0x3 << 4)
-#define PWR_DOWN_LN_1 (0x2 << 4)
-#define PWR_DOWN_LN_3_1 (0xa << 4)
-#define PWR_DOWN_LN_3_1_0 (0xb << 4)
-#define PWR_DOWN_LN_MASK (0xf << 4)
-#define PWR_DOWN_LN_SHIFT 4
-
#define _PORT_CL1CM_DW9_A 0x162024
#define _PORT_CL1CM_DW9_BC 0x6C024
#define IREF0RC_OFFSET_SHIFT 8
@@ -1672,13 +1660,6 @@ enum i915_power_well_id {
#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
-#define _ICL_PORT_CL_DW12_A 0x162030
-#define _ICL_PORT_CL_DW12_B 0x6C030
-#define ICL_LANE_ENABLE_AUX (1 << 0)
-#define ICL_PORT_CL_DW12(port) _MMIO_PORT((port), \
- _ICL_PORT_CL_DW12_A, \
- _ICL_PORT_CL_DW12_B)
-
#define _PORT_CL1CM_DW28_A 0x162070
#define _PORT_CL1CM_DW28_BC 0x6C070
#define OCL1_POWER_DOWN_EN (1 << 23)
@@ -1691,6 +1672,74 @@ enum i915_power_well_id {
#define OCL2_LDOFUSE_PWR_DIS (1 << 6)
#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
+/*
+ * CNL/ICL Port/COMBO-PHY Registers
+ */
+#define _ICL_COMBOPHY_A 0x162000
+#define _ICL_COMBOPHY_B 0x6C000
+#define _ICL_COMBOPHY(port) _PICK(port, _ICL_COMBOPHY_A, \
+ _ICL_COMBOPHY_B)
+
+/* CNL/ICL Port CL_DW registers */
+#define _ICL_PORT_CL_DW(dw, port) (_ICL_COMBOPHY(port) + \
+ 4 * (dw))
+
+#define CNL_PORT_CL1CM_DW5 _MMIO(0x162014)
+#define ICL_PORT_CL_DW5(port) _MMIO(_ICL_PORT_CL_DW(5, port))
+#define CL_POWER_DOWN_ENABLE (1 << 4)
+#define SUS_CLOCK_CONFIG (3 << 0)
+
+#define ICL_PORT_CL_DW10(port) _MMIO(_ICL_PORT_CL_DW(10, port))
+#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
+#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
+#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
+#define PWR_UP_ALL_LANES (0x0 << 4)
+#define PWR_DOWN_LN_3_2_1 (0xe << 4)
+#define PWR_DOWN_LN_3_2 (0xc << 4)
+#define PWR_DOWN_LN_3 (0x8 << 4)
+#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
+#define PWR_DOWN_LN_1_0 (0x3 << 4)
+#define PWR_DOWN_LN_1 (0x2 << 4)
+#define PWR_DOWN_LN_3_1 (0xa << 4)
+#define PWR_DOWN_LN_3_1_0 (0xb << 4)
+#define PWR_DOWN_LN_MASK (0xf << 4)
+#define PWR_DOWN_LN_SHIFT 4
+
+#define ICL_PORT_CL_DW12(port) _MMIO(_ICL_PORT_CL_DW(12, port))
+#define ICL_LANE_ENABLE_AUX (1 << 0)
+
+/* CNL/ICL Port COMP_DW registers */
+#define _ICL_PORT_COMP 0x100
+#define _ICL_PORT_COMP_DW(dw, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_COMP + 4 * (dw))
+
+#define CNL_PORT_COMP_DW0 _MMIO(0x162100)
+#define ICL_PORT_COMP_DW0(port) _MMIO(_ICL_PORT_COMP_DW(0, port))
+#define COMP_INIT (1 << 31)
+
+#define CNL_PORT_COMP_DW1 _MMIO(0x162104)
+#define ICL_PORT_COMP_DW1(port) _MMIO(_ICL_PORT_COMP_DW(1, port))
+
+#define CNL_PORT_COMP_DW3 _MMIO(0x16210c)
+#define ICL_PORT_COMP_DW3(port) _MMIO(_ICL_PORT_COMP_DW(3, port))
+#define PROCESS_INFO_DOT_0 (0 << 26)
+#define PROCESS_INFO_DOT_1 (1 << 26)
+#define PROCESS_INFO_DOT_4 (2 << 26)
+#define PROCESS_INFO_MASK (7 << 26)
+#define PROCESS_INFO_SHIFT 26
+#define VOLTAGE_INFO_0_85V (0 << 24)
+#define VOLTAGE_INFO_0_95V (1 << 24)
+#define VOLTAGE_INFO_1_05V (2 << 24)
+#define VOLTAGE_INFO_MASK (3 << 24)
+#define VOLTAGE_INFO_SHIFT 24
+
+#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
+#define ICL_PORT_COMP_DW9(port) _MMIO(_ICL_PORT_COMP_DW(9, port))
+
+#define CNL_PORT_COMP_DW10 _MMIO(0x162128)
+#define ICL_PORT_COMP_DW10(port) _MMIO(_ICL_PORT_COMP_DW(10, port))
+
+/* CNL/ICL Port PCS registers */
#define _CNL_PORT_PCS_DW1_GRP_AE 0x162304
#define _CNL_PORT_PCS_DW1_GRP_B 0x162384
#define _CNL_PORT_PCS_DW1_GRP_C 0x162B04
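The parametrized combo-PHY scheme introduced above reproduces the per-port addresses it retires; a quick check against the values visible in this hunk, taking PORT_A and PORT_B as indices 0 and 1:

	/*
	 *   ICL_PORT_CL_DW5(PORT_A)  = 0x162000 + 4 * 5  = 0x162014  (old _ICL_PORT_CL_DW5_A)
	 *   ICL_PORT_CL_DW5(PORT_B)  = 0x06c000 + 4 * 5  = 0x06c014  (old _ICL_PORT_CL_DW5_B)
	 *   ICL_PORT_CL_DW12(PORT_A) = 0x162000 + 4 * 12 = 0x162030  (old _ICL_PORT_CL_DW12_A)
	 *
	 * so the rework is a re-parametrisation of the same register map rather
	 * than an address change.
	 */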
@@ -1708,7 +1757,6 @@ enum i915_power_well_id {
_CNL_PORT_PCS_DW1_GRP_D, \
_CNL_PORT_PCS_DW1_GRP_AE, \
_CNL_PORT_PCS_DW1_GRP_F))
-
#define CNL_PORT_PCS_DW1_LN0(port) _MMIO(_PICK(port, \
_CNL_PORT_PCS_DW1_LN0_AE, \
_CNL_PORT_PCS_DW1_LN0_B, \
@@ -1717,24 +1765,21 @@ enum i915_power_well_id {
_CNL_PORT_PCS_DW1_LN0_AE, \
_CNL_PORT_PCS_DW1_LN0_F))
-#define _ICL_PORT_PCS_DW1_GRP_A 0x162604
-#define _ICL_PORT_PCS_DW1_GRP_B 0x6C604
-#define _ICL_PORT_PCS_DW1_LN0_A 0x162804
-#define _ICL_PORT_PCS_DW1_LN0_B 0x6C804
-#define _ICL_PORT_PCS_DW1_AUX_A 0x162304
-#define _ICL_PORT_PCS_DW1_AUX_B 0x6c304
-#define ICL_PORT_PCS_DW1_GRP(port) _MMIO_PORT(port,\
- _ICL_PORT_PCS_DW1_GRP_A, \
- _ICL_PORT_PCS_DW1_GRP_B)
-#define ICL_PORT_PCS_DW1_LN0(port) _MMIO_PORT(port, \
- _ICL_PORT_PCS_DW1_LN0_A, \
- _ICL_PORT_PCS_DW1_LN0_B)
-#define ICL_PORT_PCS_DW1_AUX(port) _MMIO_PORT(port, \
- _ICL_PORT_PCS_DW1_AUX_A, \
- _ICL_PORT_PCS_DW1_AUX_B)
+#define _ICL_PORT_PCS_AUX 0x300
+#define _ICL_PORT_PCS_GRP 0x600
+#define _ICL_PORT_PCS_LN(ln) (0x800 + (ln) * 0x100)
+#define _ICL_PORT_PCS_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_PCS_AUX + 4 * (dw))
+#define _ICL_PORT_PCS_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_PCS_GRP + 4 * (dw))
+#define _ICL_PORT_PCS_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_PCS_LN(ln) + 4 * (dw))
+#define ICL_PORT_PCS_DW1_AUX(port) _MMIO(_ICL_PORT_PCS_DW_AUX(1, port))
+#define ICL_PORT_PCS_DW1_GRP(port) _MMIO(_ICL_PORT_PCS_DW_GRP(1, port))
+#define ICL_PORT_PCS_DW1_LN0(port) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, port))
#define COMMON_KEEPER_EN (1 << 26)
-/* CNL Port TX registers */
+/* CNL/ICL Port TX registers */
#define _CNL_PORT_TX_AE_GRP_OFFSET 0x162340
#define _CNL_PORT_TX_B_GRP_OFFSET 0x1623C0
#define _CNL_PORT_TX_C_GRP_OFFSET 0x162B40
@@ -1762,23 +1807,22 @@ enum i915_power_well_id {
_CNL_PORT_TX_F_LN0_OFFSET) + \
4 * (dw))
-#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 2))
-#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 2))
-#define _ICL_PORT_TX_DW2_GRP_A 0x162688
-#define _ICL_PORT_TX_DW2_GRP_B 0x6C688
-#define _ICL_PORT_TX_DW2_LN0_A 0x162888
-#define _ICL_PORT_TX_DW2_LN0_B 0x6C888
-#define _ICL_PORT_TX_DW2_AUX_A 0x162388
-#define _ICL_PORT_TX_DW2_AUX_B 0x6c388
-#define ICL_PORT_TX_DW2_GRP(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW2_GRP_A, \
- _ICL_PORT_TX_DW2_GRP_B)
-#define ICL_PORT_TX_DW2_LN0(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW2_LN0_A, \
- _ICL_PORT_TX_DW2_LN0_B)
-#define ICL_PORT_TX_DW2_AUX(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW2_AUX_A, \
- _ICL_PORT_TX_DW2_AUX_B)
+#define _ICL_PORT_TX_AUX 0x380
+#define _ICL_PORT_TX_GRP 0x680
+#define _ICL_PORT_TX_LN(ln) (0x880 + (ln) * 0x100)
+
+#define _ICL_PORT_TX_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_TX_AUX + 4 * (dw))
+#define _ICL_PORT_TX_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_TX_GRP + 4 * (dw))
+#define _ICL_PORT_TX_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_TX_LN(ln) + 4 * (dw))
+
+#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(2, port))
+#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(2, port))
+#define ICL_PORT_TX_DW2_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(2, port))
+#define ICL_PORT_TX_DW2_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(2, port))
+#define ICL_PORT_TX_DW2_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, port))
#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
#define SWING_SEL_UPPER_MASK (1 << 15)
#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
@@ -1795,24 +1839,10 @@ enum i915_power_well_id {
#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
_CNL_PORT_TX_DW4_LN0_AE)))
-#define _ICL_PORT_TX_DW4_GRP_A 0x162690
-#define _ICL_PORT_TX_DW4_GRP_B 0x6C690
-#define _ICL_PORT_TX_DW4_LN0_A 0x162890
-#define _ICL_PORT_TX_DW4_LN1_A 0x162990
-#define _ICL_PORT_TX_DW4_LN0_B 0x6C890
-#define _ICL_PORT_TX_DW4_AUX_A 0x162390
-#define _ICL_PORT_TX_DW4_AUX_B 0x6c390
-#define ICL_PORT_TX_DW4_GRP(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW4_GRP_A, \
- _ICL_PORT_TX_DW4_GRP_B)
-#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_PORT(port, \
- _ICL_PORT_TX_DW4_LN0_A, \
- _ICL_PORT_TX_DW4_LN0_B) + \
- ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \
- _ICL_PORT_TX_DW4_LN0_A)))
-#define ICL_PORT_TX_DW4_AUX(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW4_AUX_A, \
- _ICL_PORT_TX_DW4_AUX_B)
+#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
+#define ICL_PORT_TX_DW4_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(4, port))
+#define ICL_PORT_TX_DW4_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port))
+#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
#define POST_CURSOR_1_MASK (0x3F << 12)
@@ -1821,23 +1851,11 @@ enum i915_power_well_id {
#define CURSOR_COEFF(x) ((x) << 0)
#define CURSOR_COEFF_MASK (0x3F << 0)
-#define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 5))
-#define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 5))
-#define _ICL_PORT_TX_DW5_GRP_A 0x162694
-#define _ICL_PORT_TX_DW5_GRP_B 0x6C694
-#define _ICL_PORT_TX_DW5_LN0_A 0x162894
-#define _ICL_PORT_TX_DW5_LN0_B 0x6C894
-#define _ICL_PORT_TX_DW5_AUX_A 0x162394
-#define _ICL_PORT_TX_DW5_AUX_B 0x6c394
-#define ICL_PORT_TX_DW5_GRP(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW5_GRP_A, \
- _ICL_PORT_TX_DW5_GRP_B)
-#define ICL_PORT_TX_DW5_LN0(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW5_LN0_A, \
- _ICL_PORT_TX_DW5_LN0_B)
-#define ICL_PORT_TX_DW5_AUX(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW5_AUX_A, \
- _ICL_PORT_TX_DW5_AUX_B)
+#define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(5, port))
+#define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(5, port))
+#define ICL_PORT_TX_DW5_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(5, port))
+#define ICL_PORT_TX_DW5_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(5, port))
+#define ICL_PORT_TX_DW5_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, port))
#define TX_TRAINING_EN (1 << 31)
#define TAP2_DISABLE (1 << 30)
#define TAP3_DISABLE (1 << 29)
@@ -2054,47 +2072,10 @@ enum i915_power_well_id {
#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
-#define CNL_PORT_COMP_DW0 _MMIO(0x162100)
-#define COMP_INIT (1 << 31)
-#define CNL_PORT_COMP_DW1 _MMIO(0x162104)
-#define CNL_PORT_COMP_DW3 _MMIO(0x16210c)
-#define PROCESS_INFO_DOT_0 (0 << 26)
-#define PROCESS_INFO_DOT_1 (1 << 26)
-#define PROCESS_INFO_DOT_4 (2 << 26)
-#define PROCESS_INFO_MASK (7 << 26)
-#define PROCESS_INFO_SHIFT 26
-#define VOLTAGE_INFO_0_85V (0 << 24)
-#define VOLTAGE_INFO_0_95V (1 << 24)
-#define VOLTAGE_INFO_1_05V (2 << 24)
-#define VOLTAGE_INFO_MASK (3 << 24)
-#define VOLTAGE_INFO_SHIFT 24
-#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
-#define CNL_PORT_COMP_DW10 _MMIO(0x162128)
-
-#define _ICL_PORT_COMP_DW0_A 0x162100
-#define _ICL_PORT_COMP_DW0_B 0x6C100
-#define ICL_PORT_COMP_DW0(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW0_A, \
- _ICL_PORT_COMP_DW0_B)
-#define _ICL_PORT_COMP_DW1_A 0x162104
-#define _ICL_PORT_COMP_DW1_B 0x6C104
-#define ICL_PORT_COMP_DW1(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW1_A, \
- _ICL_PORT_COMP_DW1_B)
-#define _ICL_PORT_COMP_DW3_A 0x16210C
-#define _ICL_PORT_COMP_DW3_B 0x6C10C
-#define ICL_PORT_COMP_DW3(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW3_A, \
- _ICL_PORT_COMP_DW3_B)
-#define _ICL_PORT_COMP_DW9_A 0x162124
-#define _ICL_PORT_COMP_DW9_B 0x6C124
-#define ICL_PORT_COMP_DW9(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW9_A, \
- _ICL_PORT_COMP_DW9_B)
-#define _ICL_PORT_COMP_DW10_A 0x162128
-#define _ICL_PORT_COMP_DW10_B 0x6C128
-#define ICL_PORT_COMP_DW10(port) _MMIO_PORT(port, \
- _ICL_PORT_COMP_DW10_A, \
- _ICL_PORT_COMP_DW10_B)
+#define FIA1_BASE 0x163000
/* ICL PHY DFLEX registers */
-#define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0)
+#define PORT_TX_DFLEXDPMLE1 _MMIO(FIA1_BASE + 0x008C0)
#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port)))
#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port)))
#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port)))
@@ -2417,6 +2398,7 @@ enum i915_power_well_id {
#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
+#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7)
#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
#define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31)
@@ -2577,6 +2559,7 @@ enum i915_power_well_id {
/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4)
#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+#define GEN11_ENABLE_32_PLANE_MODE (1 << 7)
/* WaClearTdlStateAckDirtyBits */
#define GEN8_STATE_ACK _MMIO(0x20F0)
@@ -3479,11 +3462,13 @@ enum i915_power_well_id {
/*
* Palette regs
*/
-#define PALETTE_A_OFFSET 0xa000
-#define PALETTE_B_OFFSET 0xa800
-#define CHV_PALETTE_C_OFFSET 0xc000
-#define PALETTE(pipe, i) _MMIO(dev_priv->info.palette_offsets[pipe] + \
- dev_priv->info.display_mmio_offset + (i) * 4)
+#define _PALETTE_A 0xa000
+#define _PALETTE_B 0xa800
+#define _CHV_PALETTE_C 0xc000
+#define PALETTE(pipe, i) _MMIO(dev_priv->info.display_mmio_offset + \
+ _PICK((pipe), _PALETTE_A, \
+ _PALETTE_B, _CHV_PALETTE_C) + \
+ (i) * 4)
/* MCH MMIO space */
@@ -4065,15 +4050,27 @@ enum {
#define _VSYNCSHIFT_B 0x61028
#define _PIPE_MULT_B 0x6102c
+/* DSI 0 timing regs */
+#define _HTOTAL_DSI0 0x6b000
+#define _HSYNC_DSI0 0x6b008
+#define _VTOTAL_DSI0 0x6b00c
+#define _VSYNC_DSI0 0x6b014
+#define _VSYNCSHIFT_DSI0 0x6b028
+
+/* DSI 1 timing regs */
+#define _HTOTAL_DSI1 0x6b800
+#define _HSYNC_DSI1 0x6b808
+#define _VTOTAL_DSI1 0x6b80c
+#define _VSYNC_DSI1 0x6b814
+#define _VSYNCSHIFT_DSI1 0x6b828
+
#define TRANSCODER_A_OFFSET 0x60000
#define TRANSCODER_B_OFFSET 0x61000
#define TRANSCODER_C_OFFSET 0x62000
#define CHV_TRANSCODER_C_OFFSET 0x63000
#define TRANSCODER_EDP_OFFSET 0x6f000
-
-#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
- dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
- dev_priv->info.display_mmio_offset)
+#define TRANSCODER_DSI0_OFFSET 0x6b000
+#define TRANSCODER_DSI1_OFFSET 0x6b800
#define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A)
#define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A)
@@ -4153,9 +4150,13 @@ enum {
/* Bspec claims those aren't shifted but stay at 0x64800 */
#define EDP_PSR_IMR _MMIO(0x64834)
#define EDP_PSR_IIR _MMIO(0x64838)
-#define EDP_PSR_ERROR(trans) (1 << (((trans) * 8 + 10) & 31))
-#define EDP_PSR_POST_EXIT(trans) (1 << (((trans) * 8 + 9) & 31))
-#define EDP_PSR_PRE_ENTRY(trans) (1 << (((trans) * 8 + 8) & 31))
+#define EDP_PSR_ERROR(shift) (1 << ((shift) + 2))
+#define EDP_PSR_POST_EXIT(shift) (1 << ((shift) + 1))
+#define EDP_PSR_PRE_ENTRY(shift) (1 << (shift))
+#define EDP_PSR_TRANSCODER_C_SHIFT 24
+#define EDP_PSR_TRANSCODER_B_SHIFT 16
+#define EDP_PSR_TRANSCODER_A_SHIFT 8
+#define EDP_PSR_TRANSCODER_EDP_SHIFT 0
#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10)
#define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26)
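The reworked EDP_PSR IIR/IMR bit helpers above trade the computed bit position for an explicit per-transcoder shift; both forms land on the same bits, e.g. for transcoder B (assuming the usual enum transcoder ordering A=0, B=1, C=2, EDP=3):

	/*
	 *   old: EDP_PSR_ERROR(TRANSCODER_B)               = 1 << ((1 * 8 + 10) & 31) = 1 << 18
	 *   new: EDP_PSR_ERROR(EDP_PSR_TRANSCODER_B_SHIFT) = 1 << (16 + 2)            = 1 << 18
	 *
	 * POST_EXIT and PRE_ENTRY line up the same way, with the EDP transcoder
	 * mapped to shift 0 instead of being derived from its enum value.
	 */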
@@ -4199,7 +4200,7 @@ enum {
#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
#define EDP_PSR_DEBUG_MASK_HPD (1 << 25)
-#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16)
+#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */
#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
#define EDP_PSR2_CTL _MMIO(0x6f900)
@@ -4236,7 +4237,7 @@ enum {
#define PSR_EVENT_FRONT_BUFFER_MODIFY (1 << 9)
#define PSR_EVENT_WD_TIMER_EXPIRE (1 << 8)
#define PSR_EVENT_PIPE_REGISTERS_UPDATE (1 << 6)
-#define PSR_EVENT_REGISTER_UPDATE (1 << 5)
+#define PSR_EVENT_REGISTER_UPDATE (1 << 5) /* Reserved in ICL+ */
#define PSR_EVENT_HDCP_ENABLE (1 << 4)
#define PSR_EVENT_KVMR_SESSION_ENABLE (1 << 3)
#define PSR_EVENT_VBI_ENABLE (1 << 2)
@@ -4569,6 +4570,7 @@ enum {
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
#define VIDEO_DIP_VSC_DATA_SIZE 36
+#define VIDEO_DIP_PPS_DATA_SIZE 132
#define VIDEO_DIP_CTL _MMIO(0x61170)
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
@@ -4588,6 +4590,15 @@ enum {
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
#define VIDEO_DIP_FREQ_MASK (3 << 16)
/* HSW and later: */
+#define DRM_DIP_ENABLE (1 << 28)
+#define PSR_VSC_BIT_7_SET (1 << 27)
+#define VSC_SELECT_MASK (0x3 << 25)
+#define VSC_SELECT_SHIFT 25
+#define VSC_DIP_HW_HEA_DATA (0 << 25)
+#define VSC_DIP_HW_HEA_SW_DATA (1 << 25)
+#define VSC_DIP_HW_DATA_SW_HEA (2 << 25)
+#define VSC_DIP_SW_HEA_DATA (3 << 25)
+#define VDIP_ENABLE_PPS (1 << 24)
#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
@@ -4595,16 +4606,6 @@ enum {
#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
-#define DRM_DIP_ENABLE (1 << 28)
-#define PSR_VSC_BIT_7_SET (1 << 27)
-#define VSC_SELECT_MASK (0x3 << 25)
-#define VSC_SELECT_SHIFT 25
-#define VSC_DIP_HW_HEA_DATA (0 << 25)
-#define VSC_DIP_HW_HEA_SW_DATA (1 << 25)
-#define VSC_DIP_HW_DATA_SW_HEA (2 << 25)
-#define VSC_DIP_SW_HEA_DATA (3 << 25)
-#define VDIP_ENABLE_PPS (1 << 24)
-
/* Panel power sequencing */
#define PPS_BASE 0x61200
#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE)
@@ -4617,6 +4618,17 @@ enum {
#define _PP_STATUS 0x61200
#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS)
#define PP_ON (1 << 31)
+
+#define _PP_CONTROL_1 0xc7204
+#define _PP_CONTROL_2 0xc7304
+#define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? _PP_CONTROL_1 : \
+ _PP_CONTROL_2)
+#define POWER_CYCLE_DELAY_MASK (0x1f << 4)
+#define POWER_CYCLE_DELAY_SHIFT 4
+#define VDD_OVERRIDE_FORCE (1 << 3)
+#define BACKLIGHT_ENABLE (1 << 2)
+#define PWR_DOWN_ON_RESET (1 << 1)
+#define PWR_STATE_TARGET (1 << 0)
/*
* Indicates that all dependencies of the panel are on:
*
@@ -5640,9 +5652,9 @@ enum {
*/
#define PIPE_EDP_OFFSET 0x7f000
-#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \
- dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
- dev_priv->info.display_mmio_offset)
+/* ICL DSI 0 and 1 */
+#define PIPE_DSI0_OFFSET 0x7b000
+#define PIPE_DSI1_OFFSET 0x7b800
#define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF)
#define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL)
@@ -6091,10 +6103,6 @@ enum {
#define _CURBBASE_IVB 0x71084
#define _CURBPOS_IVB 0x71088
-#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
- dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
- dev_priv->info.display_mmio_offset)
-
#define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR)
#define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
#define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
@@ -6228,6 +6236,10 @@ enum {
#define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4)
#define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC)
+/* ICL DSI 0 and 1 */
+#define _PIPEDSI0CONF 0x7b008
+#define _PIPEDSI1CONF 0x7b808
+
/* Sprite A control */
#define _DVSACNTR 0x72180
#define DVS_ENABLE (1 << 31)
@@ -6515,6 +6527,7 @@ enum {
#define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21)
#define PLANE_CTL_ORDER_BGRX (0 << 20)
#define PLANE_CTL_ORDER_RGBX (1 << 20)
+#define PLANE_CTL_YUV420_Y_PLANE (1 << 19)
#define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18)
#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
#define PLANE_CTL_YUV422_YUYV (0 << 16)
@@ -6558,17 +6571,33 @@ enum {
#define _PLANE_KEYVAL_2_A 0x70294
#define _PLANE_KEYMSK_1_A 0x70198
#define _PLANE_KEYMSK_2_A 0x70298
+#define PLANE_KEYMSK_ALPHA_ENABLE (1 << 31)
#define _PLANE_KEYMAX_1_A 0x701a0
#define _PLANE_KEYMAX_2_A 0x702a0
+#define PLANE_KEYMAX_ALPHA(a) ((a) << 24)
#define _PLANE_AUX_DIST_1_A 0x701c0
#define _PLANE_AUX_DIST_2_A 0x702c0
#define _PLANE_AUX_OFFSET_1_A 0x701c4
#define _PLANE_AUX_OFFSET_2_A 0x702c4
+#define _PLANE_CUS_CTL_1_A 0x701c8
+#define _PLANE_CUS_CTL_2_A 0x702c8
+#define PLANE_CUS_ENABLE (1 << 31)
+#define PLANE_CUS_PLANE_6 (0 << 30)
+#define PLANE_CUS_PLANE_7 (1 << 30)
+#define PLANE_CUS_HPHASE_SIGN_NEGATIVE (1 << 19)
+#define PLANE_CUS_HPHASE_0 (0 << 16)
+#define PLANE_CUS_HPHASE_0_25 (1 << 16)
+#define PLANE_CUS_HPHASE_0_5 (2 << 16)
+#define PLANE_CUS_VPHASE_SIGN_NEGATIVE (1 << 15)
+#define PLANE_CUS_VPHASE_0 (0 << 12)
+#define PLANE_CUS_VPHASE_0_25 (1 << 12)
+#define PLANE_CUS_VPHASE_0_5 (2 << 12)
#define _PLANE_COLOR_CTL_1_A 0x701CC /* GLK+ */
#define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */
#define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */
#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-ICL */
#define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE (1 << 28)
+#define PLANE_COLOR_INPUT_CSC_ENABLE (1 << 20) /* ICL+ */
#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */
#define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17)
#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709 (1 << 17)
@@ -6585,6 +6614,55 @@ enum {
#define _PLANE_NV12_BUF_CFG_1_A 0x70278
#define _PLANE_NV12_BUF_CFG_2_A 0x70378
+/* Input CSC Register Definitions */
+#define _PLANE_INPUT_CSC_RY_GY_1_A 0x701E0
+#define _PLANE_INPUT_CSC_RY_GY_2_A 0x702E0
+
+#define _PLANE_INPUT_CSC_RY_GY_1_B 0x711E0
+#define _PLANE_INPUT_CSC_RY_GY_2_B 0x712E0
+
+#define _PLANE_INPUT_CSC_RY_GY_1(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_1_A, \
+ _PLANE_INPUT_CSC_RY_GY_1_B)
+#define _PLANE_INPUT_CSC_RY_GY_2(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_2_A, \
+ _PLANE_INPUT_CSC_RY_GY_2_B)
+
+#define PLANE_INPUT_CSC_COEFF(pipe, plane, index) \
+ _MMIO_PLANE(plane, _PLANE_INPUT_CSC_RY_GY_1(pipe) + (index) * 4, \
+ _PLANE_INPUT_CSC_RY_GY_2(pipe) + (index) * 4)
+
+#define _PLANE_INPUT_CSC_PREOFF_HI_1_A 0x701F8
+#define _PLANE_INPUT_CSC_PREOFF_HI_2_A 0x702F8
+
+#define _PLANE_INPUT_CSC_PREOFF_HI_1_B 0x711F8
+#define _PLANE_INPUT_CSC_PREOFF_HI_2_B 0x712F8
+
+#define _PLANE_INPUT_CSC_PREOFF_HI_1(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_1_A, \
+ _PLANE_INPUT_CSC_PREOFF_HI_1_B)
+#define _PLANE_INPUT_CSC_PREOFF_HI_2(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_2_A, \
+ _PLANE_INPUT_CSC_PREOFF_HI_2_B)
+#define PLANE_INPUT_CSC_PREOFF(pipe, plane, index) \
+ _MMIO_PLANE(plane, _PLANE_INPUT_CSC_PREOFF_HI_1(pipe) + (index) * 4, \
+ _PLANE_INPUT_CSC_PREOFF_HI_2(pipe) + (index) * 4)
+
+#define _PLANE_INPUT_CSC_POSTOFF_HI_1_A 0x70204
+#define _PLANE_INPUT_CSC_POSTOFF_HI_2_A 0x70304
+
+#define _PLANE_INPUT_CSC_POSTOFF_HI_1_B 0x71204
+#define _PLANE_INPUT_CSC_POSTOFF_HI_2_B 0x71304
+
+#define _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_1_A, \
+ _PLANE_INPUT_CSC_POSTOFF_HI_1_B)
+#define _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_2_A, \
+ _PLANE_INPUT_CSC_POSTOFF_HI_2_B)
+#define PLANE_INPUT_CSC_POSTOFF(pipe, plane, index) \
+ _MMIO_PLANE(plane, _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe) + (index) * 4, \
+ _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe) + (index) * 4)
#define _PLANE_CTL_1_B 0x71180
#define _PLANE_CTL_2_B 0x71280
@@ -6701,6 +6779,15 @@ enum {
#define PLANE_AUX_OFFSET(pipe, plane) \
_MMIO_PLANE(plane, _PLANE_AUX_OFFSET_1(pipe), _PLANE_AUX_OFFSET_2(pipe))
+#define _PLANE_CUS_CTL_1_B 0x711c8
+#define _PLANE_CUS_CTL_2_B 0x712c8
+#define _PLANE_CUS_CTL_1(pipe) \
+ _PIPE(pipe, _PLANE_CUS_CTL_1_A, _PLANE_CUS_CTL_1_B)
+#define _PLANE_CUS_CTL_2(pipe) \
+ _PIPE(pipe, _PLANE_CUS_CTL_2_A, _PLANE_CUS_CTL_2_B)
+#define PLANE_CUS_CTL(pipe, plane) \
+ _MMIO_PLANE(plane, _PLANE_CUS_CTL_1(pipe), _PLANE_CUS_CTL_2(pipe))
+
#define _PLANE_COLOR_CTL_1_B 0x711CC
#define _PLANE_COLOR_CTL_2_B 0x712CC
#define _PLANE_COLOR_CTL_3_B 0x713CC
@@ -6854,11 +6941,12 @@ enum {
#define _PS_2B_CTRL 0x68A80
#define _PS_1C_CTRL 0x69180
#define PS_SCALER_EN (1 << 31)
-#define PS_SCALER_MODE_MASK (3 << 28)
-#define PS_SCALER_MODE_DYN (0 << 28)
-#define PS_SCALER_MODE_HQ (1 << 28)
+#define SKL_PS_SCALER_MODE_MASK (3 << 28)
+#define SKL_PS_SCALER_MODE_DYN (0 << 28)
+#define SKL_PS_SCALER_MODE_HQ (1 << 28)
#define SKL_PS_SCALER_MODE_NV12 (2 << 28)
#define PS_SCALER_MODE_PLANAR (1 << 29)
+#define PS_SCALER_MODE_NORMAL (0 << 29)
#define PS_PLANE_SEL_MASK (7 << 25)
#define PS_PLANE_SEL(plane) (((plane) + 1) << 25)
#define PS_FILTER_MASK (3 << 23)
@@ -6875,6 +6963,8 @@ enum {
#define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5)
#define PS_VADAPT_MODE_MOD_ADAPT (1 << 5)
#define PS_VADAPT_MODE_MOST_ADAPT (3 << 5)
+#define PS_PLANE_Y_SEL_MASK (7 << 5)
+#define PS_PLANE_Y_SEL(plane) (((plane) + 1) << 5)
#define _PS_PWR_GATE_1A 0x68160
#define _PS_PWR_GATE_2A 0x68260
@@ -7321,9 +7411,10 @@ enum {
#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
-#define CHICKEN_TRANS_A 0x420c0
-#define CHICKEN_TRANS_B 0x420c4
-#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
+#define CHICKEN_TRANS_A _MMIO(0x420c0)
+#define CHICKEN_TRANS_B _MMIO(0x420c4)
+#define CHICKEN_TRANS_C _MMIO(0x420c8)
+#define CHICKEN_TRANS_EDP _MMIO(0x420cc)
#define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
#define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19)
#define DDI_TRAINING_OVERRIDE_VALUE (1 << 18)
@@ -7413,6 +7504,10 @@ enum {
#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11)
+#define GEN7_SARCHKMD _MMIO(0xB000)
+#define GEN7_DISABLE_DEMAND_PREFETCH (1 << 31)
+#define GEN7_DISABLE_SAMPLER_PREFETCH (1 << 30)
+
#define GEN7_L3SQCREG1 _MMIO(0xB010)
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
@@ -7667,6 +7762,7 @@ enum {
#define ICP_DDIB_HPD_LONG_DETECT (2 << 4)
#define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4)
#define ICP_DDIA_HPD_ENABLE (1 << 3)
+#define ICP_DDIA_HPD_OP_DRIVE_1 (1 << 2)
#define ICP_DDIA_HPD_STATUS_MASK (3 << 0)
#define ICP_DDIA_HPD_NO_DETECT (0 << 0)
#define ICP_DDIA_HPD_SHORT_DETECT (1 << 0)
@@ -7828,8 +7924,7 @@ enum {
#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
#define CNP_RAWCLK_DIV(div) ((div) << 16)
#define CNP_RAWCLK_FRAC_MASK (0xf << 26)
-#define CNP_RAWCLK_FRAC(frac) ((frac) << 26)
-#define ICP_RAWCLK_DEN(den) ((den) << 26)
+#define CNP_RAWCLK_DEN(den) ((den) << 26)
#define ICP_RAWCLK_NUM(num) ((num) << 11)
#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
@@ -8629,8 +8724,7 @@ enum {
#define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9)
#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7)
-#define GAMW_ECO_DEV_RW_IA_REG _MMIO(0x4080)
-#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7)
+#define GEN10_SAMPLER_MODE _MMIO(0xE18C)
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
@@ -8931,6 +9025,15 @@ enum skl_power_gate {
#define CNL_AUX_ANAOVRD1_ENABLE (1 << 16)
#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1 << 23)
+#define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
+#define _ICL_AUX_ANAOVRD1_A 0x162398
+#define _ICL_AUX_ANAOVRD1_B 0x6C398
+#define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \
+ _ICL_AUX_ANAOVRD1_A, \
+ _ICL_AUX_ANAOVRD1_B))
+#define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7)
+#define ICL_AUX_ANAOVRD1_ENABLE (1 << 0)
+
/* HDCP Key Registers */
#define HDCP_KEY_CONF _MMIO(0x66c00)
#define HDCP_AKSV_SEND_TRIGGER BIT(31)
@@ -9013,11 +9116,45 @@ enum skl_power_gate {
#define HDCP_STATUS_CIPHER BIT(16)
#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff)
+/* HDCP2.2 Registers */
+#define _PORTA_HDCP2_BASE 0x66800
+#define _PORTB_HDCP2_BASE 0x66500
+#define _PORTC_HDCP2_BASE 0x66600
+#define _PORTD_HDCP2_BASE 0x66700
+#define _PORTE_HDCP2_BASE 0x66A00
+#define _PORTF_HDCP2_BASE 0x66900
+#define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \
+ _PORTA_HDCP2_BASE, \
+ _PORTB_HDCP2_BASE, \
+ _PORTC_HDCP2_BASE, \
+ _PORTD_HDCP2_BASE, \
+ _PORTE_HDCP2_BASE, \
+ _PORTF_HDCP2_BASE) + (x))
+
+#define HDCP2_AUTH_DDI(port) _PORT_HDCP2_BASE(port, 0x98)
+#define AUTH_LINK_AUTHENTICATED BIT(31)
+#define AUTH_LINK_TYPE BIT(30)
+#define AUTH_FORCE_CLR_INPUTCTR BIT(19)
+#define AUTH_CLR_KEYS BIT(18)
+
+#define HDCP2_CTL_DDI(port) _PORT_HDCP2_BASE(port, 0xB0)
+#define CTL_LINK_ENCRYPTION_REQ BIT(31)
+
+#define HDCP2_STATUS_DDI(port) _PORT_HDCP2_BASE(port, 0xB4)
+#define STREAM_ENCRYPTION_STATUS_A BIT(31)
+#define STREAM_ENCRYPTION_STATUS_B BIT(30)
+#define STREAM_ENCRYPTION_STATUS_C BIT(29)
+#define LINK_TYPE_STATUS BIT(22)
+#define LINK_AUTH_STATUS BIT(21)
+#define LINK_ENCRYPTION_STATUS BIT(20)
+
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
#define _TRANS_DDI_FUNC_CTL_C 0x62400
#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
+#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
+#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
#define TRANS_DDI_FUNC_ENABLE (1 << 31)
@@ -9055,11 +9192,25 @@ enum skl_power_gate {
| TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
| TRANS_DDI_HDMI_SCRAMBLING)
+#define _TRANS_DDI_FUNC_CTL2_A 0x60404
+#define _TRANS_DDI_FUNC_CTL2_B 0x61404
+#define _TRANS_DDI_FUNC_CTL2_C 0x62404
+#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
+#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
+#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
+#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \
+ _TRANS_DDI_FUNC_CTL2_A)
+#define PORT_SYNC_MODE_ENABLE (1 << 4)
+#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0)
+#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0)
+#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0
+
/* DisplayPort Transport Control */
#define _DP_TP_CTL_A 0x64040
#define _DP_TP_CTL_B 0x64140
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
#define DP_TP_CTL_ENABLE (1 << 31)
+#define DP_TP_CTL_FEC_ENABLE (1 << 30)
#define DP_TP_CTL_MODE_SST (0 << 27)
#define DP_TP_CTL_MODE_MST (1 << 27)
#define DP_TP_CTL_FORCE_ACT (1 << 25)
@@ -9078,6 +9229,7 @@ enum skl_power_gate {
#define _DP_TP_STATUS_A 0x64044
#define _DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
+#define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28)
#define DP_TP_STATUS_IDLE_DONE (1 << 25)
#define DP_TP_STATUS_ACT_SENT (1 << 24)
#define DP_TP_STATUS_MODE_STATUS_MST (1 << 23)
@@ -9226,6 +9378,8 @@ enum skl_power_gate {
#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
#define TRANS_MSA_SYNC_CLK (1 << 0)
+#define TRANS_MSA_SAMPLING_444 (2 << 1)
+#define TRANS_MSA_CLRSP_YCBCR (2 << 3)
#define TRANS_MSA_6_BPC (0 << 5)
#define TRANS_MSA_8_BPC (1 << 5)
#define TRANS_MSA_10_BPC (2 << 5)
@@ -9793,6 +9947,10 @@ enum skl_power_gate {
#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
+/* Gen11 DSI */
+#define _MMIO_DSI(tc, dsi0, dsi1) _MMIO_TRANS((tc) - TRANSCODER_DSI_0, \
+ dsi0, dsi1)
+
#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004)
#define GLK_TX_ESC_CLK_DIV1_MASK 0x3FF
#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008)
@@ -9956,6 +10114,39 @@ enum skl_power_gate {
_ICL_DSI_IO_MODECTL_1)
#define COMBO_PHY_MODE_DSI (1 << 0)
+/* Display Stream Splitter Control */
+#define DSS_CTL1 _MMIO(0x67400)
+#define SPLITTER_ENABLE (1 << 31)
+#define JOINER_ENABLE (1 << 30)
+#define DUAL_LINK_MODE_INTERLEAVE (1 << 24)
+#define DUAL_LINK_MODE_FRONTBACK (0 << 24)
+#define OVERLAP_PIXELS_MASK (0xf << 16)
+#define OVERLAP_PIXELS(pixels) ((pixels) << 16)
+#define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
+#define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
+#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0
+
+#define DSS_CTL2 _MMIO(0x67404)
+#define LEFT_BRANCH_VDSC_ENABLE (1 << 31)
+#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15)
+#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
+#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
+
+#define _ICL_PIPE_DSS_CTL1_PB 0x78200
+#define _ICL_PIPE_DSS_CTL1_PC 0x78400
+#define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_PIPE_DSS_CTL1_PB, \
+ _ICL_PIPE_DSS_CTL1_PC)
+#define BIG_JOINER_ENABLE (1 << 29)
+#define MASTER_BIG_JOINER_ENABLE (1 << 28)
+#define VGA_CENTERING_ENABLE (1 << 27)
+
+#define _ICL_PIPE_DSS_CTL2_PB 0x78204
+#define _ICL_PIPE_DSS_CTL2_PC 0x78404
+#define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_PIPE_DSS_CTL2_PB, \
+ _ICL_PIPE_DSS_CTL2_PC)
+
#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020)
#define STAP_SELECT (1 << 0)
@@ -10292,6 +10483,235 @@ enum skl_power_gate {
_ICL_DSI_T_INIT_MASTER_0,\
_ICL_DSI_T_INIT_MASTER_1)
+#define _DPHY_CLK_TIMING_PARAM_0 0x162180
+#define _DPHY_CLK_TIMING_PARAM_1 0x6c180
+#define DPHY_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DPHY_CLK_TIMING_PARAM_0,\
+ _DPHY_CLK_TIMING_PARAM_1)
+#define _DSI_CLK_TIMING_PARAM_0 0x6b080
+#define _DSI_CLK_TIMING_PARAM_1 0x6b880
+#define DSI_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DSI_CLK_TIMING_PARAM_0,\
+ _DSI_CLK_TIMING_PARAM_1)
+#define CLK_PREPARE_OVERRIDE (1 << 31)
+#define CLK_PREPARE(x) ((x) << 28)
+#define CLK_PREPARE_MASK (0x7 << 28)
+#define CLK_PREPARE_SHIFT 28
+#define CLK_ZERO_OVERRIDE (1 << 27)
+#define CLK_ZERO(x) ((x) << 20)
+#define CLK_ZERO_MASK (0xf << 20)
+#define CLK_ZERO_SHIFT 20
+#define CLK_PRE_OVERRIDE (1 << 19)
+#define CLK_PRE(x) ((x) << 16)
+#define CLK_PRE_MASK (0x3 << 16)
+#define CLK_PRE_SHIFT 16
+#define CLK_POST_OVERRIDE (1 << 15)
+#define CLK_POST(x) ((x) << 8)
+#define CLK_POST_MASK (0x7 << 8)
+#define CLK_POST_SHIFT 8
+#define CLK_TRAIL_OVERRIDE (1 << 7)
+#define CLK_TRAIL(x) ((x) << 0)
+#define CLK_TRAIL_MASK (0xf << 0)
+#define CLK_TRAIL_SHIFT 0
+
+#define _DPHY_DATA_TIMING_PARAM_0 0x162184
+#define _DPHY_DATA_TIMING_PARAM_1 0x6c184
+#define DPHY_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DPHY_DATA_TIMING_PARAM_0,\
+ _DPHY_DATA_TIMING_PARAM_1)
+#define _DSI_DATA_TIMING_PARAM_0 0x6B084
+#define _DSI_DATA_TIMING_PARAM_1 0x6B884
+#define DSI_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DSI_DATA_TIMING_PARAM_0,\
+ _DSI_DATA_TIMING_PARAM_1)
+#define HS_PREPARE_OVERRIDE (1 << 31)
+#define HS_PREPARE(x) ((x) << 24)
+#define HS_PREPARE_MASK (0x7 << 24)
+#define HS_PREPARE_SHIFT 24
+#define HS_ZERO_OVERRIDE (1 << 23)
+#define HS_ZERO(x) ((x) << 16)
+#define HS_ZERO_MASK (0xf << 16)
+#define HS_ZERO_SHIFT 16
+#define HS_TRAIL_OVERRIDE (1 << 15)
+#define HS_TRAIL(x) ((x) << 8)
+#define HS_TRAIL_MASK (0x7 << 8)
+#define HS_TRAIL_SHIFT 8
+#define HS_EXIT_OVERRIDE (1 << 7)
+#define HS_EXIT(x) ((x) << 0)
+#define HS_EXIT_MASK (0x7 << 0)
+#define HS_EXIT_SHIFT 0
+
+#define _DPHY_TA_TIMING_PARAM_0 0x162188
+#define _DPHY_TA_TIMING_PARAM_1 0x6c188
+#define DPHY_TA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DPHY_TA_TIMING_PARAM_0,\
+ _DPHY_TA_TIMING_PARAM_1)
+#define _DSI_TA_TIMING_PARAM_0 0x6b098
+#define _DSI_TA_TIMING_PARAM_1 0x6b898
+#define DSI_TA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DSI_TA_TIMING_PARAM_0,\
+ _DSI_TA_TIMING_PARAM_1)
+#define TA_SURE_OVERRIDE (1 << 31)
+#define TA_SURE(x) ((x) << 16)
+#define TA_SURE_MASK (0x1f << 16)
+#define TA_SURE_SHIFT 16
+#define TA_GO_OVERRIDE (1 << 15)
+#define TA_GO(x) ((x) << 8)
+#define TA_GO_MASK (0xf << 8)
+#define TA_GO_SHIFT 8
+#define TA_GET_OVERRIDE (1 << 7)
+#define TA_GET(x) ((x) << 0)
+#define TA_GET_MASK (0xf << 0)
+#define TA_GET_SHIFT 0
+
+/* DSI transcoder configuration */
+#define _DSI_TRANS_FUNC_CONF_0 0x6b030
+#define _DSI_TRANS_FUNC_CONF_1 0x6b830
+#define DSI_TRANS_FUNC_CONF(tc) _MMIO_DSI(tc, \
+ _DSI_TRANS_FUNC_CONF_0,\
+ _DSI_TRANS_FUNC_CONF_1)
+#define OP_MODE_MASK (0x3 << 28)
+#define OP_MODE_SHIFT 28
+#define CMD_MODE_NO_GATE (0x0 << 28)
+#define CMD_MODE_TE_GATE (0x1 << 28)
+#define VIDEO_MODE_SYNC_EVENT (0x2 << 28)
+#define VIDEO_MODE_SYNC_PULSE (0x3 << 28)
+#define LINK_READY (1 << 20)
+#define PIX_FMT_MASK (0x3 << 16)
+#define PIX_FMT_SHIFT 16
+#define PIX_FMT_RGB565 (0x0 << 16)
+#define PIX_FMT_RGB666_PACKED (0x1 << 16)
+#define PIX_FMT_RGB666_LOOSE (0x2 << 16)
+#define PIX_FMT_RGB888 (0x3 << 16)
+#define PIX_FMT_RGB101010 (0x4 << 16)
+#define PIX_FMT_RGB121212 (0x5 << 16)
+#define PIX_FMT_COMPRESSED (0x6 << 16)
+#define BGR_TRANSMISSION (1 << 15)
+#define PIX_VIRT_CHAN(x) ((x) << 12)
+#define PIX_VIRT_CHAN_MASK (0x3 << 12)
+#define PIX_VIRT_CHAN_SHIFT 12
+#define PIX_BUF_THRESHOLD_MASK (0x3 << 10)
+#define PIX_BUF_THRESHOLD_SHIFT 10
+#define PIX_BUF_THRESHOLD_1_4 (0x0 << 10)
+#define PIX_BUF_THRESHOLD_1_2 (0x1 << 10)
+#define PIX_BUF_THRESHOLD_3_4 (0x2 << 10)
+#define PIX_BUF_THRESHOLD_FULL (0x3 << 10)
+#define CONTINUOUS_CLK_MASK (0x3 << 8)
+#define CONTINUOUS_CLK_SHIFT 8
+#define CLK_ENTER_LP_AFTER_DATA (0x0 << 8)
+#define CLK_HS_OR_LP (0x2 << 8)
+#define CLK_HS_CONTINUOUS (0x3 << 8)
+#define LINK_CALIBRATION_MASK (0x3 << 4)
+#define LINK_CALIBRATION_SHIFT 4
+#define CALIBRATION_DISABLED (0x0 << 4)
+#define CALIBRATION_ENABLED_INITIAL_ONLY (0x2 << 4)
+#define CALIBRATION_ENABLED_INITIAL_PERIODIC (0x3 << 4)
+#define S3D_ORIENTATION_LANDSCAPE (1 << 1)
+#define EOTP_DISABLED (1 << 0)
+
+#define _DSI_CMD_RXCTL_0 0x6b0d4
+#define _DSI_CMD_RXCTL_1 0x6b8d4
+#define DSI_CMD_RXCTL(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_RXCTL_0,\
+ _DSI_CMD_RXCTL_1)
+#define READ_UNLOADS_DW (1 << 16)
+#define RECEIVED_UNASSIGNED_TRIGGER (1 << 15)
+#define RECEIVED_ACKNOWLEDGE_TRIGGER (1 << 14)
+#define RECEIVED_TEAR_EFFECT_TRIGGER (1 << 13)
+#define RECEIVED_RESET_TRIGGER (1 << 12)
+#define RECEIVED_PAYLOAD_WAS_LOST (1 << 11)
+#define RECEIVED_CRC_WAS_LOST (1 << 10)
+#define NUMBER_RX_PLOAD_DW_MASK (0xff << 0)
+#define NUMBER_RX_PLOAD_DW_SHIFT 0
+
+#define _DSI_CMD_TXCTL_0 0x6b0d0
+#define _DSI_CMD_TXCTL_1 0x6b8d0
+#define DSI_CMD_TXCTL(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_TXCTL_0,\
+ _DSI_CMD_TXCTL_1)
+#define KEEP_LINK_IN_HS (1 << 24)
+#define FREE_HEADER_CREDIT_MASK (0x1f << 8)
+#define FREE_HEADER_CREDIT_SHIFT 0x8
+#define FREE_PLOAD_CREDIT_MASK (0xff << 0)
+#define FREE_PLOAD_CREDIT_SHIFT 0
+#define MAX_HEADER_CREDIT 0x10
+#define MAX_PLOAD_CREDIT 0x40
+
+#define _DSI_CMD_TXHDR_0 0x6b100
+#define _DSI_CMD_TXHDR_1 0x6b900
+#define DSI_CMD_TXHDR(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_TXHDR_0,\
+ _DSI_CMD_TXHDR_1)
+#define PAYLOAD_PRESENT (1 << 31)
+#define LP_DATA_TRANSFER (1 << 30)
+#define VBLANK_FENCE (1 << 29)
+#define PARAM_WC_MASK (0xffff << 8)
+#define PARAM_WC_LOWER_SHIFT 8
+#define PARAM_WC_UPPER_SHIFT 16
+#define VC_MASK (0x3 << 6)
+#define VC_SHIFT 6
+#define DT_MASK (0x3f << 0)
+#define DT_SHIFT 0
+
+#define _DSI_CMD_TXPYLD_0 0x6b104
+#define _DSI_CMD_TXPYLD_1 0x6b904
+#define DSI_CMD_TXPYLD(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_TXPYLD_0,\
+ _DSI_CMD_TXPYLD_1)
+
+#define _DSI_LP_MSG_0 0x6b0d8
+#define _DSI_LP_MSG_1 0x6b8d8
+#define DSI_LP_MSG(tc) _MMIO_DSI(tc, \
+ _DSI_LP_MSG_0,\
+ _DSI_LP_MSG_1)
+#define LPTX_IN_PROGRESS (1 << 17)
+#define LINK_IN_ULPS (1 << 16)
+#define LINK_ULPS_TYPE_LP11 (1 << 8)
+#define LINK_ENTER_ULPS (1 << 0)
+
+/* DSI timeout registers */
+#define _DSI_HSTX_TO_0 0x6b044
+#define _DSI_HSTX_TO_1 0x6b844
+#define DSI_HSTX_TO(tc) _MMIO_DSI(tc, \
+ _DSI_HSTX_TO_0,\
+ _DSI_HSTX_TO_1)
+#define HSTX_TIMEOUT_VALUE_MASK (0xffff << 16)
+#define HSTX_TIMEOUT_VALUE_SHIFT 16
+#define HSTX_TIMEOUT_VALUE(x) ((x) << 16)
+#define HSTX_TIMED_OUT (1 << 0)
+
+#define _DSI_LPRX_HOST_TO_0 0x6b048
+#define _DSI_LPRX_HOST_TO_1 0x6b848
+#define DSI_LPRX_HOST_TO(tc) _MMIO_DSI(tc, \
+ _DSI_LPRX_HOST_TO_0,\
+ _DSI_LPRX_HOST_TO_1)
+#define LPRX_TIMED_OUT (1 << 16)
+#define LPRX_TIMEOUT_VALUE_MASK (0xffff << 0)
+#define LPRX_TIMEOUT_VALUE_SHIFT 0
+#define LPRX_TIMEOUT_VALUE(x) ((x) << 0)
+
+#define _DSI_PWAIT_TO_0 0x6b040
+#define _DSI_PWAIT_TO_1 0x6b840
+#define DSI_PWAIT_TO(tc) _MMIO_DSI(tc, \
+ _DSI_PWAIT_TO_0,\
+ _DSI_PWAIT_TO_1)
+#define PRESET_TIMEOUT_VALUE_MASK (0xffff << 16)
+#define PRESET_TIMEOUT_VALUE_SHIFT 16
+#define PRESET_TIMEOUT_VALUE(x) ((x) << 16)
+#define PRESPONSE_TIMEOUT_VALUE_MASK (0xffff << 0)
+#define PRESPONSE_TIMEOUT_VALUE_SHIFT 0
+#define PRESPONSE_TIMEOUT_VALUE(x) ((x) << 0)
+
+#define _DSI_TA_TO_0 0x6b04c
+#define _DSI_TA_TO_1 0x6b84c
+#define DSI_TA_TO(tc) _MMIO_DSI(tc, \
+ _DSI_TA_TO_0,\
+ _DSI_TA_TO_1)
+#define TA_TIMED_OUT (1 << 16)
+#define TA_TIMEOUT_VALUE_MASK (0xffff << 0)
+#define TA_TIMEOUT_VALUE_SHIFT 0
+#define TA_TIMEOUT_VALUE(x) ((x) << 0)
+
/* bits 31:0 */
#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
@@ -10404,10 +10824,6 @@ enum skl_power_gate {
#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
-/* For UMS only (deprecated): */
-#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
-#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
-
/* MOCS (Memory Object Control State) registers */
#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
@@ -10693,6 +11109,7 @@ enum skl_power_gate {
#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
_ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
+#define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20)
#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16)
#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0)
@@ -10747,17 +11164,17 @@ enum skl_power_gate {
_ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
_ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
-#define PORT_TX_DFLEXDPSP _MMIO(0x1638A0)
+#define PORT_TX_DFLEXDPSP _MMIO(FIA1_BASE + 0x008A0)
#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6))
#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5))
#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8)
#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8))
#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8))
-#define PORT_TX_DFLEXDPPMS _MMIO(0x163890)
+#define PORT_TX_DFLEXDPPMS _MMIO(FIA1_BASE + 0x00890)
#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port))
-#define PORT_TX_DFLEXDPCSSS _MMIO(0x163894)
+#define PORT_TX_DFLEXDPCSSS _MMIO(FIA1_BASE + 0x00894)
#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port))
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a492385b2089..ca95ab2f4cfa 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -111,91 +111,6 @@ i915_request_remove_from_client(struct i915_request *request)
spin_unlock(&file_priv->mm.lock);
}
-static struct i915_dependency *
-i915_dependency_alloc(struct drm_i915_private *i915)
-{
- return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
-}
-
-static void
-i915_dependency_free(struct drm_i915_private *i915,
- struct i915_dependency *dep)
-{
- kmem_cache_free(i915->dependencies, dep);
-}
-
-static void
-__i915_sched_node_add_dependency(struct i915_sched_node *node,
- struct i915_sched_node *signal,
- struct i915_dependency *dep,
- unsigned long flags)
-{
- INIT_LIST_HEAD(&dep->dfs_link);
- list_add(&dep->wait_link, &signal->waiters_list);
- list_add(&dep->signal_link, &node->signalers_list);
- dep->signaler = signal;
- dep->flags = flags;
-}
-
-static int
-i915_sched_node_add_dependency(struct drm_i915_private *i915,
- struct i915_sched_node *node,
- struct i915_sched_node *signal)
-{
- struct i915_dependency *dep;
-
- dep = i915_dependency_alloc(i915);
- if (!dep)
- return -ENOMEM;
-
- __i915_sched_node_add_dependency(node, signal, dep,
- I915_DEPENDENCY_ALLOC);
- return 0;
-}
-
-static void
-i915_sched_node_fini(struct drm_i915_private *i915,
- struct i915_sched_node *node)
-{
- struct i915_dependency *dep, *tmp;
-
- GEM_BUG_ON(!list_empty(&node->link));
-
- /*
- * Everyone we depended upon (the fences we wait to be signaled)
- * should retire before us and remove themselves from our list.
- * However, retirement is run independently on each timeline and
- * so we may be called out-of-order.
- */
- list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
- GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
- GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
- list_del(&dep->wait_link);
- if (dep->flags & I915_DEPENDENCY_ALLOC)
- i915_dependency_free(i915, dep);
- }
-
- /* Remove ourselves from everyone who depends upon us */
- list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
- GEM_BUG_ON(dep->signaler != node);
- GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
- list_del(&dep->signal_link);
- if (dep->flags & I915_DEPENDENCY_ALLOC)
- i915_dependency_free(i915, dep);
- }
-}
-
-static void
-i915_sched_node_init(struct i915_sched_node *node)
-{
- INIT_LIST_HEAD(&node->signalers_list);
- INIT_LIST_HEAD(&node->waiters_list);
- INIT_LIST_HEAD(&node->link);
- node->attr.priority = I915_PRIORITY_INVALID;
-}
-
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
struct intel_engine_cs *engine;
@@ -221,6 +136,11 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
intel_engine_get_seqno(engine),
seqno);
+ if (seqno == engine->timeline.seqno)
+ continue;
+
+ kthread_park(engine->breadcrumbs.signaler);
+
if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
/* Flush any waiters before we reuse the seqno */
intel_engine_disarm_breadcrumbs(engine);
@@ -235,6 +155,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
/* Finally reset hw state */
intel_engine_init_global_seqno(engine, seqno);
engine->timeline.seqno = seqno;
+
+ kthread_unpark(engine->breadcrumbs.signaler);
}
list_for_each_entry(timeline, &i915->gt.timelines, link)
@@ -740,17 +662,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
if (rq)
cond_synchronize_rcu(rq->rcustate);
- /*
- * We've forced the client to stall and catch up with whatever
- * backlog there might have been. As we are assuming that we
- * caused the mempressure, now is an opportune time to
- * recover as much memory from the request pool as is possible.
- * Having already penalized the client to stall, we spend
- * a little extra time to re-optimise page allocation.
- */
- kmem_cache_shrink(i915->requests);
- rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
-
rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
if (!rq) {
ret = -ENOMEM;
@@ -1127,8 +1038,20 @@ void i915_request_add(struct i915_request *request)
*/
local_bh_disable();
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
- if (engine->schedule)
- engine->schedule(request, &request->gem_context->sched);
+ if (engine->schedule) {
+ struct i915_sched_attr attr = request->gem_context->sched;
+
+ /*
+ * Boost priorities to new clients (new request flows).
+ *
+ * Allow interactive/synchronous clients to jump ahead of
+ * the bulk clients. (FQ_CODEL)
+ */
+ if (!prev || i915_request_completed(prev))
+ attr.priority |= I915_PRIORITY_NEWCLIENT;
+
+ engine->schedule(request, &attr);
+ }
rcu_read_unlock();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
@@ -1310,6 +1233,8 @@ long i915_request_wait(struct i915_request *rq,
add_wait_queue(errq, &reset);
intel_wait_init(&wait);
+ if (flags & I915_WAIT_PRIORITY)
+ i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
restart:
do {
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 7fa94b024968..90e9d170a0cd 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -277,8 +277,9 @@ long i915_request_wait(struct i915_request *rq,
__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
-#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
-#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
+#define I915_WAIT_PRIORITY BIT(2) /* small priority bump for the request */
+#define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */
+#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
u32 seqno);
@@ -332,14 +333,6 @@ static inline bool i915_request_completed(const struct i915_request *rq)
return __i915_request_completed(rq, seqno);
}
-static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
-{
- const struct i915_request *rq =
- container_of(node, const struct i915_request, sched);
-
- return i915_request_completed(rq);
-}
-
void i915_retire_requests(struct drm_i915_private *i915);
/*
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
new file mode 100644
index 000000000000..340faea6c08a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -0,0 +1,399 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/mutex.h>
+
+#include "i915_drv.h"
+#include "i915_request.h"
+#include "i915_scheduler.h"
+
+static DEFINE_SPINLOCK(schedule_lock);
+
+static const struct i915_request *
+node_to_request(const struct i915_sched_node *node)
+{
+ return container_of(node, const struct i915_request, sched);
+}
+
+static inline bool node_signaled(const struct i915_sched_node *node)
+{
+ return i915_request_completed(node_to_request(node));
+}
+
+void i915_sched_node_init(struct i915_sched_node *node)
+{
+ INIT_LIST_HEAD(&node->signalers_list);
+ INIT_LIST_HEAD(&node->waiters_list);
+ INIT_LIST_HEAD(&node->link);
+ node->attr.priority = I915_PRIORITY_INVALID;
+}
+
+static struct i915_dependency *
+i915_dependency_alloc(struct drm_i915_private *i915)
+{
+ return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
+}
+
+static void
+i915_dependency_free(struct drm_i915_private *i915,
+ struct i915_dependency *dep)
+{
+ kmem_cache_free(i915->dependencies, dep);
+}
+
+bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+ struct i915_sched_node *signal,
+ struct i915_dependency *dep,
+ unsigned long flags)
+{
+ bool ret = false;
+
+ spin_lock(&schedule_lock);
+
+ if (!node_signaled(signal)) {
+ INIT_LIST_HEAD(&dep->dfs_link);
+ list_add(&dep->wait_link, &signal->waiters_list);
+ list_add(&dep->signal_link, &node->signalers_list);
+ dep->signaler = signal;
+ dep->flags = flags;
+
+ ret = true;
+ }
+
+ spin_unlock(&schedule_lock);
+
+ return ret;
+}
+
+int i915_sched_node_add_dependency(struct drm_i915_private *i915,
+ struct i915_sched_node *node,
+ struct i915_sched_node *signal)
+{
+ struct i915_dependency *dep;
+
+ dep = i915_dependency_alloc(i915);
+ if (!dep)
+ return -ENOMEM;
+
+ if (!__i915_sched_node_add_dependency(node, signal, dep,
+ I915_DEPENDENCY_ALLOC))
+ i915_dependency_free(i915, dep);
+
+ return 0;
+}
+
+void i915_sched_node_fini(struct drm_i915_private *i915,
+ struct i915_sched_node *node)
+{
+ struct i915_dependency *dep, *tmp;
+
+ GEM_BUG_ON(!list_empty(&node->link));
+
+ spin_lock(&schedule_lock);
+
+ /*
+ * Everyone we depended upon (the fences we wait to be signaled)
+ * should retire before us and remove themselves from our list.
+ * However, retirement is run independently on each timeline and
+ * so we may be called out-of-order.
+ */
+ list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
+ GEM_BUG_ON(!node_signaled(dep->signaler));
+ GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
+ list_del(&dep->wait_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(i915, dep);
+ }
+
+ /* Remove ourselves from everyone who depends upon us */
+ list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
+ GEM_BUG_ON(dep->signaler != node);
+ GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
+ list_del(&dep->signal_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(i915, dep);
+ }
+
+ spin_unlock(&schedule_lock);
+}
+
+static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+ return rb_entry(rb, struct i915_priolist, node);
+}
+
+static void assert_priolists(struct intel_engine_execlists * const execlists,
+ long queue_priority)
+{
+ struct rb_node *rb;
+ long last_prio, i;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
+
+ GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
+ rb_first(&execlists->queue.rb_root));
+
+ last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1;
+ for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+ const struct i915_priolist *p = to_priolist(rb);
+
+ GEM_BUG_ON(p->priority >= last_prio);
+ last_prio = p->priority;
+
+ GEM_BUG_ON(!p->used);
+ for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
+ if (list_empty(&p->requests[i]))
+ continue;
+
+ GEM_BUG_ON(!(p->used & BIT(i)));
+ }
+ }
+}
+
+struct list_head *
+i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_priolist *p;
+ struct rb_node **parent, *rb;
+ bool first = true;
+ int idx, i;
+
+ lockdep_assert_held(&engine->timeline.lock);
+ assert_priolists(execlists, INT_MAX);
+
+ /* buckets sorted from highest [in slot 0] to lowest priority */
+ idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
+ prio >>= I915_USER_PRIORITY_SHIFT;
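+	/*
+	 * Worked example (illustrative): for prio == I915_USER_PRIORITY(0) |
+	 * I915_PRIORITY_WAIT, the low bits give idx = 4 - 1 - 1 = 2 and the
+	 * shifted prio is I915_PRIORITY_NORMAL, so a WAIT-boosted request
+	 * lands in an earlier bucket (slot 2) of the same priolist than an
+	 * unboosted request (slot 3).
+	 */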
+ if (unlikely(execlists->no_priolist))
+ prio = I915_PRIORITY_NORMAL;
+
+find_priolist:
+ /* most positive priority is scheduled first, equal priorities fifo */
+ rb = NULL;
+ parent = &execlists->queue.rb_root.rb_node;
+ while (*parent) {
+ rb = *parent;
+ p = to_priolist(rb);
+ if (prio > p->priority) {
+ parent = &rb->rb_left;
+ } else if (prio < p->priority) {
+ parent = &rb->rb_right;
+ first = false;
+ } else {
+ goto out;
+ }
+ }
+
+ if (prio == I915_PRIORITY_NORMAL) {
+ p = &execlists->default_priolist;
+ } else {
+ p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
+ /* Convert an allocation failure to a priority bump */
+ if (unlikely(!p)) {
+ prio = I915_PRIORITY_NORMAL; /* recurses just once */
+
+ /* To maintain ordering with all rendering, after an
+ * allocation failure we have to disable all scheduling.
+ * Requests will then be executed in fifo, and schedule
+ * will ensure that dependencies are emitted in fifo.
+			 * There will still be some reordering with existing
+ * requests, so if userspace lied about their
+ * dependencies that reordering may be visible.
+ */
+ execlists->no_priolist = true;
+ goto find_priolist;
+ }
+ }
+
+ p->priority = prio;
+ for (i = 0; i < ARRAY_SIZE(p->requests); i++)
+ INIT_LIST_HEAD(&p->requests[i]);
+ rb_link_node(&p->node, rb, parent);
+ rb_insert_color_cached(&p->node, &execlists->queue, first);
+ p->used = 0;
+
+out:
+ p->used |= BIT(idx);
+ return &p->requests[idx];
+}
+
+static struct intel_engine_cs *
+sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
+{
+ struct intel_engine_cs *engine = node_to_request(node)->engine;
+
+ GEM_BUG_ON(!locked);
+
+ if (engine != locked) {
+ spin_unlock(&locked->timeline.lock);
+ spin_lock(&engine->timeline.lock);
+ }
+
+ return engine;
+}
+
+static void __i915_schedule(struct i915_request *rq,
+ const struct i915_sched_attr *attr)
+{
+ struct list_head *uninitialized_var(pl);
+ struct intel_engine_cs *engine, *last;
+ struct i915_dependency *dep, *p;
+ struct i915_dependency stack;
+ const int prio = attr->priority;
+ LIST_HEAD(dfs);
+
+ /* Needed in order to use the temporary link inside i915_dependency */
+ lockdep_assert_held(&schedule_lock);
+ GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
+
+ if (i915_request_completed(rq))
+ return;
+
+ if (prio <= READ_ONCE(rq->sched.attr.priority))
+ return;
+
+ stack.signaler = &rq->sched;
+ list_add(&stack.dfs_link, &dfs);
+
+ /*
+ * Recursively bump all dependent priorities to match the new request.
+ *
+ * A naive approach would be to use recursion:
+ * static void update_priorities(struct i915_sched_node *node, prio) {
+ * list_for_each_entry(dep, &node->signalers_list, signal_link)
+ * update_priorities(dep->signal, prio)
+ * queue_request(node);
+ * }
+ * but that may have unlimited recursion depth and so runs a very
+	 * real risk of overrunning the kernel stack. Instead, we build
+ * a flat list of all dependencies starting with the current request.
+ * As we walk the list of dependencies, we add all of its dependencies
+ * to the end of the list (this may include an already visited
+ * request) and continue to walk onwards onto the new dependencies. The
+ * end result is a topological list of requests in reverse order, the
+ * last element in the list is the request we must execute first.
+ */
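+	/*
+	 * Illustrative walk (hypothetical graph): if rq waits on A and B,
+	 * and A in turn waits on C, the flattened dfs list becomes
+	 * [rq, A, B, C]; the reverse walk further below then bumps C, B
+	 * and A before finally bumping rq itself.
+	 */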
+ list_for_each_entry(dep, &dfs, dfs_link) {
+ struct i915_sched_node *node = dep->signaler;
+
+ /*
+ * Within an engine, there can be no cycle, but we may
+ * refer to the same dependency chain multiple times
+ * (redundant dependencies are not eliminated) and across
+ * engines.
+ */
+ list_for_each_entry(p, &node->signalers_list, signal_link) {
+ GEM_BUG_ON(p == dep); /* no cycles! */
+
+ if (node_signaled(p->signaler))
+ continue;
+
+ GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
+ if (prio > READ_ONCE(p->signaler->attr.priority))
+ list_move_tail(&p->dfs_link, &dfs);
+ }
+ }
+
+ /*
+ * If we didn't need to bump any existing priorities, and we haven't
+ * yet submitted this request (i.e. there is no potential race with
+ * execlists_submit_request()), we can set our own priority and skip
+ * acquiring the engine locks.
+ */
+ if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
+ GEM_BUG_ON(!list_empty(&rq->sched.link));
+ rq->sched.attr = *attr;
+
+ if (stack.dfs_link.next == stack.dfs_link.prev)
+ return;
+
+ __list_del_entry(&stack.dfs_link);
+ }
+
+ last = NULL;
+ engine = rq->engine;
+ spin_lock_irq(&engine->timeline.lock);
+
+ /* Fifo and depth-first replacement ensure our deps execute before us */
+ list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
+ struct i915_sched_node *node = dep->signaler;
+
+ INIT_LIST_HEAD(&dep->dfs_link);
+
+ engine = sched_lock_engine(node, engine);
+
+ /* Recheck after acquiring the engine->timeline.lock */
+ if (prio <= node->attr.priority || node_signaled(node))
+ continue;
+
+ node->attr.priority = prio;
+ if (!list_empty(&node->link)) {
+ if (last != engine) {
+ pl = i915_sched_lookup_priolist(engine, prio);
+ last = engine;
+ }
+ list_move_tail(&node->link, pl);
+ } else {
+ /*
+ * If the request is not in the priolist queue because
+ * it is not yet runnable, then it doesn't contribute
+ * to our preemption decisions. On the other hand,
+ * if the request is on the HW, it too is not in the
+ * queue; but in that case we may still need to reorder
+ * the inflight requests.
+ */
+ if (!i915_sw_fence_done(&node_to_request(node)->submit))
+ continue;
+ }
+
+ if (prio <= engine->execlists.queue_priority)
+ continue;
+
+ /*
+ * If we are already the currently executing context, don't
+ * bother evaluating if we should preempt ourselves.
+ */
+ if (node_to_request(node)->global_seqno &&
+ i915_seqno_passed(port_request(engine->execlists.port)->global_seqno,
+ node_to_request(node)->global_seqno))
+ continue;
+
+ /* Defer (tasklet) submission until after all of our updates. */
+ engine->execlists.queue_priority = prio;
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
+
+ spin_unlock_irq(&engine->timeline.lock);
+}
+
+void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
+{
+ spin_lock(&schedule_lock);
+ __i915_schedule(rq, attr);
+ spin_unlock(&schedule_lock);
+}
+
+void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
+{
+ struct i915_sched_attr attr;
+
+ GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
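+	/*
+	 * The bump is limited to the low-order boost bits (e.g.
+	 * I915_PRIORITY_WAIT), so ORing it into attr.priority below can only
+	 * nudge the request within its user priority level, never across
+	 * levels.
+	 */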
+
+ if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
+ return;
+
+ spin_lock_bh(&schedule_lock);
+
+ attr = rq->sched.attr;
+ attr.priority |= bump;
+ __i915_schedule(rq, &attr);
+
+ spin_unlock_bh(&schedule_lock);
+}
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 70a42220358d..dbe9cb7ecd82 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -8,9 +8,14 @@
#define _I915_SCHEDULER_H_
#include <linux/bitops.h>
+#include <linux/kernel.h>
#include <uapi/drm/i915_drm.h>
+struct drm_i915_private;
+struct i915_request;
+struct intel_engine_cs;
+
enum {
I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
@@ -19,6 +24,15 @@ enum {
I915_PRIORITY_INVALID = INT_MIN
};
+#define I915_USER_PRIORITY_SHIFT 2
+#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
+
+#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
+#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
+
+#define I915_PRIORITY_WAIT ((u8)BIT(0))
+#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1))
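+
+/*
+ * Illustrative note: the user/context priority occupies the bits above
+ * I915_USER_PRIORITY_SHIFT, while the internal boost bits (WAIT,
+ * NEWCLIENT) live in the low bits; e.g.
+ * I915_USER_PRIORITY(1) | I915_PRIORITY_NEWCLIENT == 0x6.
+ */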
+
struct i915_sched_attr {
/**
* @priority: execution and service priority
@@ -69,4 +83,26 @@ struct i915_dependency {
#define I915_DEPENDENCY_ALLOC BIT(0)
};
+void i915_sched_node_init(struct i915_sched_node *node);
+
+bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+ struct i915_sched_node *signal,
+ struct i915_dependency *dep,
+ unsigned long flags);
+
+int i915_sched_node_add_dependency(struct drm_i915_private *i915,
+ struct i915_sched_node *node,
+ struct i915_sched_node *signal);
+
+void i915_sched_node_fini(struct drm_i915_private *i915,
+ struct i915_sched_node *node);
+
+void i915_schedule(struct i915_request *request,
+ const struct i915_sched_attr *attr);
+
+void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);
+
+struct list_head *
+i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
+
#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 6dbeed079ae5..fc2eeab823b7 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -1,10 +1,7 @@
/*
- * (C) Copyright 2016 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
+ * (C) Copyright 2016 Intel Corporation
*/
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index fe2ef4dadfc6..0e055ea0179f 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -1,10 +1,9 @@
/*
+ * SPDX-License-Identifier: MIT
+ *
* i915_sw_fence.h - library routines for N:M synchronisation points
*
* Copyright (C) 2016 Intel Corporation
- *
- * This file is released under the GPLv2.
- *
*/
#ifndef _I915_SW_FENCE_H_
diff --git a/drivers/gpu/drm/i915/i915_syncmap.c b/drivers/gpu/drm/i915/i915_syncmap.c
index 58f8d0cc125c..60404dbb2e9f 100644
--- a/drivers/gpu/drm/i915/i915_syncmap.c
+++ b/drivers/gpu/drm/i915/i915_syncmap.c
@@ -92,7 +92,7 @@ void i915_syncmap_init(struct i915_syncmap **root)
{
BUILD_BUG_ON_NOT_POWER_OF_2(KSYNCMAP);
BUILD_BUG_ON_NOT_POWER_OF_2(SHIFT);
- BUILD_BUG_ON(KSYNCMAP > BITS_PER_BYTE * sizeof((*root)->bitmap));
+ BUILD_BUG_ON(KSYNCMAP > BITS_PER_TYPE((*root)->bitmap));
*root = NULL;
}
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index e5e6f6bb2b05..535caebd9813 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -483,7 +483,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static const struct attribute *gen6_attrs[] = {
+static const struct attribute * const gen6_attrs[] = {
&dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_boost_freq_mhz.attr,
@@ -495,7 +495,7 @@ static const struct attribute *gen6_attrs[] = {
NULL,
};
-static const struct attribute *vlv_attrs[] = {
+static const struct attribute * const vlv_attrs[] = {
&dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_boost_freq_mhz.attr,
@@ -516,26 +516,21 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
{
struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct drm_i915_error_state_buf error_str;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
struct i915_gpu_state *gpu;
ssize_t ret;
- ret = i915_error_state_buf_init(&error_str, dev_priv, count, off);
- if (ret)
- return ret;
-
- gpu = i915_first_error_state(dev_priv);
- ret = i915_error_state_to_str(&error_str, gpu);
- if (ret)
- goto out;
-
- ret = count < error_str.bytes ? count : error_str.bytes;
- memcpy(buf, error_str.buf, ret);
+ gpu = i915_first_error_state(i915);
+ if (gpu) {
+ ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
+ i915_gpu_state_put(gpu);
+ } else {
+ const char *str = "No error state collected\n";
+ size_t len = strlen(str);
-out:
- i915_gpu_state_put(gpu);
- i915_error_state_buf_release(&error_str);
+ ret = min_t(size_t, count, len - off);
+ memcpy(buf, str + off, ret);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index a2c2c3ab5fb0..ebd71b487220 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -83,6 +83,25 @@ void i915_timeline_init(struct drm_i915_private *i915,
const char *name);
void i915_timeline_fini(struct i915_timeline *tl);
+static inline void
+i915_timeline_set_subclass(struct i915_timeline *timeline,
+ unsigned int subclass)
+{
+ lockdep_set_subclass(&timeline->lock, subclass);
+
+ /*
+ * Due to an interesting quirk in lockdep's internal debug tracking,
+ * after setting a subclass we must ensure the lock is used. Otherwise,
+ * nr_unused_locks is incremented once too often.
+ */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ local_irq_disable();
+ lock_map_acquire(&timeline->lock.dep_map);
+ lock_map_release(&timeline->lock.dep_map);
+ local_irq_enable();
+#endif
+}
+
struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915, const char *name);
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 395dd2511568..9726df37c4c4 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -44,16 +44,19 @@
__stringify(x), (long)(x))
#if defined(GCC_VERSION) && GCC_VERSION >= 70000
-#define add_overflows(A, B) \
- __builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
+#define add_overflows_t(T, A, B) \
+ __builtin_add_overflow_p((A), (B), (T)0)
#else
-#define add_overflows(A, B) ({ \
+#define add_overflows_t(T, A, B) ({ \
typeof(A) a = (A); \
typeof(B) b = (B); \
- a + b < a; \
+ (T)(a + b) < a; \
})
#endif
+#define add_overflows(A, B) \
+ add_overflows_t(typeof((A) + (B)), (A), (B))
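+
+/*
+ * For example (illustrative): add_overflows_t(u8, 200, 100) is true,
+ * since (u8)(200 + 100) == 44, i.e. the truncated sum wraps below the
+ * first addend.
+ */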
+
#define range_overflows(start, size, max) ({ \
typeof(start) start__ = (start); \
typeof(size) size__ = (size); \
@@ -68,7 +71,7 @@
/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
- (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
+ (sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
#define ptr_mask_bits(ptr, n) ({ \
unsigned long __v = (unsigned long)(ptr); \
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 35fce4c88629..5b4d78cdb4ca 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -305,12 +305,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->size > vma->node.size);
- if (GEM_WARN_ON(range_overflows(vma->node.start,
- vma->node.size,
- vma->vm->total)))
+ if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
+ vma->node.size,
+ vma->vm->total)))
return -ENODEV;
- if (GEM_WARN_ON(!flags))
+ if (GEM_DEBUG_WARN_ON(!flags))
return -EINVAL;
bind_flags = 0;
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 13830e43a4d1..4dd793b78996 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -25,8 +25,277 @@
* Jani Nikula <jani.nikula@intel.com>
*/
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_atomic_helper.h>
#include "intel_dsi.h"
+static inline int header_credits_available(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
+{
+ return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
+ >> FREE_HEADER_CREDIT_SHIFT;
+}
+
+static inline int payload_credits_available(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
+{
+ return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
+ >> FREE_PLOAD_CREDIT_SHIFT;
+}
+
+static void wait_for_header_credits(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
+{
+ if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
+ MAX_HEADER_CREDIT, 100))
+ DRM_ERROR("DSI header credits not released\n");
+}
+
+static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
+{
+ if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
+ MAX_PLOAD_CREDIT, 100))
+ DRM_ERROR("DSI payload credits not released\n");
+}
+
+static enum transcoder dsi_port_to_transcoder(enum port port)
+{
+ if (port == PORT_A)
+ return TRANSCODER_DSI_0;
+ else
+ return TRANSCODER_DSI_1;
+}
+
+static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct mipi_dsi_device *dsi;
+ enum port port;
+ enum transcoder dsi_trans;
+ int ret;
+
+ /* wait for header/payload credits to be released */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ wait_for_header_credits(dev_priv, dsi_trans);
+ wait_for_payload_credits(dev_priv, dsi_trans);
+ }
+
+ /* send nop DCS command */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi = intel_dsi->dsi_hosts[port]->device;
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+ dsi->channel = 0;
+ ret = mipi_dsi_dcs_nop(dsi);
+ if (ret < 0)
+ DRM_ERROR("error sending DCS NOP command\n");
+ }
+
+ /* wait for header credits to be released */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ wait_for_header_credits(dev_priv, dsi_trans);
+ }
+
+ /* wait for LP TX in progress bit to be cleared */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ if (wait_for_us(!(I915_READ(DSI_LP_MSG(dsi_trans)) &
+ LPTX_IN_PROGRESS), 20))
+ DRM_ERROR("LPTX bit not cleared\n");
+ }
+}
+
+static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data,
+ u32 len)
+{
+ struct intel_dsi *intel_dsi = host->intel_dsi;
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+ int free_credits;
+ int i, j;
+
+ for (i = 0; i < len; i += 4) {
+ u32 tmp = 0;
+
+ free_credits = payload_credits_available(dev_priv, dsi_trans);
+ if (free_credits < 1) {
+ DRM_ERROR("Payload credit not available\n");
+ return false;
+ }
+
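+		/*
+		 * Illustrative packing example: payload bytes
+		 * {0x12, 0x34, 0x56, 0x78} are written as the single dword
+		 * 0x78563412, i.e. the first byte occupies the least
+		 * significant byte of the FIFO write.
+		 */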
+ for (j = 0; j < min_t(u32, len - i, 4); j++)
+ tmp |= *data++ << 8 * j;
+
+ I915_WRITE(DSI_CMD_TXPYLD(dsi_trans), tmp);
+ }
+
+ return true;
+}
+
+static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
+ struct mipi_dsi_packet pkt, bool enable_lpdt)
+{
+ struct intel_dsi *intel_dsi = host->intel_dsi;
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+ u32 tmp;
+ int free_credits;
+
+ /* check if header credit available */
+ free_credits = header_credits_available(dev_priv, dsi_trans);
+ if (free_credits < 1) {
+ DRM_ERROR("send pkt header failed, not enough hdr credits\n");
+ return -1;
+ }
+
+ tmp = I915_READ(DSI_CMD_TXHDR(dsi_trans));
+
+ if (pkt.payload)
+ tmp |= PAYLOAD_PRESENT;
+ else
+ tmp &= ~PAYLOAD_PRESENT;
+
+ tmp &= ~VBLANK_FENCE;
+
+ if (enable_lpdt)
+ tmp |= LP_DATA_TRANSFER;
+
+ tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK);
+ tmp |= ((pkt.header[0] & VC_MASK) << VC_SHIFT);
+ tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT);
+ tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT);
+ tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT);
+ I915_WRITE(DSI_CMD_TXHDR(dsi_trans), tmp);
+
+ return 0;
+}
+
+static int dsi_send_pkt_payld(struct intel_dsi_host *host,
+ struct mipi_dsi_packet pkt)
+{
+ /* payload queue can accept *256 bytes*, check limit */
+ if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) {
+ DRM_ERROR("payload size exceeds max queue limit\n");
+ return -1;
+ }
+
+ /* load data into command payload queue */
+ if (!add_payld_to_queue(host, pkt.payload,
+ pkt.payload_length)) {
+ DRM_ERROR("adding payload to queue failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 tmp;
+ int lane;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+
+ /*
+ * Program voltage swing and pre-emphasis level values as per
+		 * table in BSPEC under DDI buffer programming
+ */
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
+ tmp |= SCALING_MODE_SEL(0x2);
+ tmp |= TAP2_DISABLE | TAP3_DISABLE;
+ tmp |= RTERM_SELECT(0x6);
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
+ tmp |= SCALING_MODE_SEL(0x2);
+ tmp |= TAP2_DISABLE | TAP3_DISABLE;
+ tmp |= RTERM_SELECT(0x6);
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+
+ tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+ tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+ RCOMP_SCALAR_MASK);
+ tmp |= SWING_SEL_UPPER(0x2);
+ tmp |= SWING_SEL_LOWER(0x2);
+ tmp |= RCOMP_SCALAR(0x98);
+ I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
+
+ tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
+ tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+ RCOMP_SCALAR_MASK);
+ tmp |= SWING_SEL_UPPER(0x2);
+ tmp |= SWING_SEL_LOWER(0x2);
+ tmp |= RCOMP_SCALAR(0x98);
+ I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
+
+ tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
+ tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+ CURSOR_COEFF_MASK);
+ tmp |= POST_CURSOR_1(0x0);
+ tmp |= POST_CURSOR_2(0x0);
+ tmp |= CURSOR_COEFF(0x3f);
+ I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
+
+ for (lane = 0; lane <= 3; lane++) {
+ /* Bspec: must not use GRP register for write */
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+ tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+ CURSOR_COEFF_MASK);
+ tmp |= POST_CURSOR_1(0x0);
+ tmp |= POST_CURSOR_2(0x0);
+ tmp |= CURSOR_COEFF(0x3f);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+ }
+ }
+}
+
+static void configure_dual_link_mode(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 dss_ctl1;
+
+ dss_ctl1 = I915_READ(DSS_CTL1);
+ dss_ctl1 |= SPLITTER_ENABLE;
+ dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
+ dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
+
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+ u32 dss_ctl2;
+ u16 hactive = adjusted_mode->crtc_hdisplay;
+ u16 dl_buffer_depth;
+
+ dss_ctl1 &= ~DUAL_LINK_MODE_INTERLEAVE;
+ dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap;
+
+ if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH)
+			DRM_ERROR("DL buffer depth exceeds max value\n");
+
+ dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
+ dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+ dss_ctl2 = I915_READ(DSS_CTL2);
+ dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
+ dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+ I915_WRITE(DSS_CTL2, dss_ctl2);
+ } else {
+ /* Interleave */
+ dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
+ }
+
+ I915_WRITE(DSS_CTL1, dss_ctl1);
+}
+
static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -105,23 +374,1079 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
}
}
-static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder)
+static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 tmp;
+ int lane;
+
+ /* Step 4b(i) set loadgen select for transmit and aux lanes */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
+ tmp &= ~LOADGEN_SELECT;
+ I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
+ for (lane = 0; lane <= 3; lane++) {
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+ tmp &= ~LOADGEN_SELECT;
+ if (lane != 2)
+ tmp |= LOADGEN_SELECT;
+ I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+ }
+ }
+
+ /* Step 4b(ii) set latency optimization for transmit and aux lanes */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
+ tmp &= ~FRC_LATENCY_OPTIM_MASK;
+ tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
+ I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+ tmp &= ~FRC_LATENCY_OPTIM_MASK;
+ tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
+ I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
+ }
+
+}
+
+static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ /* clear common keeper enable bit */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
+ tmp &= ~COMMON_KEEPER_EN;
+ I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp);
+ tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port));
+ tmp &= ~COMMON_KEEPER_EN;
+ I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp);
+ }
+
+ /*
+ * Set SUS Clock Config bitfield to 11b
+ * Note: loadgen select program is done
+ * as part of lane phy sequence configuration
+ */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_CL_DW5(port));
+ tmp |= SUS_CLOCK_CONFIG;
+ I915_WRITE(ICL_PORT_CL_DW5(port), tmp);
+ }
+
+ /* Clear training enable to change swing values */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ tmp &= ~TX_TRAINING_EN;
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ tmp &= ~TX_TRAINING_EN;
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+ }
+
+ /* Program swing and de-emphasis */
+ dsi_program_swing_and_deemphasis(encoder);
+
+ /* Set training enable to trigger update */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ tmp |= TX_TRAINING_EN;
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ tmp |= TX_TRAINING_EN;
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+ }
+}
+
+static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(DDI_BUF_CTL(port));
+ tmp |= DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), tmp);
+
+ if (wait_for_us(!(I915_READ(DDI_BUF_CTL(port)) &
+ DDI_BUF_IS_IDLE),
+ 500))
+ DRM_ERROR("DDI port:%c buffer idle\n", port_name(port));
+ }
+}
+
+static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ /* Program T-INIT master registers */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_DSI_T_INIT_MASTER(port));
+ tmp &= ~MASTER_INIT_TIMER_MASK;
+ tmp |= intel_dsi->init_count;
+ I915_WRITE(ICL_DSI_T_INIT_MASTER(port), tmp);
+ }
+
+ /* Program DPHY clock lanes timings */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+
+ /* shadow register inside display core */
+ I915_WRITE(DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+ }
+
+ /* Program DPHY data lanes timings */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(DPHY_DATA_TIMING_PARAM(port),
+ intel_dsi->dphy_data_lane_reg);
+
+ /* shadow register inside display core */
+ I915_WRITE(DSI_DATA_TIMING_PARAM(port),
+ intel_dsi->dphy_data_lane_reg);
+ }
+
+ /*
+	 * If the DSI link is operating at or below 800 MHz,
+	 * TA_SURE should be overridden and programmed to
+	 * a value of '0' inside TA_PARAM_REGISTERS; otherwise
+	 * leave all fields at HW default values.
+ */
+ if (intel_dsi_bitrate(intel_dsi) <= 800000) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
+ tmp &= ~TA_SURE_MASK;
+ tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+ I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
+
+ /* shadow register inside display core */
+ tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
+ tmp &= ~TA_SURE_MASK;
+ tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+ I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
+ }
+ }
+}
+
+static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ mutex_lock(&dev_priv->dpll_lock);
+ tmp = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp |= DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+ }
+
+ I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ mutex_lock(&dev_priv->dpll_lock);
+ tmp = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+ }
+
+ I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void gen11_dsi_map_pll(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum port port;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpll_lock);
+
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+ val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
+ }
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ POSTING_READ(DPCLKA_CFGCR0_ICL);
+
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void
+gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 tmp;
+ enum port port;
+ enum transcoder dsi_trans;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
+
+ if (intel_dsi->eotp_pkt)
+ tmp &= ~EOTP_DISABLED;
+ else
+ tmp |= EOTP_DISABLED;
+
+		/* enable link calibration if freq >= 1.5 Gbps */
+ if (intel_dsi_bitrate(intel_dsi) >= 1500 * 1000) {
+ tmp &= ~LINK_CALIBRATION_MASK;
+ tmp |= CALIBRATION_ENABLED_INITIAL_ONLY;
+ }
+
+ /* configure continuous clock */
+ tmp &= ~CONTINUOUS_CLK_MASK;
+ if (intel_dsi->clock_stop)
+ tmp |= CLK_ENTER_LP_AFTER_DATA;
+ else
+ tmp |= CLK_HS_CONTINUOUS;
+
+ /* configure buffer threshold limit to minimum */
+ tmp &= ~PIX_BUF_THRESHOLD_MASK;
+ tmp |= PIX_BUF_THRESHOLD_1_4;
+
+ /* set virtual channel to '0' */
+ tmp &= ~PIX_VIRT_CHAN_MASK;
+ tmp |= PIX_VIRT_CHAN(0);
+
+ /* program BGR transmission */
+ if (intel_dsi->bgr_enabled)
+ tmp |= BGR_TRANSMISSION;
+
+ /* select pixel format */
+ tmp &= ~PIX_FMT_MASK;
+ switch (intel_dsi->pixel_format) {
+ default:
+ MISSING_CASE(intel_dsi->pixel_format);
+ /* fallthrough */
+ case MIPI_DSI_FMT_RGB565:
+ tmp |= PIX_FMT_RGB565;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ tmp |= PIX_FMT_RGB666_PACKED;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ tmp |= PIX_FMT_RGB666_LOOSE;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ tmp |= PIX_FMT_RGB888;
+ break;
+ }
+
+ /* program DSI operation mode */
+ if (is_vid_mode(intel_dsi)) {
+ tmp &= ~OP_MODE_MASK;
+ switch (intel_dsi->video_mode_format) {
+ default:
+ MISSING_CASE(intel_dsi->video_mode_format);
+ /* fallthrough */
+ case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS:
+ tmp |= VIDEO_MODE_SYNC_EVENT;
+ break;
+ case VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE:
+ tmp |= VIDEO_MODE_SYNC_PULSE;
+ break;
+ }
+ }
+
+ I915_WRITE(DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
+ }
+
+ /* enable port sync mode if dual link */
+ if (intel_dsi->dual_link) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+ tmp |= PORT_SYNC_MODE_ENABLE;
+ I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ }
+
+ /* configure stream splitting */
+ configure_dual_link_mode(encoder, pipe_config);
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+
+ /* select data lane width */
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ tmp &= ~DDI_PORT_WIDTH_MASK;
+ tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
+
+ /* select input pipe */
+ tmp &= ~TRANS_DDI_EDP_INPUT_MASK;
+ switch (pipe) {
+ default:
+ MISSING_CASE(pipe);
+ /* fallthrough */
+ case PIPE_A:
+ tmp |= TRANS_DDI_EDP_INPUT_A_ON;
+ break;
+ case PIPE_B:
+ tmp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+ break;
+ case PIPE_C:
+ tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+ break;
+ }
+
+ /* enable DDI buffer */
+ tmp |= TRANS_DDI_FUNC_ENABLE;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ }
+
+ /* wait for link ready */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ if (wait_for_us((I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)) &
+ LINK_READY), 2500))
+ DRM_ERROR("DSI link not ready\n");
+ }
+}
+
+static void
+gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+ enum port port;
+ enum transcoder dsi_trans;
+ /* horizontal timings */
+ u16 htotal, hactive, hsync_start, hsync_end, hsync_size;
+ u16 hfront_porch, hback_porch;
+ /* vertical timings */
+ u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift;
+
+ hactive = adjusted_mode->crtc_hdisplay;
+ htotal = adjusted_mode->crtc_htotal;
+ hsync_start = adjusted_mode->crtc_hsync_start;
+ hsync_end = adjusted_mode->crtc_hsync_end;
+ hsync_size = hsync_end - hsync_start;
+ hfront_porch = (adjusted_mode->crtc_hsync_start -
+ adjusted_mode->crtc_hdisplay);
+ hback_porch = (adjusted_mode->crtc_htotal -
+ adjusted_mode->crtc_hsync_end);
+ vactive = adjusted_mode->crtc_vdisplay;
+ vtotal = adjusted_mode->crtc_vtotal;
+ vsync_start = adjusted_mode->crtc_vsync_start;
+ vsync_end = adjusted_mode->crtc_vsync_end;
+ vsync_shift = hsync_start - htotal / 2;
+
+ if (intel_dsi->dual_link) {
+ hactive /= 2;
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ hactive += intel_dsi->pixel_overlap;
+ htotal /= 2;
+ }
+
+ /* minimum hactive as per bspec: 256 pixels */
+ if (adjusted_mode->crtc_hdisplay < 256)
+		DRM_ERROR("hactive is less than 256 pixels\n");
+
+ /* if RGB666 format, then hactive must be multiple of 4 pixels */
+ if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0)
+		DRM_ERROR("hactive pixels are not a multiple of 4\n");
+
+ /* program TRANS_HTOTAL register */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ I915_WRITE(HTOTAL(dsi_trans),
+ (hactive - 1) | ((htotal - 1) << 16));
+ }
+
+ /* TRANS_HSYNC register to be programmed only for video mode */
+ if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
+ if (intel_dsi->video_mode_format ==
+ VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
+			/* BSPEC: hsync size should be at least 16 pixels */
+ if (hsync_size < 16)
+ DRM_ERROR("hsync size < 16 pixels\n");
+ }
+
+ if (hback_porch < 16)
+ DRM_ERROR("hback porch < 16 pixels\n");
+
+ if (intel_dsi->dual_link) {
+ hsync_start /= 2;
+ hsync_end /= 2;
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ I915_WRITE(HSYNC(dsi_trans),
+ (hsync_start - 1) | ((hsync_end - 1) << 16));
+ }
+ }
+
+ /* program TRANS_VTOTAL register */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ /*
+		 * FIXME: Programming this by assuming progressive mode, since
+ * non-interlaced info from VBT is not saved inside
+ * struct drm_display_mode.
+ * For interlace mode: program required pixel minus 2
+ */
+ I915_WRITE(VTOTAL(dsi_trans),
+ (vactive - 1) | ((vtotal - 1) << 16));
+ }
+
+ if (vsync_end < vsync_start || vsync_end > vtotal)
+ DRM_ERROR("Invalid vsync_end value\n");
+
+ if (vsync_start < vactive)
+ DRM_ERROR("vsync_start less than vactive\n");
+
+ /* program TRANS_VSYNC register */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ I915_WRITE(VSYNC(dsi_trans),
+ (vsync_start - 1) | ((vsync_end - 1) << 16));
+ }
+
+ /*
+ * FIXME: It has to be programmed only for interlaced
+ * modes. Put the check condition here once interlaced
+	 * info is available as described above.
+ * program TRANS_VSYNCSHIFT register
+ */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift);
+ }
+}
+
+static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(PIPECONF(dsi_trans));
+ tmp |= PIPECONF_ENABLE;
+ I915_WRITE(PIPECONF(dsi_trans), tmp);
+
+ /* wait for transcoder to be enabled */
+ if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+ I965_PIPECONF_ACTIVE,
+ I965_PIPECONF_ACTIVE, 10))
+ DRM_ERROR("DSI transcoder not enabled\n");
+ }
+}
+
+static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
+
+ /*
+ * escape clock count calculation:
+ * BYTE_CLK_COUNT = TIME_NS/(8 * UI)
+ * UI (nsec) = (10^6)/Bitrate
+ * TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6)/ Bitrate
+ * ESCAPE_CLK_COUNT = TIME_NS/ESC_CLK_NS
+ */
+ divisor = intel_dsi_tlpx_ns(intel_dsi) * intel_dsi_bitrate(intel_dsi) * 1000;
+ mul = 8 * 1000000;
+ hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul,
+ divisor);
+ lp_rx_timeout = DIV_ROUND_UP(intel_dsi->lp_rx_timeout * mul, divisor);
+ ta_timeout = DIV_ROUND_UP(intel_dsi->turn_arnd_val * mul, divisor);
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+
+ /* program hst_tx_timeout */
+ tmp = I915_READ(DSI_HSTX_TO(dsi_trans));
+ tmp &= ~HSTX_TIMEOUT_VALUE_MASK;
+ tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout);
+ I915_WRITE(DSI_HSTX_TO(dsi_trans), tmp);
+
+ /* FIXME: DSI_CALIB_TO */
+
+ /* program lp_rx_host timeout */
+ tmp = I915_READ(DSI_LPRX_HOST_TO(dsi_trans));
+ tmp &= ~LPRX_TIMEOUT_VALUE_MASK;
+ tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout);
+ I915_WRITE(DSI_LPRX_HOST_TO(dsi_trans), tmp);
+
+ /* FIXME: DSI_PWAIT_TO */
+
+ /* program turn around timeout */
+ tmp = I915_READ(DSI_TA_TO(dsi_trans));
+ tmp &= ~TA_TIMEOUT_VALUE_MASK;
+ tmp |= TA_TIMEOUT_VALUE(ta_timeout);
+ I915_WRITE(DSI_TA_TO(dsi_trans), tmp);
+ }
+}
+
+static void
+gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
{
/* step 4a: power up all lanes of the DDI used by DSI */
gen11_dsi_power_up_lanes(encoder);
+
+ /* step 4b: configure lane sequencing of the Combo-PHY transmitters */
+ gen11_dsi_config_phy_lanes_sequence(encoder);
+
+ /* step 4c: configure voltage swing and skew */
+ gen11_dsi_voltage_swing_program_seq(encoder);
+
+ /* enable DDI buffer */
+ gen11_dsi_enable_ddi_buffer(encoder);
+
+ /* setup D-PHY timings */
+ gen11_dsi_setup_dphy_timings(encoder);
+
+ /* step 4h: setup DSI protocol timeouts */
+ gen11_dsi_setup_timeouts(encoder);
+
+ /* Step (4h, 4i, 4j, 4k): Configure transcoder */
+ gen11_dsi_configure_transcoder(encoder, pipe_config);
+
+ /* Step 4l: Gate DDI clocks */
+ gen11_dsi_gate_clocks(encoder);
}
-static void __attribute__((unused))
-gen11_dsi_pre_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct mipi_dsi_device *dsi;
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp;
+ int ret;
+
+ /* set maximum return packet size */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+
+ /*
+ * FIXME: This uses the number of DW's currently in the payload
+ * receive queue. This is probably not what we want here.
+ */
+ tmp = I915_READ(DSI_CMD_RXCTL(dsi_trans));
+ tmp &= NUMBER_RX_PLOAD_DW_MASK;
+ /* multiply "Number Rx Payload DW" by 4 to get max value */
+ tmp = tmp * 4;
+ dsi = intel_dsi->dsi_hosts[port]->device;
+ ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp);
+ if (ret < 0)
+			DRM_ERROR("error setting max return pkt size %d\n", tmp);
+ }
+
+ /* panel power on related mipi dsi vbt sequences */
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
+ intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
+ /* ensure all panel commands dispatched before enabling transcoder */
+ wait_for_cmds_dispatched_to_panel(encoder);
+}
+
+static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
/* step2: enable IO power */
gen11_dsi_enable_io_power(encoder);
/* step3: enable DSI PLL */
gen11_dsi_program_esc_clk_div(encoder);
+}
+
+static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+ /* step3b */
+ gen11_dsi_map_pll(encoder, pipe_config);
/* step4: enable DSI port and DPHY */
- gen11_dsi_enable_port_and_phy(encoder);
+ gen11_dsi_enable_port_and_phy(encoder, pipe_config);
+
+ /* step5: program and powerup panel */
+ gen11_dsi_powerup_panel(encoder);
+
+ /* step6c: configure transcoder timings */
+ gen11_dsi_set_transcoder_timings(encoder, pipe_config);
+
+ /* step6d: enable dsi transcoder */
+ gen11_dsi_enable_transcoder(encoder);
+
+ /* step7: enable backlight */
+ intel_panel_enable_backlight(pipe_config, conn_state);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+}
+
+static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+
+ /* disable transcoder */
+ tmp = I915_READ(PIPECONF(dsi_trans));
+ tmp &= ~PIPECONF_ENABLE;
+ I915_WRITE(PIPECONF(dsi_trans), tmp);
+
+ /* wait for transcoder to be disabled */
+ if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+ I965_PIPECONF_ACTIVE, 0, 50))
+			DRM_ERROR("DSI transcoder not disabled\n");
+ }
+}
+
+static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
+
+ /* ensure cmds dispatched to panel */
+ wait_for_cmds_dispatched_to_panel(encoder);
+}
+
+static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp;
+
+ /* put dsi link in ULPS */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(DSI_LP_MSG(dsi_trans));
+ tmp |= LINK_ENTER_ULPS;
+ tmp &= ~LINK_ULPS_TYPE_LP11;
+ I915_WRITE(DSI_LP_MSG(dsi_trans), tmp);
+
+ if (wait_for_us((I915_READ(DSI_LP_MSG(dsi_trans)) &
+ LINK_IN_ULPS),
+ 10))
+ DRM_ERROR("DSI link not in ULPS\n");
+ }
+
+ /* disable ddi function */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ tmp &= ~TRANS_DDI_FUNC_ENABLE;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ }
+
+ /* disable port sync mode if dual link */
+ if (intel_dsi->dual_link) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+ tmp &= ~PORT_SYNC_MODE_ENABLE;
+ I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ }
+ }
+}
+
+static void gen11_dsi_disable_port(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ gen11_dsi_ungate_clocks(encoder);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(DDI_BUF_CTL(port));
+ tmp &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), tmp);
+
+ if (wait_for_us((I915_READ(DDI_BUF_CTL(port)) &
+ DDI_BUF_IS_IDLE),
+ 8))
+ DRM_ERROR("DDI port:%c buffer not idle\n",
+ port_name(port));
+ }
+ gen11_dsi_ungate_clocks(encoder);
+}
+
+static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 tmp;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO);
+
+ if (intel_dsi->dual_link)
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_B_IO);
+
+ /* set mode to DDI */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
+ tmp &= ~COMBO_PHY_MODE_DSI;
+ I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
+ }
+}
+
+static void gen11_dsi_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+ /* step1: turn off backlight */
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
+ intel_panel_disable_backlight(old_conn_state);
+
+ /* step2d,e: disable transcoder and wait */
+ gen11_dsi_disable_transcoder(encoder);
+
+ /* step2f,g: powerdown panel */
+ gen11_dsi_powerdown_panel(encoder);
+
+	/* step2h,i,j: deconfigure transcoder */
+ gen11_dsi_deconfigure_trancoder(encoder);
+
+ /* step3: disable port */
+ gen11_dsi_disable_port(encoder);
+
+ /* step4: disable IO power */
+ gen11_dsi_disable_io_power(encoder);
+}
+
+static void gen11_dsi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 pll_id;
+
+ /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
+ pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+ pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+ pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
+}
+
+static bool gen11_dsi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
+ base);
+ struct intel_connector *intel_connector = intel_dsi->attached_connector;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ const struct drm_display_mode *fixed_mode =
+ intel_connector->panel.fixed_mode;
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+
+ intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+ intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode);
+
+ adjusted_mode->flags = 0;
+
+	/* Dual link goes to transcoder DSI '0' */
+ if (intel_dsi->ports == BIT(PORT_B))
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_1;
+ else
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_0;
+
+ pipe_config->clock_set = true;
+ pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5;
+
+ return true;
+}
+
+static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u64 domains = 0;
+ enum port port;
+
+ for_each_dsi_port(port, intel_dsi->ports)
+ if (port == PORT_A)
+ domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO);
+ else
+ domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO);
+
+ return domains;
+}
+
+static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+ enum transcoder dsi_trans;
+ bool ret = false;
+
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain))
+ return false;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ *pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ *pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ *pipe = PIPE_C;
+ break;
+ default:
+ DRM_ERROR("Invalid PIPE input\n");
+ goto out;
+ }
+
+ tmp = I915_READ(PIPECONF(dsi_trans));
+ ret = tmp & PIPECONF_ENABLE;
+ }
+out:
+ intel_display_power_put(dev_priv, encoder->power_domain);
+ return ret;
+}
+
+static void gen11_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+ intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs gen11_dsi_encoder_funcs = {
+ .destroy = gen11_dsi_encoder_destroy,
+};
+
+static const struct drm_connector_funcs gen11_dsi_connector_funcs = {
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
+ .destroy = intel_connector_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_digital_connector_atomic_get_property,
+ .atomic_set_property = intel_digital_connector_atomic_set_property,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = intel_digital_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs gen11_dsi_connector_helper_funcs = {
+ .get_modes = intel_dsi_get_modes,
+ .mode_valid = intel_dsi_mode_valid,
+ .atomic_check = intel_digital_connector_atomic_check,
+};
+
+static int gen11_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static int gen11_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static ssize_t gen11_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
+ struct mipi_dsi_packet dsi_pkt;
+ ssize_t ret;
+ bool enable_lpdt = false;
+
+ ret = mipi_dsi_create_packet(&dsi_pkt, msg);
+ if (ret < 0)
+ return ret;
+
+ if (msg->flags & MIPI_DSI_MSG_USE_LPM)
+ enable_lpdt = true;
+
+ /* send packet header */
+ ret = dsi_send_pkt_hdr(intel_dsi_host, dsi_pkt, enable_lpdt);
+ if (ret < 0)
+ return ret;
+
+	/* only long packets contain a payload */
+ if (mipi_dsi_packet_format_is_long(msg->type)) {
+ ret = dsi_send_pkt_payld(intel_dsi_host, dsi_pkt);
+ if (ret < 0)
+ return ret;
+ }
+
+ //TODO: add payload receive code if needed
+
+ ret = sizeof(dsi_pkt.header) + dsi_pkt.payload_length;
+
+ return ret;
+}
+
+static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
+ .attach = gen11_dsi_host_attach,
+ .detach = gen11_dsi_host_detach,
+ .transfer = gen11_dsi_host_transfer,
+};
+
+void icl_dsi_init(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_dsi *intel_dsi;
+ struct intel_encoder *encoder;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ struct drm_display_mode *scan, *fixed_mode = NULL;
+ enum port port;
+
+ if (!intel_bios_is_dsi_present(dev_priv, &port))
+ return;
+
+ intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+ if (!intel_dsi)
+ return;
+
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector) {
+ kfree(intel_dsi);
+ return;
+ }
+
+ encoder = &intel_dsi->base;
+ intel_dsi->attached_connector = intel_connector;
+ connector = &intel_connector->base;
+
+ /* register DSI encoder with DRM subsystem */
+ drm_encoder_init(dev, &encoder->base, &gen11_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
+
+ encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
+ encoder->pre_enable = gen11_dsi_pre_enable;
+ encoder->disable = gen11_dsi_disable;
+ encoder->port = port;
+ encoder->get_config = gen11_dsi_get_config;
+ encoder->compute_config = gen11_dsi_compute_config;
+ encoder->get_hw_state = gen11_dsi_get_hw_state;
+ encoder->type = INTEL_OUTPUT_DSI;
+ encoder->cloneable = 0;
+ encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+ encoder->power_domain = POWER_DOMAIN_PORT_DSI;
+ encoder->get_power_domains = gen11_dsi_get_power_domains;
+
+ /* register DSI connector with DRM subsystem */
+ drm_connector_init(dev, connector, &gen11_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ /* attach connector to encoder */
+ intel_connector_attach_encoder(intel_connector, encoder);
+
+ /* fill mode info from VBT */
+ mutex_lock(&dev->mode_config.mutex);
+ intel_dsi_vbt_get_modes(intel_dsi);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ break;
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (!fixed_mode) {
+ DRM_ERROR("DSI fixed mode info missing\n");
+ goto err;
+ }
+
+ connector->display_info.width_mm = fixed_mode->width_mm;
+ connector->display_info.height_mm = fixed_mode->height_mm;
+ intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+ intel_panel_setup_backlight(connector, INVALID_PIPE);
+
+
+ if (dev_priv->vbt.dsi.config->dual_link)
+ intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
+ else
+ intel_dsi->ports = BIT(port);
+
+ intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
+ intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ struct intel_dsi_host *host;
+
+ host = intel_dsi_host_init(intel_dsi, &gen11_dsi_host_ops, port);
+ if (!host)
+ goto err;
+
+ intel_dsi->dsi_hosts[port] = host;
+ }
+
+ if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
+ DRM_DEBUG_KMS("no device found\n");
+ goto err;
+ }
+
+ return;
+
+err:
+ drm_encoder_cleanup(&encoder->base);
+ kfree(intel_dsi);
+ kfree(intel_connector);
}
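
Editor note: the transfer hook above reports the handled size as the 4-byte packet header plus the payload length, and only long packets carry a payload. Below is a minimal stand-alone sketch of that accounting, using a simplified packet struct rather than the kernel's struct mipi_dsi_packet; all names in it are illustrative only.

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

/* Simplified model of a DSI packet: 4-byte header, optional payload. */
struct dsi_pkt_model {
	unsigned char header[4];
	size_t payload_length;	/* 0 for short packets */
	bool is_long;
};

/* Mirror of the size reported back to the caller after a transfer. */
static size_t dsi_transfer_size(const struct dsi_pkt_model *pkt)
{
	size_t ret = sizeof(pkt->header);

	if (pkt->is_long)	/* only long packets contain a payload */
		ret += pkt->payload_length;

	return ret;
}

int main(void)
{
	struct dsi_pkt_model short_pkt = { .payload_length = 0, .is_long = false };
	struct dsi_pkt_model long_pkt = { .payload_length = 16, .is_long = true };

	printf("short packet: %zu bytes\n", dsi_transfer_size(&short_pkt)); /* 4 */
	printf("long packet:  %zu bytes\n", dsi_transfer_size(&long_pkt));  /* 20 */
	return 0;
}
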
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index b04952bacf77..8cb02f28d30c 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -184,6 +184,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->fifo_changed = false;
crtc_state->wm.need_postvbl_update = false;
crtc_state->fb_bits = 0;
+ crtc_state->update_planes = 0;
return &crtc_state->base;
}
@@ -203,6 +204,72 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
drm_atomic_helper_crtc_destroy_state(crtc, state);
}
+static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
+ int num_scalers_need, struct intel_crtc *intel_crtc,
+ const char *name, int idx,
+ struct intel_plane_state *plane_state,
+ int *scaler_id)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ int j;
+ u32 mode;
+
+ if (*scaler_id < 0) {
+ /* find a free scaler */
+ for (j = 0; j < intel_crtc->num_scalers; j++) {
+ if (scaler_state->scalers[j].in_use)
+ continue;
+
+ *scaler_id = j;
+ scaler_state->scalers[*scaler_id].in_use = 1;
+ break;
+ }
+ }
+
+ if (WARN(*scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx))
+ return;
+
+ /* set scaler mode */
+ if (plane_state && plane_state->base.fb &&
+ plane_state->base.fb->format->is_yuv &&
+ plane_state->base.fb->format->num_planes > 1) {
+ if (IS_GEN9(dev_priv) &&
+ !IS_GEMINILAKE(dev_priv)) {
+ mode = SKL_PS_SCALER_MODE_NV12;
+ } else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
+ /*
+ * On gen11+'s HDR planes we only use the scaler for
+ * scaling. They have a dedicated chroma upsampler, so
+ * we don't need the scaler to upsample the UV plane.
+ */
+ mode = PS_SCALER_MODE_NORMAL;
+ } else {
+ mode = PS_SCALER_MODE_PLANAR;
+
+ if (plane_state->linked_plane)
+ mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id);
+ }
+ } else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
+ mode = PS_SCALER_MODE_NORMAL;
+ } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
+ /*
+		 * When only 1 scaler is in use on a pipe with 2 scalers,
+		 * scaler 0 operates in high quality (HQ) mode.
+		 * In this case use scaler 0 to take advantage of HQ mode.
+ */
+ scaler_state->scalers[*scaler_id].in_use = 0;
+ *scaler_id = 0;
+ scaler_state->scalers[0].in_use = 1;
+ mode = SKL_PS_SCALER_MODE_HQ;
+ } else {
+ mode = SKL_PS_SCALER_MODE_DYN;
+ }
+
+ DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
+ intel_crtc->pipe, *scaler_id, name, idx);
+ scaler_state->scalers[*scaler_id].mode = mode;
+}
+
/**
* intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
* @dev_priv: i915 device
@@ -232,7 +299,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct drm_atomic_state *drm_state = crtc_state->base.state;
struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
int num_scalers_need;
- int i, j;
+ int i;
num_scalers_need = hweight32(scaler_state->scaler_users);
@@ -304,59 +371,17 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
idx = plane->base.id;
/* plane on different crtc cannot be a scaler user of this crtc */
- if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
+ if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
continue;
- }
plane_state = intel_atomic_get_new_plane_state(intel_state,
intel_plane);
scaler_id = &plane_state->scaler_id;
}
- if (*scaler_id < 0) {
- /* find a free scaler */
- for (j = 0; j < intel_crtc->num_scalers; j++) {
- if (!scaler_state->scalers[j].in_use) {
- scaler_state->scalers[j].in_use = 1;
- *scaler_id = j;
- DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
- intel_crtc->pipe, *scaler_id, name, idx);
- break;
- }
- }
- }
-
- if (WARN_ON(*scaler_id < 0)) {
- DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
- continue;
- }
-
- /* set scaler mode */
- if ((INTEL_GEN(dev_priv) >= 9) &&
- plane_state && plane_state->base.fb &&
- plane_state->base.fb->format->format ==
- DRM_FORMAT_NV12) {
- if (INTEL_GEN(dev_priv) == 9 &&
- !IS_GEMINILAKE(dev_priv) &&
- !IS_SKYLAKE(dev_priv))
- scaler_state->scalers[*scaler_id].mode =
- SKL_PS_SCALER_MODE_NV12;
- else
- scaler_state->scalers[*scaler_id].mode =
- PS_SCALER_MODE_PLANAR;
- } else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
- /*
- * when only 1 scaler is in use on either pipe A or B,
- * scaler 0 operates in high quality (HQ) mode.
- * In this case use scaler 0 to take advantage of HQ mode
- */
- *scaler_id = 0;
- scaler_state->scalers[0].in_use = 1;
- scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
- scaler_state->scalers[1].in_use = 0;
- } else {
- scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
- }
+ intel_atomic_setup_scaler(scaler_state, num_scalers_need,
+ intel_crtc, name, idx,
+ plane_state, scaler_id);
}
return 0;
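
Editor note: the refactored helper above keeps the old behaviour of handing out the first free scaler and of falling back to scaler 0 when only one scaler is needed on a two-scaler pipe, so HQ mode can be used. A simplified, stand-alone model of that selection (the struct and function names here are illustrative, not the driver's):

#include <stdio.h>

#define NUM_SCALERS 2

struct scaler_model {
	int in_use;
};

/*
 * Pick the first unused scaler; if only one scaler is needed on a
 * two-scaler pipe, move the assignment to scaler 0 (the HQ-capable one).
 */
static int assign_scaler(struct scaler_model *scalers, int num_scalers,
			 int num_scalers_need)
{
	int id = -1, j;

	for (j = 0; j < num_scalers; j++) {
		if (scalers[j].in_use)
			continue;
		id = j;
		scalers[j].in_use = 1;
		break;
	}

	if (id > 0 && num_scalers_need == 1 && num_scalers > 1) {
		scalers[id].in_use = 0;
		id = 0;
		scalers[0].in_use = 1;
	}

	return id;
}

int main(void)
{
	struct scaler_model scalers[NUM_SCALERS] = { { 0 }, { 0 } };

	/* two users on a two-scaler pipe: scalers handed out in order */
	printf("first user  -> scaler %d\n", assign_scaler(scalers, NUM_SCALERS, 2));
	printf("second user -> scaler %d\n", assign_scaler(scalers, NUM_SCALERS, 2));
	return 0;
}
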
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index aabebe0d2e9b..0a73e6e65c20 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -36,28 +36,31 @@
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
-/**
- * intel_create_plane_state - create plane state object
- * @plane: drm plane
- *
- * Allocates a fresh plane state for the given plane and sets some of
- * the state values to sensible initial values.
- *
- * Returns: A newly allocated plane state, or NULL on failure
- */
-struct intel_plane_state *
-intel_create_plane_state(struct drm_plane *plane)
+struct intel_plane *intel_plane_alloc(void)
{
- struct intel_plane_state *state;
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
- state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (!state)
- return NULL;
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
- state->base.plane = plane;
- state->base.rotation = DRM_MODE_ROTATE_0;
+ plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+ if (!plane_state) {
+ kfree(plane);
+ return ERR_PTR(-ENOMEM);
+ }
- return state;
+ __drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
+ plane_state->scaler_id = -1;
+
+ return plane;
+}
+
+void intel_plane_free(struct intel_plane *plane)
+{
+ intel_plane_destroy_state(&plane->base, plane->base.state);
+ kfree(plane);
}
/**
@@ -117,10 +120,14 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_plane *intel_plane = to_intel_plane(plane);
int ret;
+ crtc_state->active_planes &= ~BIT(intel_plane->id);
+ crtc_state->nv12_planes &= ~BIT(intel_plane->id);
+ intel_state->base.visible = false;
+
+ /* If this is a cursor plane, no further checks are needed. */
if (!intel_state->base.crtc && !old_plane_state->base.crtc)
return 0;
- intel_state->base.visible = false;
ret = intel_plane->check_plane(crtc_state, intel_state);
if (ret)
return ret;
@@ -128,13 +135,12 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
/* FIXME pre-g4x don't work like this */
if (state->visible)
crtc_state->active_planes |= BIT(intel_plane->id);
- else
- crtc_state->active_planes &= ~BIT(intel_plane->id);
if (state->visible && state->fb->format->format == DRM_FORMAT_NV12)
crtc_state->nv12_planes |= BIT(intel_plane->id);
- else
- crtc_state->nv12_planes &= ~BIT(intel_plane->id);
+
+ if (state->visible || old_plane_state->base.visible)
+ crtc_state->update_planes |= BIT(intel_plane->id);
return intel_plane_atomic_calc_changes(old_crtc_state,
&crtc_state->base,
@@ -152,6 +158,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
const struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
+ new_plane_state->visible = false;
if (!crtc)
return 0;
@@ -164,29 +171,123 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
to_intel_plane_state(new_plane_state));
}
-static void intel_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static struct intel_plane *
+skl_next_plane_to_commit(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct skl_ddb_entry entries_y[I915_MAX_PLANES],
+ struct skl_ddb_entry entries_uv[I915_MAX_PLANES],
+ unsigned int *update_mask)
{
- struct intel_atomic_state *state = to_intel_atomic_state(old_state->state);
- struct intel_plane *intel_plane = to_intel_plane(plane);
- const struct intel_plane_state *new_plane_state =
- intel_atomic_get_new_plane_state(state, intel_plane);
- struct drm_crtc *crtc = new_plane_state->base.crtc ?: old_state->crtc;
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ if (*update_mask == 0)
+ return NULL;
- if (new_plane_state->base.visible) {
- const struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, to_intel_crtc(crtc));
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ enum plane_id plane_id = plane->id;
- trace_intel_update_plane(plane,
- to_intel_crtc(crtc));
+ if (crtc->pipe != plane->pipe ||
+ !(*update_mask & BIT(plane_id)))
+ continue;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
+ entries_y,
+ I915_MAX_PLANES, plane_id) ||
+ skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_uv[plane_id],
+ entries_uv,
+ I915_MAX_PLANES, plane_id))
+ continue;
+
+ *update_mask &= ~BIT(plane_id);
+ entries_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];
+ entries_uv[plane_id] = crtc_state->wm.skl.plane_ddb_uv[plane_id];
+
+ return plane;
+ }
- intel_plane->update_plane(intel_plane,
- new_crtc_state, new_plane_state);
- } else {
- trace_intel_disable_plane(plane,
- to_intel_crtc(crtc));
+ /* should never happen */
+ WARN_ON(1);
- intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
+ return NULL;
+}
+
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct skl_ddb_entry entries_y[I915_MAX_PLANES];
+ struct skl_ddb_entry entries_uv[I915_MAX_PLANES];
+ u32 update_mask = new_crtc_state->update_planes;
+ struct intel_plane *plane;
+
+ memcpy(entries_y, old_crtc_state->wm.skl.plane_ddb_y,
+ sizeof(old_crtc_state->wm.skl.plane_ddb_y));
+ memcpy(entries_uv, old_crtc_state->wm.skl.plane_ddb_uv,
+ sizeof(old_crtc_state->wm.skl.plane_ddb_uv));
+
+ while ((plane = skl_next_plane_to_commit(state, crtc,
+ entries_y, entries_uv,
+ &update_mask))) {
+ struct intel_plane_state *new_plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+
+ if (new_plane_state->base.visible) {
+ trace_intel_update_plane(&plane->base, crtc);
+ plane->update_plane(plane, new_crtc_state, new_plane_state);
+ } else if (new_plane_state->slave) {
+ struct intel_plane *master =
+ new_plane_state->linked_plane;
+
+ /*
+ * We update the slave plane from this function because
+ * programming it from the master plane's update_plane
+ * callback runs into issues when the Y plane is
+ * reassigned, disabled or used by a different plane.
+ *
+ * The slave plane is updated with the master plane's
+ * plane_state.
+ */
+ new_plane_state =
+ intel_atomic_get_new_plane_state(state, master);
+
+ trace_intel_update_plane(&plane->base, crtc);
+ plane->update_slave(plane, new_crtc_state, new_plane_state);
+ } else {
+ trace_intel_disable_plane(&plane->base, crtc);
+ plane->disable_plane(plane, new_crtc_state);
+ }
+ }
+}
+
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u32 update_mask = new_crtc_state->update_planes;
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
+ if (crtc->pipe != plane->pipe ||
+ !(update_mask & BIT(plane->id)))
+ continue;
+
+ if (new_plane_state->base.visible) {
+ trace_intel_update_plane(&plane->base, crtc);
+ plane->update_plane(plane, new_crtc_state, new_plane_state);
+ } else {
+ trace_intel_disable_plane(&plane->base, crtc);
+ plane->disable_plane(plane, new_crtc_state);
+ }
}
}
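
Editor note: skl_next_plane_to_commit() above orders plane updates so a plane is only flushed once its new DDB allocation no longer overlaps the allocation any not-yet-updated plane still occupies. A simplified stand-alone model of that overlap rule (entry layout and names are illustrative, not the driver's skl_ddb_entry API):

#include <stdio.h>
#include <stdbool.h>

struct ddb_entry_model {
	unsigned int start, end;	/* [start, end) in DDB blocks */
};

static bool ddb_overlaps(const struct ddb_entry_model *a,
			 const struct ddb_entry_model *b)
{
	return a->start < b->end && b->start < a->end;
}

/* A plane may commit only if its new DDB space is free of pending planes. */
static bool can_commit(const struct ddb_entry_model *new_alloc,
		       const struct ddb_entry_model *old_allocs,
		       int nplanes, int self, const unsigned int *pending)
{
	int i;

	for (i = 0; i < nplanes; i++) {
		if (i == self || !pending[i])
			continue;
		if (ddb_overlaps(new_alloc, &old_allocs[i]))
			return false;
	}
	return true;
}

int main(void)
{
	struct ddb_entry_model old_ddb[2] = { { 0, 100 }, { 100, 200 } };
	struct ddb_entry_model new_ddb[2] = { { 0, 150 }, { 150, 200 } };
	unsigned int pending[2] = { 1, 1 };

	/* plane 0 grows into plane 1's old space, so plane 1 must go first */
	printf("plane 0 first? %d\n", can_commit(&new_ddb[0], old_ddb, 2, 0, pending)); /* 0 */
	printf("plane 1 first? %d\n", can_commit(&new_ddb[1], old_ddb, 2, 1, pending)); /* 1 */
	return 0;
}
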
@@ -194,7 +295,6 @@ const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.prepare_fb = intel_prepare_plane_fb,
.cleanup_fb = intel_cleanup_plane_fb,
.atomic_check = intel_plane_atomic_check,
- .atomic_update = intel_plane_atomic_update,
};
/**
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index ee3ca2de983b..ae55a6865d5c 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -153,32 +153,32 @@ static const struct {
int n;
int cts;
} hdmi_aud_ncts[] = {
- { 44100, TMDS_296M, 4459, 234375 },
- { 44100, TMDS_297M, 4704, 247500 },
- { 48000, TMDS_296M, 5824, 281250 },
- { 48000, TMDS_297M, 5120, 247500 },
{ 32000, TMDS_296M, 5824, 421875 },
{ 32000, TMDS_297M, 3072, 222750 },
+ { 32000, TMDS_593M, 5824, 843750 },
+ { 32000, TMDS_594M, 3072, 445500 },
+ { 44100, TMDS_296M, 4459, 234375 },
+ { 44100, TMDS_297M, 4704, 247500 },
+ { 44100, TMDS_593M, 8918, 937500 },
+ { 44100, TMDS_594M, 9408, 990000 },
{ 88200, TMDS_296M, 8918, 234375 },
{ 88200, TMDS_297M, 9408, 247500 },
- { 96000, TMDS_296M, 11648, 281250 },
- { 96000, TMDS_297M, 10240, 247500 },
+ { 88200, TMDS_593M, 17836, 937500 },
+ { 88200, TMDS_594M, 18816, 990000 },
{ 176400, TMDS_296M, 17836, 234375 },
{ 176400, TMDS_297M, 18816, 247500 },
- { 192000, TMDS_296M, 23296, 281250 },
- { 192000, TMDS_297M, 20480, 247500 },
- { 44100, TMDS_593M, 8918, 937500 },
- { 44100, TMDS_594M, 9408, 990000 },
+ { 176400, TMDS_593M, 35672, 937500 },
+ { 176400, TMDS_594M, 37632, 990000 },
+ { 48000, TMDS_296M, 5824, 281250 },
+ { 48000, TMDS_297M, 5120, 247500 },
{ 48000, TMDS_593M, 5824, 562500 },
{ 48000, TMDS_594M, 6144, 594000 },
- { 32000, TMDS_593M, 5824, 843750 },
- { 32000, TMDS_594M, 3072, 445500 },
- { 88200, TMDS_593M, 17836, 937500 },
- { 88200, TMDS_594M, 18816, 990000 },
+ { 96000, TMDS_296M, 11648, 281250 },
+ { 96000, TMDS_297M, 10240, 247500 },
{ 96000, TMDS_593M, 11648, 562500 },
{ 96000, TMDS_594M, 12288, 594000 },
- { 176400, TMDS_593M, 35672, 937500 },
- { 176400, TMDS_594M, 37632, 990000 },
+ { 192000, TMDS_296M, 23296, 281250 },
+ { 192000, TMDS_297M, 20480, 247500 },
{ 192000, TMDS_593M, 23296, 562500 },
{ 192000, TMDS_594M, 24576, 594000 },
};
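
Editor note: the hunk above only regroups the N/CTS table by sample rate; the values themselves follow the usual HDMI audio clock regeneration relation f_TMDS * N = 128 * f_s * CTS. A quick stand-alone check of one 594 MHz entry, using values taken directly from the table (a sketch, not part of the driver):

#include <stdio.h>

int main(void)
{
	unsigned long long tmds_hz = 594000000ULL;	/* TMDS_594M */
	unsigned long long sample_rate = 48000;		/* Hz */
	unsigned long long n = 6144;

	/* CTS = f_TMDS * N / (128 * f_s) */
	unsigned long long cts = tmds_hz * n / (128 * sample_rate);

	printf("CTS = %llu (table lists 594000)\n", cts);
	return 0;
}
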
@@ -929,6 +929,9 @@ static int i915_audio_component_bind(struct device *i915_kdev,
if (WARN_ON(acomp->base.ops || acomp->base.dev))
return -EEXIST;
+ if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS)))
+ return -ENOMEM;
+
drm_modeset_lock_all(&dev_priv->drm);
acomp->base.ops = &i915_audio_component_ops;
acomp->base.dev = i915_kdev;
@@ -952,6 +955,8 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
acomp->base.dev = NULL;
dev_priv->audio_component = NULL;
drm_modeset_unlock_all(&dev_priv->drm);
+
+ device_link_remove(hda_kdev, i915_kdev);
}
static const struct component_ops i915_audio_component_bind_ops = {
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 1faa494e2bc9..6d3e0260d49c 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -420,6 +420,13 @@ parse_general_features(struct drm_i915_private *dev_priv,
intel_bios_ssc_frequency(dev_priv, general->ssc_freq);
dev_priv->vbt.display_clock_mode = general->display_clock_mode;
dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+ if (bdb->version >= 181) {
+ dev_priv->vbt.orientation = general->rotate_180 ?
+ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
+ DRM_MODE_PANEL_ORIENTATION_NORMAL;
+ } else {
+ dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ }
DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
dev_priv->vbt.int_tv_support,
dev_priv->vbt.int_crt_support,
@@ -852,6 +859,30 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
parse_dsi_backlight_ports(dev_priv, bdb->version, port);
+ /* FIXME is the 90 vs. 270 correct? */
+ switch (config->rotation) {
+ case ENABLE_ROTATION_0:
+ /*
+ * Most (all?) VBTs claim 0 degrees despite having
+ * an upside down panel, thus we do not trust this.
+ */
+ dev_priv->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ break;
+ case ENABLE_ROTATION_90:
+ dev_priv->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
+ break;
+ case ENABLE_ROTATION_180:
+ dev_priv->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+ break;
+ case ENABLE_ROTATION_270:
+ dev_priv->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
+ break;
+ }
+
/* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}
@@ -1721,7 +1752,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
const struct bdb_header *bdb;
u8 __iomem *bios = NULL;
- if (INTEL_INFO(dev_priv)->num_pipes == 0) {
+ if (!HAS_DISPLAY(dev_priv)) {
DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
return;
}
@@ -2039,17 +2070,17 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
dvo_port = child->dvo_port;
- switch (dvo_port) {
- case DVO_PORT_MIPIA:
- case DVO_PORT_MIPIC:
+ if (dvo_port == DVO_PORT_MIPIA ||
+ (dvo_port == DVO_PORT_MIPIB && IS_ICELAKE(dev_priv)) ||
+ (dvo_port == DVO_PORT_MIPIC && !IS_ICELAKE(dev_priv))) {
if (port)
*port = dvo_port - DVO_PORT_MIPIA;
return true;
- case DVO_PORT_MIPIB:
- case DVO_PORT_MIPID:
+ } else if (dvo_port == DVO_PORT_MIPIB ||
+ dvo_port == DVO_PORT_MIPIC ||
+ dvo_port == DVO_PORT_MIPID) {
DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
port_name(dvo_port - DVO_PORT_MIPIA));
- break;
}
}
@@ -2159,3 +2190,49 @@ intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
return false;
}
+
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ enum aux_ch aux_ch;
+
+ if (!info->alternate_aux_channel) {
+ aux_ch = (enum aux_ch)port;
+
+ DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
+ aux_ch_name(aux_ch), port_name(port));
+ return aux_ch;
+ }
+
+ switch (info->alternate_aux_channel) {
+ case DP_AUX_A:
+ aux_ch = AUX_CH_A;
+ break;
+ case DP_AUX_B:
+ aux_ch = AUX_CH_B;
+ break;
+ case DP_AUX_C:
+ aux_ch = AUX_CH_C;
+ break;
+ case DP_AUX_D:
+ aux_ch = AUX_CH_D;
+ break;
+ case DP_AUX_E:
+ aux_ch = AUX_CH_E;
+ break;
+ case DP_AUX_F:
+ aux_ch = AUX_CH_F;
+ break;
+ default:
+ MISSING_CASE(info->alternate_aux_channel);
+ aux_ch = AUX_CH_A;
+ break;
+ }
+
+ DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
+ aux_ch_name(aux_ch), port_name(port));
+
+ return aux_ch;
+}
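
Editor note: the reworked DSI-port check above accepts MIPIA everywhere, MIPIB only on Icelake, and MIPIC only on pre-Icelake platforms. A simplified stand-alone model of that predicate (the enum and function names below are illustrative only):

#include <stdio.h>
#include <stdbool.h>

enum dvo_port_model { MIPIA, MIPIB, MIPIC, MIPID };

/* Port A is always valid; the second DSI port differs per platform. */
static bool dsi_port_supported(enum dvo_port_model port, bool is_icelake)
{
	return port == MIPIA ||
	       (port == MIPIB && is_icelake) ||
	       (port == MIPIC && !is_icelake);
}

int main(void)
{
	printf("MIPIB on ICL:     %d\n", dsi_port_supported(MIPIB, true));  /* 1 */
	printf("MIPIB on pre-ICL: %d\n", dsi_port_supported(MIPIB, false)); /* 0 */
	printf("MIPIC on ICL:     %d\n", dsi_port_supported(MIPIC, true));  /* 0 */
	return 0;
}
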
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 84bf8d827136..447c5256f63a 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -27,11 +27,7 @@
#include "i915_drv.h"
-#ifdef CONFIG_SMP
-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_cpu)
-#else
-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL)
-#endif
+#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 8d74276029e6..25e3aba9cded 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2660,37 +2660,18 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
fraction = 200;
}
- rawclk = CNP_RAWCLK_DIV((divider / 1000) - 1);
- if (fraction)
- rawclk |= CNP_RAWCLK_FRAC(DIV_ROUND_CLOSEST(1000,
- fraction) - 1);
+ rawclk = CNP_RAWCLK_DIV(divider / 1000);
+ if (fraction) {
+ int numerator = 1;
- I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
- return divider + fraction;
-}
-
-static int icp_rawclk(struct drm_i915_private *dev_priv)
-{
- u32 rawclk;
- int divider, numerator, denominator, frequency;
-
- if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
- frequency = 24000;
- divider = 23;
- numerator = 0;
- denominator = 0;
- } else {
- frequency = 19200;
- divider = 18;
- numerator = 1;
- denominator = 4;
+ rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
+ fraction) - 1);
+ if (HAS_PCH_ICP(dev_priv))
+ rawclk |= ICP_RAWCLK_NUM(numerator);
}
- rawclk = CNP_RAWCLK_DIV(divider) | ICP_RAWCLK_NUM(numerator) |
- ICP_RAWCLK_DEN(denominator);
-
I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
- return frequency;
+ return divider + fraction;
}
static int pch_rawclk(struct drm_i915_private *dev_priv)
@@ -2740,9 +2721,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
*/
void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
- if (HAS_PCH_ICP(dev_priv))
- dev_priv->rawclk_freq = icp_rawclk(dev_priv);
- else if (HAS_PCH_CNP(dev_priv))
+ if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv))
dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
dev_priv->rawclk_freq = pch_rawclk(dev_priv);
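
Editor note: with the icp_rawclk() special case folded into cnp_rawclk(), the 19.2 MHz case programs DIV = divider / 1000 and a fractional denominator of DIV_ROUND_CLOSEST(1000, fraction) - 1, plus a numerator of 1 on ICP. A small stand-alone check of that arithmetic (the macro below only models the kernel's DIV_ROUND_CLOSEST for positive values):

#include <stdio.h>

#define DIV_ROUND_CLOSEST_MODEL(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	int divider = 19000;	/* kHz */
	int fraction = 200;	/* kHz */
	int numerator = 1;

	int div_field = divider / 1000;
	int den_field = DIV_ROUND_CLOSEST_MODEL(numerator * 1000, fraction) - 1;

	printf("rawclk = %d kHz -> DIV=%d NUM=%d DEN=%d\n",
	       divider + fraction, div_field, numerator, den_field);
	return 0;	/* prints: rawclk = 19200 kHz -> DIV=19 NUM=1 DEN=4 */
}
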
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index c6a7beabd58d..5127da286a2b 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -149,7 +149,8 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
limited_color_range = intel_crtc_state->limited_color_range;
- if (intel_crtc_state->ycbcr420) {
+ if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
ilk_load_ycbcr_conversion_matrix(intel_crtc);
return;
} else if (crtc_state->ctm) {
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/intel_combo_phy.c
new file mode 100644
index 000000000000..3d0271cebf99
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_combo_phy.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "intel_drv.h"
+
+#define for_each_combo_port(__dev_priv, __port) \
+ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+ for_each_if(intel_port_is_combophy(__dev_priv, __port))
+
+#define for_each_combo_port_reverse(__dev_priv, __port) \
+ for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) \
+ for_each_if(intel_port_is_combophy(__dev_priv, __port))
+
+enum {
+ PROCMON_0_85V_DOT_0,
+ PROCMON_0_95V_DOT_0,
+ PROCMON_0_95V_DOT_1,
+ PROCMON_1_05V_DOT_0,
+ PROCMON_1_05V_DOT_1,
+};
+
+static const struct cnl_procmon {
+ u32 dw1, dw9, dw10;
+} cnl_procmon_values[] = {
+ [PROCMON_0_85V_DOT_0] =
+ { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
+ [PROCMON_0_95V_DOT_0] =
+ { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
+ [PROCMON_0_95V_DOT_1] =
+ { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
+ [PROCMON_1_05V_DOT_0] =
+ { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
+ [PROCMON_1_05V_DOT_1] =
+ { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
+};
+
+/*
+ * CNL has just one set of registers, while ICL has two sets: one for port A and
+ * the other for port B. The CNL registers are equivalent to the ICL port A
+ * registers, which is why we use the ICL macros even though the function has
+ * CNL in its name.
+ */
+static const struct cnl_procmon *
+cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
+{
+ const struct cnl_procmon *procmon;
+ u32 val;
+
+ val = I915_READ(ICL_PORT_COMP_DW3(port));
+ switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
+ default:
+ MISSING_CASE(val);
+ /* fall through */
+ case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
+ procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
+ break;
+ case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
+ procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
+ break;
+ case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
+ procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
+ break;
+ case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
+ procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
+ break;
+ case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
+ procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
+ break;
+ }
+
+ return procmon;
+}
+
+static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct cnl_procmon *procmon;
+ u32 val;
+
+ procmon = cnl_get_procmon_ref_values(dev_priv, port);
+
+ val = I915_READ(ICL_PORT_COMP_DW1(port));
+ val &= ~((0xff << 16) | 0xff);
+ val |= procmon->dw1;
+ I915_WRITE(ICL_PORT_COMP_DW1(port), val);
+
+ I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
+ I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
+}
+
+static bool check_phy_reg(struct drm_i915_private *dev_priv,
+ enum port port, i915_reg_t reg, u32 mask,
+ u32 expected_val)
+{
+ u32 val = I915_READ(reg);
+
+ if ((val & mask) != expected_val) {
+ DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: "
+ "current %08x mask %08x expected %08x\n",
+ port_name(port),
+ reg.reg, val, mask, expected_val);
+ return false;
+ }
+
+ return true;
+}
+
+static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct cnl_procmon *procmon;
+ bool ret;
+
+ procmon = cnl_get_procmon_ref_values(dev_priv, port);
+
+ ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port),
+ (0xff << 16) | 0xff, procmon->dw1);
+ ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port),
+ -1U, procmon->dw9);
+ ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port),
+ -1U, procmon->dw10);
+
+ return ret;
+}
+
+static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
+{
+ return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
+ (I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT);
+}
+
+static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
+{
+ enum port port = PORT_A;
+ bool ret;
+
+ if (!cnl_combo_phy_enabled(dev_priv))
+ return false;
+
+ ret = cnl_verify_procmon_ref_values(dev_priv, port);
+
+ ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5,
+ CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
+
+ return ret;
+}
+
+void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(CHICKEN_MISC_2);
+ val &= ~CNL_COMP_PWR_DOWN;
+ I915_WRITE(CHICKEN_MISC_2, val);
+
+ /* Dummy PORT_A to get the correct CNL register from the ICL macro */
+ cnl_set_procmon_ref_values(dev_priv, PORT_A);
+
+ val = I915_READ(CNL_PORT_COMP_DW0);
+ val |= COMP_INIT;
+ I915_WRITE(CNL_PORT_COMP_DW0, val);
+
+ val = I915_READ(CNL_PORT_CL1CM_DW5);
+ val |= CL_POWER_DOWN_ENABLE;
+ I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+}
+
+void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ if (!cnl_combo_phy_verify_state(dev_priv))
+ DRM_WARN("Combo PHY HW state changed unexpectedly.\n");
+
+ val = I915_READ(CHICKEN_MISC_2);
+ val |= CNL_COMP_PWR_DOWN;
+ I915_WRITE(CHICKEN_MISC_2, val);
+}
+
+static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ return !(I915_READ(ICL_PHY_MISC(port)) &
+ ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
+ (I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT);
+}
+
+static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ bool ret;
+
+ if (!icl_combo_phy_enabled(dev_priv, port))
+ return false;
+
+ ret = cnl_verify_procmon_ref_values(dev_priv, port);
+
+ ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port),
+ CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
+
+ return ret;
+}
+
+void icl_combo_phys_init(struct drm_i915_private *dev_priv)
+{
+ enum port port;
+
+ for_each_combo_port(dev_priv, port) {
+ u32 val;
+
+ if (icl_combo_phy_verify_state(dev_priv, port)) {
+ DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n",
+ port_name(port));
+ continue;
+ }
+
+ val = I915_READ(ICL_PHY_MISC(port));
+ val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+ I915_WRITE(ICL_PHY_MISC(port), val);
+
+ cnl_set_procmon_ref_values(dev_priv, port);
+
+ val = I915_READ(ICL_PORT_COMP_DW0(port));
+ val |= COMP_INIT;
+ I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+
+ val = I915_READ(ICL_PORT_CL_DW5(port));
+ val |= CL_POWER_DOWN_ENABLE;
+ I915_WRITE(ICL_PORT_CL_DW5(port), val);
+ }
+}
+
+void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+{
+ enum port port;
+
+ for_each_combo_port_reverse(dev_priv, port) {
+ u32 val;
+
+ if (!icl_combo_phy_verify_state(dev_priv, port))
+ DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
+ port_name(port));
+
+ val = I915_READ(ICL_PHY_MISC(port));
+ val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+ I915_WRITE(ICL_PHY_MISC(port), val);
+
+ val = I915_READ(ICL_PORT_COMP_DW0(port));
+ val &= ~COMP_INIT;
+ I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+ }
+}
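
Editor note: the procmon lookup in the new file above selects one of a handful of reference value sets based on the voltage and process fields read from ICL_PORT_COMP_DW3. A simplified table-driven model of that selection; only the dw9 values are taken from the table above, while the enum encodings and names are illustrative, not the hardware register fields:

#include <stdio.h>

enum voltage_model { V_0_85, V_0_95, V_1_05 };
enum process_model { DOT_0, DOT_1 };

struct procmon_model {
	enum voltage_model voltage;
	enum process_model process;
	unsigned int dw9;
};

static const struct procmon_model procmon_table[] = {
	{ V_0_85, DOT_0, 0x62AB67BB },
	{ V_0_95, DOT_0, 0x86E172C7 },
	{ V_0_95, DOT_1, 0x93F87FE1 },
	{ V_1_05, DOT_0, 0x98FA82DD },
	{ V_1_05, DOT_1, 0x9A00AB25 },
};

static unsigned int procmon_dw9(enum voltage_model v, enum process_model p)
{
	unsigned int i;

	for (i = 0; i < sizeof(procmon_table) / sizeof(procmon_table[0]); i++)
		if (procmon_table[i].voltage == v && procmon_table[i].process == p)
			return procmon_table[i].dw9;

	/* fall back to the first entry, as the driver does on a missing case */
	return procmon_table[0].dw9;
}

int main(void)
{
	printf("0.95V dot1 dw9 = 0x%08X\n", procmon_dw9(V_0_95, DOT_1));
	return 0;
}
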
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_connector.c
index ca44bf368e24..18e370f607bc 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_connector.c
@@ -25,11 +25,140 @@
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "i915_drv.h"
+int intel_connector_init(struct intel_connector *connector)
+{
+ struct intel_digital_connector_state *conn_state;
+
+ /*
+	 * Allocate enough memory to hold intel_digital_connector_state.
+ * This might be a few bytes too many, but for connectors that don't
+ * need it we'll free the state and allocate a smaller one on the first
+ * successful commit anyway.
+ */
+ conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
+ if (!conn_state)
+ return -ENOMEM;
+
+ __drm_atomic_helper_connector_reset(&connector->base,
+ &conn_state->base);
+
+ return 0;
+}
+
+struct intel_connector *intel_connector_alloc(void)
+{
+ struct intel_connector *connector;
+
+ connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return NULL;
+
+ if (intel_connector_init(connector) < 0) {
+ kfree(connector);
+ return NULL;
+ }
+
+ return connector;
+}
+
+/*
+ * Free the bits allocated by intel_connector_alloc.
+ * This should only be used after intel_connector_alloc has returned
+ * successfully, and before drm_connector_init returns successfully.
+ * Otherwise the destroy callbacks for the connector and the state should
+ * take care of proper cleanup/free (see intel_connector_destroy).
+ */
+void intel_connector_free(struct intel_connector *connector)
+{
+ kfree(to_intel_digital_connector_state(connector->base.state));
+ kfree(connector);
+}
+
+/*
+ * Connector type independent destroy hook for drm_connector_funcs.
+ */
+void intel_connector_destroy(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ kfree(intel_connector->detect_edid);
+
+ if (!IS_ERR_OR_NULL(intel_connector->edid))
+ kfree(intel_connector->edid);
+
+ intel_panel_fini(&intel_connector->panel);
+
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+int intel_connector_register(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ int ret;
+
+ ret = intel_backlight_device_register(intel_connector);
+ if (ret)
+ goto err;
+
+ if (i915_inject_load_failure()) {
+ ret = -EFAULT;
+ goto err_backlight;
+ }
+
+ return 0;
+
+err_backlight:
+ intel_backlight_device_unregister(intel_connector);
+err:
+ return ret;
+}
+
+void intel_connector_unregister(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ intel_backlight_device_unregister(intel_connector);
+}
+
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder)
+{
+ connector->encoder = encoder;
+ drm_connector_attach_encoder(&connector->base, &encoder->base);
+}
+
+/*
+ * Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning, and hence the encoder state determines the state
+ * of the connector.
+ */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
+{
+ enum pipe pipe = 0;
+ struct intel_encoder *encoder = connector->encoder;
+
+ return encoder->get_hw_state(encoder, &pipe);
+}
+
+enum pipe intel_connector_get_pipe(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+ if (!connector->base.state->crtc)
+ return INVALID_PIPE;
+
+ return to_intel_crtc(connector->base.state->crtc)->pipe;
+}
+
/**
* intel_connector_update_modes - update connector from edid
* @connector: DRM connector device to use
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 0c6bf82bb059..68f2fb89ece3 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -354,6 +354,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
return true;
}
@@ -368,6 +369,7 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
return false;
pipe_config->has_pch_encoder = true;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
return true;
}
@@ -389,6 +391,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
return false;
pipe_config->has_pch_encoder = true;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev_priv)) {
@@ -849,12 +852,6 @@ out:
return status;
}
-static void intel_crt_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
static int intel_crt_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -909,7 +906,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_crt_destroy,
+ .destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index d48186e9ddad..a516697bf57d 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -34,34 +34,38 @@
* low-power state and comes back to normal.
*/
-#define I915_CSR_ICL "i915/icl_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_ICL);
-#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
-#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
-MODULE_FIRMWARE(I915_CSR_GLK);
-#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
+#define ICL_CSR_PATH "i915/icl_dmc_ver1_07.bin"
+#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+#define ICL_CSR_MAX_FW_SIZE 0x6000
+MODULE_FIRMWARE(ICL_CSR_PATH);
-#define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_CNL);
+#define CNL_CSR_PATH "i915/cnl_dmc_ver1_07.bin"
#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+#define CNL_CSR_MAX_FW_SIZE GLK_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(CNL_CSR_PATH);
+
+#define GLK_CSR_PATH "i915/glk_dmc_ver1_04.bin"
+#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
+#define GLK_CSR_MAX_FW_SIZE 0x4000
+MODULE_FIRMWARE(GLK_CSR_PATH);
-#define I915_CSR_KBL "i915/kbl_dmc_ver1_04.bin"
-MODULE_FIRMWARE(I915_CSR_KBL);
+#define KBL_CSR_PATH "i915/kbl_dmc_ver1_04.bin"
#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
+#define KBL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(KBL_CSR_PATH);
-#define I915_CSR_SKL "i915/skl_dmc_ver1_27.bin"
-MODULE_FIRMWARE(I915_CSR_SKL);
+#define SKL_CSR_PATH "i915/skl_dmc_ver1_27.bin"
#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 27)
+#define SKL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(SKL_CSR_PATH);
-#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_BXT);
+#define BXT_CSR_PATH "i915/bxt_dmc_ver1_07.bin"
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
-
-
#define BXT_CSR_MAX_FW_SIZE 0x3000
-#define GLK_CSR_MAX_FW_SIZE 0x4000
-#define ICL_CSR_MAX_FW_SIZE 0x6000
+MODULE_FIRMWARE(BXT_CSR_PATH);
+
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
struct intel_css_header {
@@ -190,6 +194,12 @@ static const struct stepping_info bxt_stepping_info[] = {
{'B', '0'}, {'B', '1'}, {'B', '2'}
};
+static const struct stepping_info icl_stepping_info[] = {
+ {'A', '0'}, {'A', '1'}, {'A', '2'},
+ {'B', '0'}, {'B', '2'},
+ {'C', '0'}
+};
+
static const struct stepping_info no_stepping_info = { '*', '*' };
static const struct stepping_info *
@@ -198,7 +208,10 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
const struct stepping_info *si;
unsigned int size;
- if (IS_SKYLAKE(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ size = ARRAY_SIZE(icl_stepping_info);
+ si = icl_stepping_info;
+ } else if (IS_SKYLAKE(dev_priv)) {
size = ARRAY_SIZE(skl_stepping_info);
si = skl_stepping_info;
} else if (IS_BROXTON(dev_priv)) {
@@ -285,10 +298,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
struct intel_csr *csr = &dev_priv->csr;
const struct stepping_info *si = intel_get_stepping_info(dev_priv);
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
- uint32_t max_fw_size = 0;
uint32_t i;
uint32_t *dmc_payload;
- uint32_t required_version;
if (!fw)
return NULL;
@@ -303,38 +314,19 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
return NULL;
}
- csr->version = css_header->version;
-
- if (csr->fw_path == i915_modparams.dmc_firmware_path) {
- /* Bypass version check for firmware override. */
- required_version = csr->version;
- } else if (IS_ICELAKE(dev_priv)) {
- required_version = ICL_CSR_VERSION_REQUIRED;
- } else if (IS_CANNONLAKE(dev_priv)) {
- required_version = CNL_CSR_VERSION_REQUIRED;
- } else if (IS_GEMINILAKE(dev_priv)) {
- required_version = GLK_CSR_VERSION_REQUIRED;
- } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- required_version = KBL_CSR_VERSION_REQUIRED;
- } else if (IS_SKYLAKE(dev_priv)) {
- required_version = SKL_CSR_VERSION_REQUIRED;
- } else if (IS_BROXTON(dev_priv)) {
- required_version = BXT_CSR_VERSION_REQUIRED;
- } else {
- MISSING_CASE(INTEL_REVID(dev_priv));
- required_version = 0;
- }
-
- if (csr->version != required_version) {
+ if (csr->required_version &&
+ css_header->version != csr->required_version) {
DRM_INFO("Refusing to load DMC firmware v%u.%u,"
" please use v%u.%u\n",
- CSR_VERSION_MAJOR(csr->version),
- CSR_VERSION_MINOR(csr->version),
- CSR_VERSION_MAJOR(required_version),
- CSR_VERSION_MINOR(required_version));
+ CSR_VERSION_MAJOR(css_header->version),
+ CSR_VERSION_MINOR(css_header->version),
+ CSR_VERSION_MAJOR(csr->required_version),
+ CSR_VERSION_MINOR(csr->required_version));
return NULL;
}
+ csr->version = css_header->version;
+
readcount += sizeof(struct intel_css_header);
/* Extract Package Header information*/
@@ -402,15 +394,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
nbytes = dmc_header->fw_size * 4;
- if (INTEL_GEN(dev_priv) >= 11)
- max_fw_size = ICL_CSR_MAX_FW_SIZE;
- else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
- max_fw_size = GLK_CSR_MAX_FW_SIZE;
- else if (IS_GEN9(dev_priv))
- max_fw_size = BXT_CSR_MAX_FW_SIZE;
- else
- MISSING_CASE(INTEL_REVID(dev_priv));
- if (nbytes > max_fw_size) {
+ if (nbytes > csr->max_fw_size) {
DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
return NULL;
}
@@ -475,27 +459,57 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
- if (i915_modparams.dmc_firmware_path)
- csr->fw_path = i915_modparams.dmc_firmware_path;
- else if (IS_ICELAKE(dev_priv))
- csr->fw_path = I915_CSR_ICL;
- else if (IS_CANNONLAKE(dev_priv))
- csr->fw_path = I915_CSR_CNL;
- else if (IS_GEMINILAKE(dev_priv))
- csr->fw_path = I915_CSR_GLK;
- else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
- csr->fw_path = I915_CSR_KBL;
- else if (IS_SKYLAKE(dev_priv))
- csr->fw_path = I915_CSR_SKL;
- else if (IS_BROXTON(dev_priv))
- csr->fw_path = I915_CSR_BXT;
-
/*
- * Obtain a runtime pm reference, until CSR is loaded,
- * to avoid entering runtime-suspend.
+ * Obtain a runtime pm reference, until CSR is loaded, to avoid entering
+ * runtime-suspend.
+ *
+ * On error, we return with the rpm wakeref held to prevent runtime
+ * suspend as runtime suspend *requires* a working CSR for whatever
+ * reason.
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ /* Allow to load fw via parameter using the last known size */
+ csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
+ } else if (IS_ICELAKE(dev_priv)) {
+ csr->fw_path = ICL_CSR_PATH;
+ csr->required_version = ICL_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ csr->fw_path = CNL_CSR_PATH;
+ csr->required_version = CNL_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = CNL_CSR_MAX_FW_SIZE;
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ csr->fw_path = GLK_CSR_PATH;
+ csr->required_version = GLK_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
+ } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+ csr->fw_path = KBL_CSR_PATH;
+ csr->required_version = KBL_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
+ } else if (IS_SKYLAKE(dev_priv)) {
+ csr->fw_path = SKL_CSR_PATH;
+ csr->required_version = SKL_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = SKL_CSR_MAX_FW_SIZE;
+ } else if (IS_BROXTON(dev_priv)) {
+ csr->fw_path = BXT_CSR_PATH;
+ csr->required_version = BXT_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
+ }
+
+ if (i915_modparams.dmc_firmware_path) {
+ if (strlen(i915_modparams.dmc_firmware_path) == 0) {
+ csr->fw_path = NULL;
+ DRM_INFO("Disabling CSR firmware and runtime PM\n");
+ return;
+ }
+
+ csr->fw_path = i915_modparams.dmc_firmware_path;
+ /* Bypass version check for firmware override. */
+ csr->required_version = 0;
+ }
+
if (csr->fw_path == NULL) {
DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv)));
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 5186cd7075f9..f3e1d6a0b7dd 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -28,6 +28,7 @@
#include <drm/drm_scdc_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_dsi.h"
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
@@ -642,7 +643,7 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_KBL_ULX(dev_priv)) {
+ if (IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
return kbl_y_ddi_translations_dp;
} else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) {
@@ -658,7 +659,7 @@ static const struct ddi_buf_trans *
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
if (dev_priv->vbt.edp.low_vswing) {
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
return skl_y_ddi_translations_edp;
} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) ||
@@ -680,7 +681,7 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
return skl_y_ddi_translations_hdmi;
} else {
@@ -1060,10 +1061,10 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
}
static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
- const struct intel_shared_dpll *pll)
+ const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- int clock = crtc->config->port_clock;
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ int clock = crtc_state->port_clock;
const enum intel_dpll_id id = pll->info->id;
switch (id) {
@@ -1363,8 +1364,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
return dco_freq / (p0 * p1 * p2 * 5);
}
-static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id pll_id)
+int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id pll_id)
{
uint32_t cfgcr0, cfgcr1;
uint32_t p0, p1, p2, dco_freq, ref_clock;
@@ -1517,7 +1518,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
else
dotclock = pipe_config->port_clock;
- if (pipe_config->ycbcr420)
+ if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
dotclock *= 2;
if (pipe_config->pixel_multiplier)
@@ -1737,16 +1738,16 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (INTEL_GEN(dev_priv) <= 8)
- hsw_ddi_clock_get(encoder, pipe_config);
- else if (IS_GEN9_BC(dev_priv))
- skl_ddi_clock_get(encoder, pipe_config);
- else if (IS_GEN9_LP(dev_priv))
- bxt_ddi_clock_get(encoder, pipe_config);
+ if (IS_ICELAKE(dev_priv))
+ icl_ddi_clock_get(encoder, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_clock_get(encoder, pipe_config);
- else if (IS_ICELAKE(dev_priv))
- icl_ddi_clock_get(encoder, pipe_config);
+ else if (IS_GEN9_LP(dev_priv))
+ bxt_ddi_clock_get(encoder, pipe_config);
+ else if (IS_GEN9_BC(dev_priv))
+ skl_ddi_clock_get(encoder, pipe_config);
+ else if (INTEL_GEN(dev_priv) <= 8)
+ hsw_ddi_clock_get(encoder, pipe_config);
}
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
@@ -1784,6 +1785,13 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
break;
}
+ /*
+	 * As per DP 1.2 spec section 2.3.4.3, while sending
+	 * YCBCR 444 signals we should program the MSA MISC1/0 fields with
+ * colorspace information. The output colorspace encoding is BT601.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+ temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR;
I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
@@ -1998,24 +2006,24 @@ out:
return ret;
}
-bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
- enum pipe *pipe)
+static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
+ u8 *pipe_mask, bool *is_dp_mst)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = encoder->port;
enum pipe p;
u32 tmp;
- bool ret;
+ u8 mst_pipe_mask;
+
+ *pipe_mask = 0;
+ *is_dp_mst = false;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
- return false;
-
- ret = false;
+ return;
tmp = I915_READ(DDI_BUF_CTL(port));
-
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
@@ -2023,44 +2031,58 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ default:
+ MISSING_CASE(tmp & TRANS_DDI_EDP_INPUT_MASK);
+ /* fallthrough */
case TRANS_DDI_EDP_INPUT_A_ON:
case TRANS_DDI_EDP_INPUT_A_ONOFF:
- *pipe = PIPE_A;
+ *pipe_mask = BIT(PIPE_A);
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
- *pipe = PIPE_B;
+ *pipe_mask = BIT(PIPE_B);
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
- *pipe = PIPE_C;
+ *pipe_mask = BIT(PIPE_C);
break;
}
- ret = true;
-
goto out;
}
+ mst_pipe_mask = 0;
for_each_pipe(dev_priv, p) {
- enum transcoder cpu_transcoder = (enum transcoder) p;
+ enum transcoder cpu_transcoder = (enum transcoder)p;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
- if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
- TRANS_DDI_MODE_SELECT_DP_MST)
- goto out;
+ if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port))
+ continue;
- *pipe = p;
- ret = true;
+ if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
+ TRANS_DDI_MODE_SELECT_DP_MST)
+ mst_pipe_mask |= BIT(p);
- goto out;
- }
+ *pipe_mask |= BIT(p);
+ }
+
+ if (!*pipe_mask)
+ DRM_DEBUG_KMS("No pipe for ddi port %c found\n",
+ port_name(port));
+
+ if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
+ DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n",
+ port_name(port), *pipe_mask);
+ *pipe_mask = BIT(ffs(*pipe_mask) - 1);
}
- DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
+ if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
+ DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n",
+ port_name(port), *pipe_mask, mst_pipe_mask);
+ else
+ *is_dp_mst = mst_pipe_mask;
out:
- if (ret && IS_GEN9_LP(dev_priv)) {
+ if (*pipe_mask && IS_GEN9_LP(dev_priv)) {
tmp = I915_READ(BXT_PHY_CTL(port));
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
@@ -2070,12 +2092,26 @@ out:
}
intel_display_power_put(dev_priv, encoder->power_domain);
+}
- return ret;
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ u8 pipe_mask;
+ bool is_mst;
+
+ intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
+
+ if (is_mst || !pipe_mask)
+ return false;
+
+ *pipe = ffs(pipe_mask) - 1;
+
+ return true;
}
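/*
 * Editor's note - illustrative sketch, not part of this patch: the hw state
 * readout above now reports a pipe bitmask, and intel_ddi_get_hw_state()
 * collapses it back to a single pipe with ffs() while rejecting MST or empty
 * masks. The standalone program below models that mask handling with plain C
 * bit operations; pick_single_pipe() and the PIPE_A..PIPE_C enum are invented
 * for the example and are not i915 API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C };

/* Return true and set *pipe when exactly one usable pipe is encoded in mask. */
static bool pick_single_pipe(uint8_t pipe_mask, bool is_mst, enum pipe *pipe)
{
	if (is_mst || !pipe_mask)
		return false;

	if (__builtin_popcount(pipe_mask) > 1)	/* conflicting readout */
		pipe_mask = pipe_mask & -pipe_mask;	/* keep only the lowest bit */

	*pipe = (enum pipe)__builtin_ctz(pipe_mask);
	return true;
}

int main(void)
{
	enum pipe p;

	if (pick_single_pipe(1u << PIPE_B, false, &p))
		printf("active pipe: %d\n", p);	/* prints 1 (PIPE_B) */
	return 0;
}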
static inline enum intel_display_power_domain
-intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
+intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
{
/* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
* DC states enabled at the same time, while for driver initiated AUX
@@ -2089,13 +2125,14 @@ intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
* Note that PSR is enabled only on Port A even though this function
* returns the correct domain for other ports too.
*/
- return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
- intel_dp->aux_power_domain;
+ return dig_port->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
+ intel_aux_power_domain(dig_port);
}
static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port;
u64 domains;
@@ -2110,12 +2147,19 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
dig_port = enc_to_dig_port(&encoder->base);
domains = BIT_ULL(dig_port->ddi_io_power_domain);
- /* AUX power is only needed for (e)DP mode, not for HDMI. */
- if (intel_crtc_has_dp_encoder(crtc_state)) {
- struct intel_dp *intel_dp = &dig_port->dp;
+ /*
+ * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
+ * ports.
+ */
+ if (intel_crtc_has_dp_encoder(crtc_state) ||
+ intel_port_is_tc(dev_priv, encoder->port))
+ domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
- domains |= BIT_ULL(intel_ddi_main_link_aux_domain(intel_dp));
- }
+ /*
+ * VDSC power is needed when DSC is enabled
+ */
+ if (crtc_state->dsc_params.compression_enable)
+ domains |= BIT_ULL(intel_dsc_power_domain(crtc_state));
return domains;
}
@@ -2748,77 +2792,130 @@ uint32_t icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
return 0;
}
-void icl_map_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+static void icl_map_plls_to_ports(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_connector_state *conn_state;
- struct drm_connector *conn;
- int i;
+ enum port port = encoder->port;
+ u32 val;
- for_each_new_connector_in_state(old_state, conn, conn_state, i) {
- struct intel_encoder *encoder =
- to_intel_encoder(conn_state->best_encoder);
- enum port port;
- uint32_t val;
+ mutex_lock(&dev_priv->dpll_lock);
- if (conn_state->crtc != crtc)
- continue;
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
- port = encoder->port;
- mutex_lock(&dev_priv->dpll_lock);
+ if (intel_port_is_combophy(dev_priv, port)) {
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+ val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ POSTING_READ(DPCLKA_CFGCR0_ICL);
+ }
- val = I915_READ(DPCLKA_CFGCR0_ICL);
- WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
+ val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
- if (intel_port_is_combophy(dev_priv, port)) {
- val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
- POSTING_READ(DPCLKA_CFGCR0_ICL);
- }
+ mutex_unlock(&dev_priv->dpll_lock);
+}
- val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
- mutex_unlock(&dev_priv->dpll_lock);
- }
+ mutex_lock(&dev_priv->dpll_lock);
+
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
+ mutex_unlock(&dev_priv->dpll_lock);
}
-void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_connector_state *old_conn_state;
- struct drm_connector *conn;
- int i;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 val;
+ enum port port;
+ u32 port_mask;
+ bool ddi_clk_needed;
+
+ /*
+ * In case of DP MST, we sanitize the primary encoder only, not the
+ * virtual ones.
+ */
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ return;
- for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
- struct intel_encoder *encoder =
- to_intel_encoder(old_conn_state->best_encoder);
- enum port port;
+ if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) {
+ u8 pipe_mask;
+ bool is_mst;
- if (old_conn_state->crtc != crtc)
+ intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
+ /*
+ * In the unlikely case that BIOS enables DP in MST mode, just
+ * warn since our MST HW readout is incomplete.
+ */
+ if (WARN_ON(is_mst))
+ return;
+ }
+
+ port_mask = BIT(encoder->port);
+ ddi_clk_needed = encoder->base.crtc;
+
+ if (encoder->type == INTEL_OUTPUT_DSI) {
+ struct intel_encoder *other_encoder;
+
+ port_mask = intel_dsi_encoder_ports(encoder);
+ /*
+ * Sanity check that we haven't incorrectly registered another
+ * encoder using any of the ports of this DSI encoder.
+ */
+ for_each_intel_encoder(&dev_priv->drm, other_encoder) {
+ if (other_encoder == encoder)
+ continue;
+
+ if (WARN_ON(port_mask & BIT(other_encoder->port)))
+ return;
+ }
+ /*
+ * DSI ports should have their DDI clock ungated when disabled
+ * and gated when enabled.
+ */
+ ddi_clk_needed = !encoder->base.crtc;
+ }
+
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_port_masked(port, port_mask) {
+ bool ddi_clk_ungated = !(val &
+ icl_dpclka_cfgcr0_clk_off(dev_priv,
+ port));
+
+ if (ddi_clk_needed == ddi_clk_ungated)
continue;
- port = encoder->port;
- mutex_lock(&dev_priv->dpll_lock);
- I915_WRITE(DPCLKA_CFGCR0_ICL,
- I915_READ(DPCLKA_CFGCR0_ICL) |
- icl_dpclka_cfgcr0_clk_off(dev_priv, port));
- mutex_unlock(&dev_priv->dpll_lock);
+ /*
+ * Punt for now on the case where the clock is gated but would
+ * be needed by the port. Something else is really broken then.
+ */
+ if (WARN_ON(ddi_clk_needed))
+ continue;
+
+ DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+ port_name(port));
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
}
}
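/*
 * Editor's note - illustrative sketch, not part of this patch:
 * icl_sanitize_encoder_pll_mapping() walks every port owned by the encoder
 * and only gates a clock that is ungated but not needed; the opposite case is
 * treated as a bug and left alone. The toy program below shows the same
 * "walk set bits, compare desired vs. current state, only move in the safe
 * direction" pattern; gate_unused_clocks() and the clk_off bitmap are
 * invented for the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One bit per port: set bit == clock gated (off). */
static uint32_t clk_off;

static void gate_unused_clocks(uint32_t port_mask, bool clk_needed)
{
	while (port_mask) {
		unsigned int port = __builtin_ctz(port_mask);
		bool ungated = !(clk_off & (1u << port));

		port_mask &= port_mask - 1;	/* clear the lowest set bit */

		if (clk_needed == ungated)
			continue;		/* already in the right state */
		if (clk_needed) {
			/* Gated but needed: something else is broken, don't touch it. */
			fprintf(stderr, "port %u: clock gated but needed\n", port);
			continue;
		}
		printf("gating unused clock for port %u\n", port);
		clk_off |= 1u << port;
	}
}

int main(void)
{
	gate_unused_clocks(0x5 /* ports 0 and 2 */, false);
	return 0;
}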
static void intel_ddi_clk_select(struct intel_encoder *encoder,
- const struct intel_shared_dpll *pll)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
uint32_t val;
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
if (WARN_ON(!pll))
return;
@@ -2828,7 +2925,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
if (IS_ICELAKE(dev_priv)) {
if (!intel_port_is_combophy(dev_priv, port))
I915_WRITE(DDI_CLK_SEL(port),
- icl_pll_to_ddi_pll_sel(encoder, pll));
+ icl_pll_to_ddi_pll_sel(encoder, crtc_state));
} else if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0);
@@ -2881,6 +2978,184 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
}
}
+static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
+ u32 val;
+ int i;
+
+ if (tc_port == PORT_TC_NONE)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
+ val = I915_READ(mg_regs[i]);
+ val |= MG_DP_MODE_CFG_TR2PWR_GATING |
+ MG_DP_MODE_CFG_TRPWR_GATING |
+ MG_DP_MODE_CFG_CLNPWR_GATING |
+ MG_DP_MODE_CFG_DIGPWR_GATING |
+ MG_DP_MODE_CFG_GAONPWR_GATING;
+ I915_WRITE(mg_regs[i], val);
+ }
+
+ val = I915_READ(MG_MISC_SUS0(tc_port));
+ val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
+ MG_MISC_SUS0_CFG_TR2PWR_GATING |
+ MG_MISC_SUS0_CFG_CL2PWR_GATING |
+ MG_MISC_SUS0_CFG_GAONPWR_GATING |
+ MG_MISC_SUS0_CFG_TRPWR_GATING |
+ MG_MISC_SUS0_CFG_CL1PWR_GATING |
+ MG_MISC_SUS0_CFG_DGPWR_GATING;
+ I915_WRITE(MG_MISC_SUS0(tc_port), val);
+}
+
+static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
+ u32 val;
+ int i;
+
+ if (tc_port == PORT_TC_NONE)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
+ val = I915_READ(mg_regs[i]);
+ val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
+ MG_DP_MODE_CFG_TRPWR_GATING |
+ MG_DP_MODE_CFG_CLNPWR_GATING |
+ MG_DP_MODE_CFG_DIGPWR_GATING |
+ MG_DP_MODE_CFG_GAONPWR_GATING);
+ I915_WRITE(mg_regs[i], val);
+ }
+
+ val = I915_READ(MG_MISC_SUS0(tc_port));
+ val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
+ MG_MISC_SUS0_CFG_TR2PWR_GATING |
+ MG_MISC_SUS0_CFG_CL2PWR_GATING |
+ MG_MISC_SUS0_CFG_GAONPWR_GATING |
+ MG_MISC_SUS0_CFG_TRPWR_GATING |
+ MG_MISC_SUS0_CFG_CL1PWR_GATING |
+ MG_MISC_SUS0_CFG_DGPWR_GATING);
+ I915_WRITE(MG_MISC_SUS0(tc_port), val);
+}
+
+static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ enum port port = intel_dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ u32 ln0, ln1, lane_info;
+
+ if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
+ return;
+
+ ln0 = I915_READ(MG_DP_MODE(port, 0));
+ ln1 = I915_READ(MG_DP_MODE(port, 1));
+
+ switch (intel_dig_port->tc_type) {
+ case TC_PORT_TYPEC:
+ ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+ ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+
+ lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
+ DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
+ DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+
+ switch (lane_info) {
+ case 0x1:
+ case 0x4:
+ break;
+ case 0x2:
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ break;
+ case 0x3:
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
+ MG_DP_MODE_CFG_DP_X2_MODE;
+ break;
+ case 0x8:
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ break;
+ case 0xC:
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
+ MG_DP_MODE_CFG_DP_X2_MODE;
+ break;
+ case 0xF:
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
+ MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
+ MG_DP_MODE_CFG_DP_X2_MODE;
+ break;
+ default:
+ MISSING_CASE(lane_info);
+ }
+ break;
+
+ case TC_PORT_LEGACY:
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+ break;
+
+ default:
+ MISSING_CASE(intel_dig_port->tc_type);
+ return;
+ }
+
+ I915_WRITE(MG_DP_MODE(port, 0), ln0);
+ I915_WRITE(MG_DP_MODE(port, 1), ln1);
+}
+
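/*
 * Editor's note - illustrative sketch, not part of this patch: the MG DP
 * mode programming above inspects a 4-bit Type-C lane assignment value,
 * where each set bit marks one lane granted to DP (0x1/0x2/0x4/0x8 single
 * lane, 0x3/0xC two lanes, 0xF all four). The helper below only shows how
 * such a nibble can be split into per-PHY-half lane counts; it does not
 * reproduce the exact MG_DP_MODE bit encoding used by the hardware.
 */
#include <stdint.h>
#include <stdio.h>

static void decode_lane_assignment(uint8_t lane_info,
				   unsigned int *lanes_lo, /* lanes 0-1 */
				   unsigned int *lanes_hi) /* lanes 2-3 */
{
	*lanes_lo = __builtin_popcount(lane_info & 0x3);
	*lanes_hi = __builtin_popcount((lane_info >> 2) & 0x3);
}

int main(void)
{
	unsigned int lo, hi;

	decode_lane_assignment(0xC, &lo, &hi);
	printf("lower half: %u lane(s), upper half: %u lane(s)\n", lo, hi);
	return 0;
}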
+static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->fec_enable)
+ return;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0)
+ DRM_DEBUG_KMS("Failed to set FEC_READY in the sink\n");
+}
+
+static void intel_ddi_enable_fec(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
+
+ if (!crtc_state->fec_enable)
+ return;
+
+ val = I915_READ(DP_TP_CTL(port));
+ val |= DP_TP_CTL_FEC_ENABLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+
+ if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
+ DP_TP_STATUS_FEC_ENABLE_LIVE,
+ DP_TP_STATUS_FEC_ENABLE_LIVE,
+ 1))
+ DRM_ERROR("Timed out waiting for FEC Enable Status\n");
+}
+
+static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
+
+ if (!crtc_state->fec_enable)
+ return;
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~DP_TP_CTL_FEC_ENABLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+}
+
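/*
 * Editor's note - illustrative sketch, not part of this patch: enabling FEC
 * above follows a common MMIO pattern - set an enable bit in a control
 * register, then poll a status register until a "live" bit appears or a
 * timeout expires. The model below uses a plain pointer instead of i915's
 * I915_READ/intel_wait_for_register helpers; poll_status_bit() and the
 * microsecond budget are invented for the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static bool poll_status_bit(const volatile uint32_t *status, uint32_t mask,
			    unsigned int timeout_us)
{
	while (timeout_us--) {
		if (*status & mask)
			return true;
		usleep(1);
	}
	return (*status & mask) != 0;	/* one last look after the timeout */
}

int main(void)
{
	volatile uint32_t status = 0x4;	/* pretend the hw already reports "live" */

	if (!poll_status_bit(&status, 0x4, 1000))
		fprintf(stderr, "timed out waiting for enable status\n");
	return 0;
}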
static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -2894,19 +3169,16 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
- intel_display_power_get(dev_priv,
- intel_ddi_main_link_aux_domain(intel_dp));
-
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
intel_edp_panel_on(intel_dp);
- intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
+ intel_ddi_clk_select(encoder, crtc_state);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
- icl_program_mg_dp_mode(intel_dp);
+ icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
if (IS_ICELAKE(dev_priv))
@@ -2922,14 +3194,21 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_init_dp_buf_reg(encoder);
if (!is_mst)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
+ true);
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
intel_dp_start_link_train(intel_dp);
if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
intel_dp_stop_link_train(intel_dp);
+ intel_ddi_enable_fec(encoder, crtc_state);
+
icl_enable_phy_clock_gating(dig_port);
if (!is_mst)
intel_ddi_enable_pipe_clock(crtc_state);
+
+ intel_dsc_enable(encoder, crtc_state);
}
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2944,10 +3223,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
- intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
+ intel_ddi_clk_select(encoder, crtc_state);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+ icl_program_mg_dp_mode(dig_port);
+ icl_disable_phy_clock_gating(dig_port);
+
if (IS_ICELAKE(dev_priv))
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, INTEL_OUTPUT_HDMI);
@@ -2958,12 +3240,14 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
else
intel_prepare_hdmi_ddi_buffers(encoder, level);
+ icl_enable_phy_clock_gating(dig_port);
+
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
intel_ddi_enable_pipe_clock(crtc_state);
- intel_dig_port->set_infoframes(&encoder->base,
+ intel_dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
crtc_state, conn_state);
}
@@ -2991,15 +3275,31 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
WARN_ON(crtc_state->has_pch_encoder);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_map_plls_to_ports(encoder, crtc_state);
+
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
- else
+ } else {
+ struct intel_lspcon *lspcon =
+ enc_to_intel_lspcon(&encoder->base);
+
intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+ if (lspcon->active) {
+ struct intel_digital_port *dig_port =
+ enc_to_dig_port(&encoder->base);
+
+ dig_port->set_infoframes(encoder,
+ crtc_state->has_infoframe,
+ crtc_state, conn_state);
+ }
+ }
}
-static void intel_disable_ddi_buf(struct intel_encoder *encoder)
+static void intel_disable_ddi_buf(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
@@ -3018,6 +3318,9 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder)
val |= DP_TP_CTL_LINK_TRAIN_PAT1;
I915_WRITE(DP_TP_CTL(port), val);
+ /* Disable FEC in DP Sink */
+ intel_ddi_disable_fec_state(encoder, crtc_state);
+
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
}
@@ -3041,7 +3344,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
}
- intel_disable_ddi_buf(encoder);
+ intel_disable_ddi_buf(encoder, old_crtc_state);
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp);
@@ -3049,9 +3352,6 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
-
- intel_display_power_put(dev_priv,
- intel_ddi_main_link_aux_domain(intel_dp));
}
static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -3062,12 +3362,12 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
- dig_port->set_infoframes(&encoder->base, false,
+ dig_port->set_infoframes(encoder, false,
old_crtc_state, old_conn_state);
intel_ddi_disable_pipe_clock(old_crtc_state);
- intel_disable_ddi_buf(encoder);
+ intel_disable_ddi_buf(encoder, old_crtc_state);
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
@@ -3080,6 +3380,8 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
/*
* When called from DP MST code:
* - old_conn_state will be NULL
@@ -3099,6 +3401,9 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
else
intel_ddi_post_disable_dp(encoder,
old_crtc_state, old_conn_state);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_unmap_plls_to_ports(encoder);
}
void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
@@ -3118,7 +3423,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
val &= ~FDI_RX_ENABLE;
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
- intel_disable_ddi_buf(encoder);
+ intel_disable_ddi_buf(encoder, old_crtc_state);
intel_ddi_clk_disable(encoder);
val = I915_READ(FDI_RX_MISC(PIPE_A));
@@ -3154,6 +3459,26 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
+static i915_reg_t
+gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ static const i915_reg_t regs[] = {
+ [PORT_A] = CHICKEN_TRANS_EDP,
+ [PORT_B] = CHICKEN_TRANS_A,
+ [PORT_C] = CHICKEN_TRANS_B,
+ [PORT_D] = CHICKEN_TRANS_C,
+ [PORT_E] = CHICKEN_TRANS_A,
+ };
+
+ WARN_ON(INTEL_GEN(dev_priv) < 9);
+
+ if (WARN_ON(port < PORT_A || port > PORT_E))
+ port = PORT_A;
+
+ return regs[port];
+}
+
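/*
 * Editor's note - illustrative sketch, not part of this patch: the chicken
 * register lookup above replaces an open-coded port->transcoder table with a
 * small designated-initializer array plus a bounds check that clamps to a
 * safe entry. The same pattern in standalone C (the register offsets here
 * are placeholders, not real MMIO addresses):
 */
#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_COUNT };

static unsigned int chicken_reg_by_port(enum port port)
{
	static const unsigned int regs[PORT_COUNT] = {
		[PORT_A] = 0x100,
		[PORT_B] = 0x104,
		[PORT_C] = 0x108,
		[PORT_D] = 0x10c,
		[PORT_E] = 0x104,
	};

	if ((unsigned int)port >= PORT_COUNT)
		port = PORT_A;	/* warn-and-clamp in the real driver */

	return regs[port];
}

int main(void)
{
	printf("reg for port E: 0x%x\n", chicken_reg_by_port(PORT_E));
	return 0;
}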
static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -3177,17 +3502,10 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
* the bits affect a specific DDI port rather than
* a specific transcoder.
*/
- static const enum transcoder port_to_transcoder[] = {
- [PORT_A] = TRANSCODER_EDP,
- [PORT_B] = TRANSCODER_A,
- [PORT_C] = TRANSCODER_B,
- [PORT_D] = TRANSCODER_C,
- [PORT_E] = TRANSCODER_A,
- };
- enum transcoder transcoder = port_to_transcoder[port];
+ i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port);
u32 val;
- val = I915_READ(CHICKEN_TRANS(transcoder));
+ val = I915_READ(reg);
if (port == PORT_E)
val |= DDIE_TRAINING_OVERRIDE_ENABLE |
@@ -3196,8 +3514,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
val |= DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE;
- I915_WRITE(CHICKEN_TRANS(transcoder), val);
- POSTING_READ(CHICKEN_TRANS(transcoder));
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
udelay(1);
@@ -3208,7 +3526,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE);
- I915_WRITE(CHICKEN_TRANS(transcoder), val);
+ I915_WRITE(reg, val);
}
/* In HDMI/DVI mode, the port width, and swing/emphasis values
@@ -3252,6 +3570,9 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
intel_edp_drrs_disable(intel_dp, old_crtc_state);
intel_psr_disable(intel_dp, old_crtc_state);
intel_edp_backlight_off(old_conn_state);
+ /* Disable the decompression in DP Sink */
+ intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
+ false);
}
static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
@@ -3282,13 +3603,76 @@ static void intel_disable_ddi(struct intel_encoder *encoder,
intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
}
-static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ enum port port)
{
- uint8_t mask = pipe_config->lane_lat_optim_mask;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ u32 val = I915_READ(PORT_TX_DFLEXDPMLE1);
+ bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+ val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
+ switch (pipe_config->lane_count) {
+ case 1:
+ val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
+ DFLEXDPMLE1_DPMLETC_ML0(tc_port);
+ break;
+ case 2:
+ val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
+ DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
+ break;
+ case 4:
+ val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
+ break;
+ default:
+ MISSING_CASE(pipe_config->lane_count);
+ }
+ I915_WRITE(PORT_TX_DFLEXDPMLE1, val);
+}
- bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
+static void
+intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ enum port port = encoder->port;
+
+ if (intel_crtc_has_dp_encoder(crtc_state) ||
+ intel_port_is_tc(dev_priv, encoder->port))
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
+
+ if (IS_GEN9_LP(dev_priv))
+ bxt_ddi_phy_set_lane_optim_mask(encoder,
+ crtc_state->lane_lat_optim_mask);
+
+ /*
+ * Program the lane count for static/dynamic connections on Type-C ports.
+ * Skip this step for TBT.
+ */
+ if (dig_port->tc_type == TC_PORT_UNKNOWN ||
+ dig_port->tc_type == TC_PORT_TBT)
+ return;
+
+ intel_ddi_set_fia_lane_count(encoder, crtc_state, port);
+}
+
+static void
+intel_ddi_post_pll_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+
+ if (intel_crtc_has_dp_encoder(crtc_state) ||
+ intel_port_is_tc(dev_priv, encoder->port))
+ intel_display_power_put(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
}
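/*
 * Editor's note - illustrative sketch, not part of this patch: the AUX power
 * reference is now taken in pre_pll_enable and dropped in post_pll_disable
 * under the same condition, keeping get/put calls balanced across a modeset.
 * A minimal refcount model of that pairing (power_get/power_put and the
 * needs_aux flag are invented names):
 */
#include <assert.h>
#include <stdbool.h>

static int aux_power_refcount;

static void power_get(void) { aux_power_refcount++; }
static void power_put(void) { assert(aux_power_refcount > 0); aux_power_refcount--; }

static void pre_pll_enable(bool needs_aux) { if (needs_aux) power_get(); }
static void post_pll_disable(bool needs_aux) { if (needs_aux) power_put(); }

int main(void)
{
	pre_pll_enable(true);
	post_pll_disable(true);
	assert(aux_power_refcount == 0);	/* balanced */
	return 0;
}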
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -3353,10 +3737,10 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state)
{
- if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
- crtc_state->min_voltage_level = 2;
- else if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
+ if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 1;
+ else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
+ crtc_state->min_voltage_level = 2;
}
void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -3406,7 +3790,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = true;
intel_dig_port = enc_to_dig_port(&encoder->base);
- if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
+ if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
pipe_config->has_infoframe = true;
if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
@@ -3767,6 +4151,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
+ enum pipe pipe;
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
@@ -3805,8 +4190,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->compute_output_type = intel_ddi_compute_output_type;
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->enable = intel_enable_ddi;
- if (IS_GEN9_LP(dev_priv))
- intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable;
+ intel_encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
+ intel_encoder->post_pll_disable = intel_ddi_post_pll_disable;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
intel_encoder->post_disable = intel_ddi_post_disable;
@@ -3817,8 +4202,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->type = INTEL_OUTPUT_DDI;
intel_encoder->power_domain = intel_port_to_power_domain(port);
intel_encoder->port = port;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
+ for_each_pipe(dev_priv, pipe)
+ intel_encoder->crtc_mask |= BIT(pipe);
if (INTEL_GEN(dev_priv) >= 11)
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
@@ -3828,6 +4214,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
+ intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
switch (port) {
case PORT_A:
@@ -3858,8 +4245,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
MISSING_CASE(port);
}
- intel_infoframe_init(intel_dig_port);
-
if (init_dp) {
if (!intel_ddi_init_dp_connector(intel_dig_port))
goto err;
@@ -3888,6 +4273,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
port_name(port));
}
+ intel_infoframe_init(intel_dig_port);
return;
err:
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 01fa98299bae..1e56319334f3 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -77,6 +77,10 @@ void intel_device_info_dump_flags(const struct intel_device_info *info,
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
+
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
+ DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
+#undef PRINT_FLAG
}
static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
@@ -744,27 +748,30 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
if (INTEL_GEN(dev_priv) >= 10) {
for_each_pipe(dev_priv, pipe)
info->num_scalers[pipe] = 2;
- } else if (INTEL_GEN(dev_priv) == 9) {
+ } else if (IS_GEN9(dev_priv)) {
info->num_scalers[PIPE_A] = 2;
info->num_scalers[PIPE_B] = 2;
info->num_scalers[PIPE_C] = 1;
}
- BUILD_BUG_ON(I915_NUM_ENGINES >
- sizeof(intel_ring_mask_t) * BITS_PER_BYTE);
+ BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
- /*
- * Skylake and Broxton currently don't expose the topmost plane as its
- * use is exclusive with the legacy cursor and we only want to expose
- * one of those, not both. Until we can safely expose the topmost plane
- * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
- * we don't expose the topmost plane at all to prevent ABI breakage
- * down the line.
- */
- if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
+ if (IS_GEN11(dev_priv))
+ for_each_pipe(dev_priv, pipe)
+ info->num_sprites[pipe] = 6;
+ else if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 3;
else if (IS_BROXTON(dev_priv)) {
+ /*
+ * Skylake and Broxton currently don't expose the topmost plane as its
+ * use is exclusive with the legacy cursor and we only want to expose
+ * one of those, not both. Until we can safely expose the topmost plane
+ * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+ * we don't expose the topmost plane at all to prevent ABI breakage
+ * down the line.
+ */
+
info->num_sprites[PIPE_A] = 2;
info->num_sprites[PIPE_B] = 2;
info->num_sprites[PIPE_C] = 1;
@@ -779,7 +786,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
if (i915_modparams.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");
info->num_pipes = 0;
- } else if (info->num_pipes > 0 &&
+ } else if (HAS_DISPLAY(dev_priv) &&
(IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
HAS_PCH_SPLIT(dev_priv)) {
u32 fuse_strap = I915_READ(FUSE_STRAP);
@@ -804,7 +811,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
DRM_INFO("PipeC fused off\n");
info->num_pipes -= 1;
}
- } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
+ } else if (HAS_DISPLAY(dev_priv) && IS_GEN9(dev_priv)) {
u32 dfsm = I915_READ(SKL_DFSM);
u8 disabled_mask = 0;
bool invalid;
@@ -844,13 +851,18 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
cherryview_sseu_info_init(dev_priv);
else if (IS_BROADWELL(dev_priv))
broadwell_sseu_info_init(dev_priv);
- else if (INTEL_GEN(dev_priv) == 9)
+ else if (IS_GEN9(dev_priv))
gen9_sseu_info_init(dev_priv);
- else if (INTEL_GEN(dev_priv) == 10)
+ else if (IS_GEN10(dev_priv))
gen10_sseu_info_init(dev_priv);
else if (INTEL_GEN(dev_priv) >= 11)
gen11_sseu_info_init(dev_priv);
+ if (IS_GEN6(dev_priv) && intel_vtd_active()) {
+ DRM_INFO("Disabling ppGTT for VT-d support\n");
+ info->ppgtt = INTEL_PPGTT_NONE;
+ }
+
/* Initialize command stream timestamp frequency */
info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}
@@ -872,40 +884,37 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
struct intel_device_info *info = mkwrite_device_info(dev_priv);
- u8 vdbox_disable, vebox_disable;
u32 media_fuse;
- int i;
+ unsigned int i;
if (INTEL_GEN(dev_priv) < 11)
return;
- media_fuse = I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
+ media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
- vdbox_disable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
- vebox_disable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
- GEN11_GT_VEBOX_DISABLE_SHIFT;
+ info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+ info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+ GEN11_GT_VEBOX_DISABLE_SHIFT;
- DRM_DEBUG_DRIVER("vdbox disable: %04x\n", vdbox_disable);
+ DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable);
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(dev_priv, _VCS(i)))
continue;
- if (!(BIT(i) & vdbox_disable))
- continue;
-
- info->ring_mask &= ~ENGINE_MASK(_VCS(i));
- DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+ if (!(BIT(i) & info->vdbox_enable)) {
+ info->ring_mask &= ~ENGINE_MASK(_VCS(i));
+ DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+ }
}
- DRM_DEBUG_DRIVER("vebox disable: %04x\n", vebox_disable);
+ DRM_DEBUG_DRIVER("vebox enable: %04x\n", info->vebox_enable);
for (i = 0; i < I915_MAX_VECS; i++) {
if (!HAS_ENGINE(dev_priv, _VECS(i)))
continue;
- if (!(BIT(i) & vebox_disable))
- continue;
-
- info->ring_mask &= ~ENGINE_MASK(_VECS(i));
- DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
+ if (!(BIT(i) & info->vebox_enable)) {
+ info->ring_mask &= ~ENGINE_MASK(_VECS(i));
+ DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
+ }
}
}
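/*
 * Editor's note - illustrative sketch, not part of this patch: the media
 * fuse readout above inverts the DISABLE register once so the rest of the
 * code works with "enable" bitmasks, then clears ring_mask bits for engines
 * whose enable bit is missing. A standalone model of that flow (the register
 * value, engine count and mask layout are invented):
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_VCS 4

int main(void)
{
	uint32_t fuse_disable = 0x6;			/* pretend vcs1 and vcs2 are fused off */
	uint32_t vdbox_enable = ~fuse_disable & 0xf;	/* low nibble: one bit per video engine */
	uint32_t ring_mask = 0xf;			/* start with all video engines present */

	for (unsigned int i = 0; i < MAX_VCS; i++) {
		if (!(vdbox_enable & (1u << i))) {
			ring_mask &= ~(1u << i);
			printf("vcs%u fused off\n", i);
		}
	}
	printf("remaining ring_mask: 0x%x\n", ring_mask);
	return 0;
}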
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 6eecd64734d5..1caf24e2cf0b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -25,6 +25,8 @@
#ifndef _INTEL_DEVICE_INFO_H_
#define _INTEL_DEVICE_INFO_H_
+#include <uapi/drm/i915_drm.h>
+
#include "intel_display.h"
struct drm_printer;
@@ -74,51 +76,58 @@ enum intel_platform {
INTEL_MAX_PLATFORMS
};
+enum intel_ppgtt {
+ INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
+ INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
+ INTEL_PPGTT_FULL = I915_GEM_PPGTT_FULL,
+ INTEL_PPGTT_FULL_4LVL,
+};
+
#define DEV_INFO_FOR_EACH_FLAG(func) \
func(is_mobile); \
func(is_lp); \
func(is_alpha_support); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
- func(has_aliasing_ppgtt); \
- func(has_csr); \
- func(has_ddi); \
- func(has_dp_mst); \
func(has_reset_engine); \
- func(has_fbc); \
func(has_fpga_dbg); \
- func(has_full_ppgtt); \
- func(has_full_48bit_ppgtt); \
- func(has_gmch_display); \
func(has_guc); \
func(has_guc_ct); \
- func(has_hotplug); \
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
func(has_logical_ring_elsq); \
func(has_logical_ring_preemption); \
- func(has_overlay); \
func(has_pooled_eu); \
- func(has_psr); \
func(has_rc6); \
func(has_rc6p); \
func(has_runtime_pm); \
func(has_snoop); \
func(has_coherent_ggtt); \
func(unfenced_needs_alignment); \
+ func(hws_needs_physical);
+
+#define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \
+ /* Keep in alphabetical order */ \
func(cursor_needs_physical); \
- func(hws_needs_physical); \
+ func(has_csr); \
+ func(has_ddi); \
+ func(has_dp_mst); \
+ func(has_fbc); \
+ func(has_gmch_display); \
+ func(has_hotplug); \
+ func(has_ipc); \
+ func(has_overlay); \
+ func(has_psr); \
func(overlay_needs_physical); \
- func(supports_tv); \
- func(has_ipc);
+ func(supports_tv);
#define GEN_MAX_SLICES (6) /* CNL upper bound */
#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
struct sseu_dev_info {
u8 slice_mask;
- u8 subslice_mask[GEN_MAX_SUBSLICES];
+ u8 subslice_mask[GEN_MAX_SLICES];
u16 eu_total;
u8 eu_per_subslice;
u8 min_eu_in_pool;
@@ -154,6 +163,7 @@ struct intel_device_info {
enum intel_platform platform;
u32 platform_mask;
+ enum intel_ppgtt ppgtt;
unsigned int page_sizes; /* page sizes supported by the HW */
u32 display_mmio_offset;
@@ -165,12 +175,18 @@ struct intel_device_info {
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
+
+ struct {
+#define DEFINE_FLAG(name) u8 name:1
+ DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG);
+#undef DEFINE_FLAG
+ } display;
+
u16 ddb_size; /* in blocks */
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
int trans_offsets[I915_MAX_TRANSCODERS];
- int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
/* Slice/subslice/EU info */
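/*
 * Editor's note - illustrative sketch, not part of this patch: the display
 * feature flags above use the X-macro idiom - one list macro
 * (DEV_INFO_DISPLAY_FOR_EACH_FLAG) expands both into "u8 name:1" bitfield
 * definitions and into the per-flag printer in intel_device_info_dump_flags().
 * A minimal standalone version of the same idiom, with made-up flag names:
 */
#include <stdio.h>

#define FOR_EACH_DISPLAY_FLAG(func) \
	func(has_ddi); \
	func(has_fbc); \
	func(has_psr)

struct display_info {
#define DEFINE_FLAG(name) unsigned char name:1
	FOR_EACH_DISPLAY_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
};

static void dump_flags(const struct display_info *info)
{
#define PRINT_FLAG(name) printf("%s: %s\n", #name, info->name ? "yes" : "no")
	FOR_EACH_DISPLAY_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

int main(void)
{
	struct display_info info = { .has_ddi = 1, .has_psr = 1 };

	dump_flags(&info);
	return 0;
}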
@@ -178,6 +194,10 @@ struct intel_device_info {
u32 cs_timestamp_frequency_khz;
+ /* Enabled (not fused off) media engine bitmasks. */
+ u8 vdbox_enable;
+ u8 vebox_enable;
+
struct color_luts {
u16 degamma_lut_size;
u16 gamma_lut_size;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a54843fdeb2f..07c861884c70 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,7 +24,6 @@
* Eric Anholt <eric@anholt.net>
*/
-#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
@@ -74,55 +73,6 @@ static const uint64_t i9xx_format_modifiers[] = {
DRM_FORMAT_MOD_INVALID
};
-static const uint32_t skl_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
-};
-
-static const uint32_t skl_pri_planar_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_NV12,
-};
-
-static const uint64_t skl_format_modifiers_noccs[] = {
- I915_FORMAT_MOD_Yf_TILED,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const uint64_t skl_format_modifiers_ccs[] = {
- I915_FORMAT_MOD_Yf_TILED_CCS,
- I915_FORMAT_MOD_Y_TILED_CCS,
- I915_FORMAT_MOD_Yf_TILED,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
@@ -141,15 +91,15 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
static int intel_framebuffer_init(struct intel_framebuffer *ifb,
struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
-static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
-static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
-static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
-static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
- struct intel_link_m_n *m_n,
- struct intel_link_m_n *m2_n2);
-static void ironlake_set_pipeconf(struct drm_crtc *crtc);
-static void haswell_set_pipeconf(struct drm_crtc *crtc);
-static void haswell_set_pipemisc(struct drm_crtc *crtc);
+static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
+static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
+static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+ const struct intel_link_m_n *m_n,
+ const struct intel_link_m_n *m2_n2);
+static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
@@ -158,9 +108,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
-static void skylake_pfit_enable(struct intel_crtc *crtc);
-static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
-static void ironlake_pfit_enable(struct intel_crtc *crtc);
+static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
+static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
+static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
@@ -506,23 +456,8 @@ static const struct intel_limit intel_limits_bxt = {
};
static void
-skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
-{
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return;
-
- if (enable)
- I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
- else
- I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
-}
-
-static void
skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return;
-
if (enable)
I915_WRITE(CLKGATE_DIS_PSL(pipe),
DUPS1_GATING_DIS | DUPS2_GATING_DIS);
@@ -1381,6 +1316,7 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
+ /* PCH SDVOB multiplex with HDMIB */
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
@@ -1565,14 +1501,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
}
}
-static void i9xx_disable_pll(struct intel_crtc *crtc)
+static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/* Disable DVO 2x clock on both PLLs if necessary */
if (IS_I830(dev_priv) &&
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
!intel_num_dvo_pipes(dev_priv)) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1666,16 +1603,16 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
I915_READ(dpll_reg) & port_mask, expected_mask);
}
-static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
- pipe);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
uint32_t val, pipeconf_val;
/* Make sure PCH DPLL is enabled */
- assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
+ assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
@@ -1701,7 +1638,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
* here for both 8bpc and 12bpc.
*/
val &= ~PIPECONF_BPC_MASK;
- if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
val |= PIPECONF_8BPC;
else
val |= pipeconf_val & PIPECONF_BPC_MASK;
@@ -1710,7 +1647,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
if (HAS_PCH_IBX(dev_priv) &&
- intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
val |= TRANS_INTERLACED;
@@ -2254,6 +2191,11 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
return new_offset;
}
+static bool is_surface_linear(u64 modifier, int color_plane)
+{
+ return modifier == DRM_FORMAT_MOD_LINEAR;
+}
+
static u32 intel_adjust_aligned_offset(int *x, int *y,
const struct drm_framebuffer *fb,
int color_plane,
@@ -2266,7 +2208,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
WARN_ON(new_offset > old_offset);
- if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+ if (!is_surface_linear(fb->modifier, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
unsigned int pitch_tiles;
@@ -2330,14 +2272,13 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
unsigned int rotation,
u32 alignment)
{
- uint64_t fb_modifier = fb->modifier;
unsigned int cpp = fb->format->cpp[color_plane];
u32 offset, offset_aligned;
if (alignment)
alignment--;
- if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
+ if (!is_surface_linear(fb->modifier, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
unsigned int tile_rows, tiles, pitch_tiles;
@@ -2400,10 +2341,26 @@ static int intel_fb_offset_to_xy(int *x, int *y,
int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ unsigned int height;
if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
- fb->offsets[color_plane] % intel_tile_size(dev_priv))
+ fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
+ DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
+ fb->offsets[color_plane], color_plane);
return -EINVAL;
+ }
+
+ height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
+ height = ALIGN(height, intel_tile_height(fb, color_plane));
+
+ /* Catch potential overflows early */
+ if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
+ fb->offsets[color_plane])) {
+ DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
+ fb->offsets[color_plane], fb->pitches[color_plane],
+ color_plane);
+ return -ERANGE;
+ }
*x = 0;
*y = 0;
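/*
 * Editor's note - illustrative sketch, not part of this patch: the new check
 * above rejects framebuffers whose height * pitch + offset would overflow a
 * u32 before any of it is used for address math. The kernel uses its own
 * add_overflows_t()/mul_u32_u32() helpers; the standalone version below uses
 * the compiler's checked-arithmetic builtins instead.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Return false if height * pitch + offset does not fit in 32 bits. */
static bool plane_end_offset(uint32_t height, uint32_t pitch, uint32_t offset,
			     uint32_t *end)
{
	uint32_t size;

	if (__builtin_mul_overflow(height, pitch, &size))
		return false;
	if (__builtin_add_overflow(size, offset, end))
		return false;
	return true;
}

int main(void)
{
	uint32_t end;

	if (!plane_end_offset(0x10000, 0x10000, 64, &end))
		fprintf(stderr, "bad offset or pitch for color plane\n");
	return 0;
}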
@@ -2574,7 +2531,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
tile_size);
offset /= tile_size;
- if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+ if (!is_surface_linear(fb->modifier, i)) {
unsigned int tile_width, tile_height;
unsigned int pitch_tiles;
struct drm_rect r;
@@ -2788,10 +2745,6 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
else
crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
-
- DRM_DEBUG_KMS("%s active planes 0x%x\n",
- crtc_state->base.crtc->name,
- crtc_state->active_planes);
}
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
@@ -2819,6 +2772,10 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
+ DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
+ plane->base.base.id, plane->base.name,
+ crtc->base.base.id, crtc->base.name);
+
intel_set_plane_visible(crtc_state, plane_state, false);
fixup_active_planes(crtc_state);
@@ -2826,7 +2783,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_pre_disable_primary_noatomic(&crtc->base);
trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, crtc);
+ plane->disable_plane(plane, crtc_state);
}
static void
@@ -2890,6 +2847,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
return;
valid_fb:
+ intel_state->base.rotation = plane_config->rotation;
intel_fill_fb_ggtt_view(&intel_state->view, fb,
intel_state->base.rotation);
intel_state->color_plane[0].stride =
@@ -3098,28 +3056,6 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
return 0;
}
-static int
-skl_check_nv12_surface(struct intel_plane_state *plane_state)
-{
- /* Display WA #1106 */
- if (plane_state->base.rotation !=
- (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
- plane_state->base.rotation != DRM_MODE_ROTATE_270)
- return 0;
-
- /*
- * src coordinates are rotated here.
- * We check height but report it as width
- */
- if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
- DRM_DEBUG_KMS("src width must be multiple "
- "of 4 for rotated NV12\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
@@ -3198,9 +3134,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
* the main surface setup depends on it.
*/
if (fb->format->format == DRM_FORMAT_NV12) {
- ret = skl_check_nv12_surface(plane_state);
- if (ret)
- return ret;
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
@@ -3398,7 +3331,6 @@ static void i9xx_update_plane(struct intel_plane *plane,
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 linear_offset;
u32 dspcntr = plane_state->ctl;
- i915_reg_t reg = DSPCNTR(i9xx_plane);
int x = plane_state->color_plane[0].x;
int y = plane_state->color_plane[0].y;
unsigned long irqflags;
@@ -3413,48 +3345,51 @@ static void i9xx_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
+
if (INTEL_GEN(dev_priv) < 4) {
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
+ I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
I915_WRITE_FW(DSPSIZE(i9xx_plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
- I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+ I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
I915_WRITE_FW(PRIMSIZE(i9xx_plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
- I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
}
- I915_WRITE_FW(reg, dspcntr);
-
- I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- I915_WRITE_FW(DSPSURF(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) +
- dspaddr_offset);
I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
} else if (INTEL_GEN(dev_priv) >= 4) {
+ I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
+ I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
+ }
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
+ if (INTEL_GEN(dev_priv) >= 4)
I915_WRITE_FW(DSPSURF(i9xx_plane),
intel_plane_ggtt_offset(plane_state) +
dspaddr_offset);
- I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
- I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
- } else {
+ else
I915_WRITE_FW(DSPADDR(i9xx_plane),
intel_plane_ggtt_offset(plane_state) +
dspaddr_offset);
- }
- POSTING_READ_FW(reg);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void i9xx_disable_plane(struct intel_plane *plane,
- struct intel_crtc *crtc)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
@@ -3467,7 +3402,6 @@ static void i9xx_disable_plane(struct intel_plane *plane,
I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
else
I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
- POSTING_READ_FW(DSPCNTR(i9xx_plane));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -3527,13 +3461,13 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
/*
* This function detaches (aka. unbinds) unused scalers in hardware
*/
-static void skl_detach_scalers(struct intel_crtc *intel_crtc)
+static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc_scaler_state *scaler_state;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
int i;
- scaler_state = &intel_crtc->config->scaler_state;
-
/* loop through and disable scalers that aren't in use */
for (i = 0; i < intel_crtc->num_scalers; i++) {
if (!scaler_state->scalers[i].in_use)
@@ -3541,6 +3475,21 @@ static void skl_detach_scalers(struct intel_crtc *intel_crtc)
}
}
+static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int rotation)
+{
+ /*
+ * The stride is either expressed as a multiple of 64-byte chunks for
+ * linear buffers or as a number of tiles for tiled buffers.
+ */
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
+ return 64;
+ else if (drm_rotation_90_or_270(rotation))
+ return intel_tile_height(fb, color_plane);
+ else
+ return intel_tile_width_bytes(fb, color_plane);
+}
+
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int color_plane)
{
@@ -3551,16 +3500,7 @@ u32 skl_plane_stride(const struct intel_plane_state *plane_state,
if (color_plane >= fb->format->num_planes)
return 0;
- /*
- * The stride is either expressed as a multiple of 64 bytes chunks for
- * linear buffers or in number of tiles for tiled buffers.
- */
- if (drm_rotation_90_or_270(rotation))
- stride /= intel_tile_height(fb, color_plane);
- else
- stride /= intel_fb_stride_alignment(fb, color_plane);
-
- return stride;
+ return stride / skl_plane_stride_mult(fb, color_plane, rotation);
}
static u32 skl_plane_ctl_format(uint32_t pixel_format)
@@ -3597,29 +3537,38 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format)
return 0;
}
-/*
- * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
- * to be already pre-multiplied. We need to add a knob (or a different
- * DRM_FORMAT) for user-space to configure that.
- */
-static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
+static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
{
- switch (pixel_format) {
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_ARGB8888:
+ if (!plane_state->base.fb->format->has_alpha)
+ return PLANE_CTL_ALPHA_DISABLE;
+
+ switch (plane_state->base.pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ return PLANE_CTL_ALPHA_DISABLE;
+ case DRM_MODE_BLEND_PREMULTI:
return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ case DRM_MODE_BLEND_COVERAGE:
+ return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
default:
+ MISSING_CASE(plane_state->base.pixel_blend_mode);
return PLANE_CTL_ALPHA_DISABLE;
}
}
-static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format)
+static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
{
- switch (pixel_format) {
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_ARGB8888:
+ if (!plane_state->base.fb->format->has_alpha)
+ return PLANE_COLOR_ALPHA_DISABLE;
+
+ switch (plane_state->base.pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ return PLANE_COLOR_ALPHA_DISABLE;
+ case DRM_MODE_BLEND_PREMULTI:
return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
+ case DRM_MODE_BLEND_COVERAGE:
+ return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
default:
+ MISSING_CASE(plane_state->base.pixel_blend_mode);
return PLANE_COLOR_ALPHA_DISABLE;
}
}
@@ -3696,7 +3645,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl = PLANE_CTL_ENABLE;
if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
- plane_ctl |= skl_plane_ctl_alpha(fb->format->format);
+ plane_ctl |= skl_plane_ctl_alpha(plane_state);
plane_ctl |=
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE |
@@ -3731,6 +3680,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
u32 plane_color_ctl = 0;
if (INTEL_GEN(dev_priv) < 11) {
@@ -3738,9 +3688,9 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
}
plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
- plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
+ plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
- if (fb->format->is_yuv) {
+ if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) {
if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
else
@@ -3748,6 +3698,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
+ } else if (fb->format->is_yuv) {
+ plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
}
return plane_color_ctl;
@@ -3932,15 +3884,15 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta
/* on skylake this is done by detaching scalers */
if (INTEL_GEN(dev_priv) >= 9) {
- skl_detach_scalers(crtc);
+ skl_detach_scalers(new_crtc_state);
if (new_crtc_state->pch_pfit.enabled)
- skylake_pfit_enable(crtc);
+ skylake_pfit_enable(new_crtc_state);
} else if (HAS_PCH_SPLIT(dev_priv)) {
if (new_crtc_state->pch_pfit.enabled)
- ironlake_pfit_enable(crtc);
+ ironlake_pfit_enable(new_crtc_state);
else if (old_crtc_state->pch_pfit.enabled)
- ironlake_pfit_disable(crtc, true);
+ ironlake_pfit_disable(old_crtc_state);
}
}
@@ -4339,10 +4291,10 @@ train_done:
DRM_DEBUG_KMS("FDI train done.\n");
}
-static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
+static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
int pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -4351,7 +4303,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
@@ -4500,10 +4452,11 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
}
/* Program iCLKIP clock to the desired frequency */
-static void lpt_program_iclkip(struct intel_crtc *crtc)
+static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int clock = crtc->config->base.adjusted_mode.crtc_clock;
+ int clock = crtc_state->base.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
@@ -4614,12 +4567,12 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
desired_divisor << auxdiv);
}
-static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
+static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
enum pipe pch_transcoder)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
I915_READ(HTOTAL(cpu_transcoder)));
@@ -4638,9 +4591,8 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
-static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
+static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t temp;
temp = I915_READ(SOUTH_CHICKEN1);
@@ -4659,22 +4611,23 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
POSTING_READ(SOUTH_CHICKEN1);
}
-static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
+static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = intel_crtc->base.dev;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- switch (intel_crtc->pipe) {
+ switch (crtc->pipe) {
case PIPE_A:
break;
case PIPE_B:
- if (intel_crtc->config->fdi_lanes > 2)
- cpt_set_fdi_bc_bifurcation(dev, false);
+ if (crtc_state->fdi_lanes > 2)
+ cpt_set_fdi_bc_bifurcation(dev_priv, false);
else
- cpt_set_fdi_bc_bifurcation(dev, true);
+ cpt_set_fdi_bc_bifurcation(dev_priv, true);
break;
case PIPE_C:
- cpt_set_fdi_bc_bifurcation(dev, true);
+ cpt_set_fdi_bc_bifurcation(dev_priv, true);
break;
default:
@@ -4731,7 +4684,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
assert_pch_transcoder_disabled(dev_priv, pipe);
if (IS_IVYBRIDGE(dev_priv))
- ivybridge_update_fdi_bc_bifurcation(crtc);
+ ivybridge_update_fdi_bc_bifurcation(crtc_state);
/* Write the TU size bits before fdi link training, so that error
* detection works. */
@@ -4764,11 +4717,11 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
* Note that enable_shared_dpll tries to do the right thing, but
* get_shared_dpll unconditionally resets the pll - we need that to have
* the right LVDS enable sequence. */
- intel_enable_shared_dpll(crtc);
+ intel_enable_shared_dpll(crtc_state);
/* set transcoder timing, panel must allow it */
assert_panel_unlocked(dev_priv, pipe);
- ironlake_pch_transcoder_set_timings(crtc, pipe);
+ ironlake_pch_transcoder_set_timings(crtc_state, pipe);
intel_fdi_normal_train(crtc);
@@ -4800,7 +4753,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
I915_WRITE(reg, temp);
}
- ironlake_enable_pch_transcoder(dev_priv, pipe);
+ ironlake_enable_pch_transcoder(crtc_state);
}
static void lpt_pch_enable(const struct intel_atomic_state *state,
@@ -4812,10 +4765,10 @@ static void lpt_pch_enable(const struct intel_atomic_state *state,
assert_pch_transcoder_disabled(dev_priv, PIPE_A);
- lpt_program_iclkip(crtc);
+ lpt_program_iclkip(crtc_state);
/* Set transcoder timing. */
- ironlake_pch_transcoder_set_timings(crtc, PIPE_A);
+ ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
@@ -4903,8 +4856,7 @@ static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned int scaler_user, int *scaler_id,
int src_w, int src_h, int dst_w, int dst_h,
- bool plane_scaler_check,
- uint32_t pixel_format)
+ const struct drm_format_info *format, bool need_scaler)
{
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
@@ -4913,21 +4865,14 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
- int need_scaling;
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
- need_scaling = src_w != dst_w || src_h != dst_h;
-
- if (plane_scaler_check)
- if (pixel_format == DRM_FORMAT_NV12)
- need_scaling = true;
-
- if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
- need_scaling = true;
+ if (src_w != dst_w || src_h != dst_h)
+ need_scaler = true;
/*
* Scaling/fitting not supported in IF-ID mode in GEN9+
@@ -4936,7 +4881,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* for NV12.
*/
if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
- need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
return -EINVAL;
}
@@ -4951,7 +4896,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* update to free the scaler is done in plane/panel-fit programming.
* For this purpose crtc/plane_state->scaler_id isn't reset here.
*/
- if (force_detach || !need_scaling) {
+ if (force_detach || !need_scaler) {
if (*scaler_id >= 0) {
scaler_state->scaler_users &= ~(1 << scaler_user);
scaler_state->scalers[*scaler_id].in_use = 0;
@@ -4965,7 +4910,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
- if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 &&
+ if (format && format->format == DRM_FORMAT_NV12 &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
DRM_DEBUG_KMS("NV12: src dimensions not met\n");
return -EINVAL;
@@ -5008,12 +4953,16 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
+ bool need_scaler = false;
+
+ if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ need_scaler = true;
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id,
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay,
- adjusted_mode->crtc_vdisplay, false, 0);
+ adjusted_mode->crtc_vdisplay, NULL, need_scaler);
}
/**
@@ -5028,13 +4977,17 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
-
struct intel_plane *intel_plane =
to_intel_plane(plane_state->base.plane);
struct drm_framebuffer *fb = plane_state->base.fb;
int ret;
-
bool force_detach = !fb || !plane_state->base.visible;
+ bool need_scaler = false;
+
+ /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
+ if (!icl_is_hdr_plane(intel_plane) &&
+ fb && fb->format->format == DRM_FORMAT_NV12)
+ need_scaler = true;
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
@@ -5043,7 +4996,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
drm_rect_height(&plane_state->base.src) >> 16,
drm_rect_width(&plane_state->base.dst),
drm_rect_height(&plane_state->base.dst),
- fb ? true : false, fb ? fb->format->format : 0);
+ fb ? fb->format : NULL, need_scaler);
if (ret || plane_state->scaler_id < 0)
return ret;
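
The scaler rework above moves the format-based decision out of skl_update_scaler(): callers now pass need_scaler pre-set (YCbCr 4:2:0 output on the CRTC path, NV12 on a plane without an HDR pipeline on the plane path), and the helper only adds the size-mismatch case. A rough standalone sketch of that split, with invented struct and field names standing in for the real intel_crtc_state/intel_plane_state data:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins; the real code works on intel_crtc_state /
 * intel_plane_state and DRM fourcc values. */
struct scaler_request {
    int src_w, src_h, dst_w, dst_h;
    bool ycbcr420_output;   /* CRTC-level 4:2:0 output */
    bool nv12_on_sdr_plane; /* planar format on a plane without an HDR path */
};

/* Callers pre-seed "need_scaler" for format reasons; the common helper
 * then only adds the size-mismatch case, mirroring the reworked flow. */
static bool scaler_needed(const struct scaler_request *req)
{
    bool need_scaler = req->ycbcr420_output || req->nv12_on_sdr_plane;

    if (req->src_w != req->dst_w || req->src_h != req->dst_h)
        need_scaler = true;

    return need_scaler;
}

int main(void)
{
    struct scaler_request req = {
        .src_w = 1920, .src_h = 1080,
        .dst_w = 1920, .dst_h = 1080,
        .nv12_on_sdr_plane = true,
    };

    printf("scaler needed: %s\n", scaler_needed(&req) ? "yes" : "no");
    return 0;
}
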
@@ -5089,27 +5042,27 @@ static void skylake_scaler_disable(struct intel_crtc *crtc)
skl_detach_scaler(crtc, i);
}
-static void skylake_pfit_enable(struct intel_crtc *crtc)
+static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
- struct intel_crtc_scaler_state *scaler_state =
- &crtc->config->scaler_state;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
- if (crtc->config->pch_pfit.enabled) {
+ if (crtc_state->pch_pfit.enabled) {
u16 uv_rgb_hphase, uv_rgb_vphase;
int pfit_w, pfit_h, hscale, vscale;
int id;
- if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
+ if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
return;
- pfit_w = (crtc->config->pch_pfit.size >> 16) & 0xFFFF;
- pfit_h = crtc->config->pch_pfit.size & 0xFFFF;
+ pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
+ pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
- hscale = (crtc->config->pipe_src_w << 16) / pfit_w;
- vscale = (crtc->config->pipe_src_h << 16) / pfit_h;
+ hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
+ vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -5121,18 +5074,18 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
- I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
+ I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
+ I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
}
}
-static void ironlake_pfit_enable(struct intel_crtc *crtc)
+static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int pipe = crtc->pipe;
- if (crtc->config->pch_pfit.enabled) {
+ if (crtc_state->pch_pfit.enabled) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
@@ -5142,8 +5095,8 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
PF_PIPE_SEL_IVB(pipe));
else
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
- I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
+ I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
+ I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
}
}
@@ -5338,11 +5291,8 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
if (!crtc_state->nv12_planes)
return false;
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return false;
-
- if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
- IS_CANNONLAKE(dev_priv))
+ /* WA Display #0827: Gen9:all */
+ if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
return true;
return false;
@@ -5385,7 +5335,6 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
if (needs_nv12_wa(dev_priv, old_crtc_state) &&
!needs_nv12_wa(dev_priv, pipe_config)) {
skl_wa_clkgate(dev_priv, crtc->pipe, false);
- skl_wa_528(dev_priv, crtc->pipe, false);
}
}
@@ -5425,7 +5374,6 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
needs_nv12_wa(dev_priv, pipe_config)) {
skl_wa_clkgate(dev_priv, crtc->pipe, true);
- skl_wa_528(dev_priv, crtc->pipe, true);
}
/*
@@ -5448,7 +5396,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
- if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
+ if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
+ old_crtc_state->base.active)
intel_wait_for_vblank(dev_priv, crtc->pipe);
/*
@@ -5479,24 +5428,32 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
intel_update_watermarks(crtc);
}
-static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
+static void intel_crtc_disable_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_plane *p;
- int pipe = intel_crtc->pipe;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ unsigned int update_mask = new_crtc_state->update_planes;
+ const struct intel_plane_state *old_plane_state;
+ struct intel_plane *plane;
+ unsigned fb_bits = 0;
+ int i;
- intel_crtc_dpms_overlay_disable(intel_crtc);
+ intel_crtc_dpms_overlay_disable(crtc);
- drm_for_each_plane_mask(p, dev, plane_mask)
- to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc);
+ for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
+ if (crtc->pipe != plane->pipe ||
+ !(update_mask & BIT(plane->id)))
+ continue;
- /*
- * FIXME: Once we grow proper nuclear flip support out of this we need
- * to compute the mask of flip planes precisely. For the time being
- * consider this a flip to a NULL plane.
- */
- intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
+ plane->disable_plane(plane, new_crtc_state);
+
+ if (old_plane_state->base.visible)
+ fb_bits |= plane->frontbuffer_bit;
+ }
+
+ intel_frontbuffer_flip(dev_priv, fb_bits);
}
static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
@@ -5554,7 +5511,8 @@ static void intel_encoders_enable(struct drm_crtc *crtc,
if (conn_state->crtc != crtc)
continue;
- encoder->enable(encoder, crtc_state, conn_state);
+ if (encoder->enable)
+ encoder->enable(encoder, crtc_state, conn_state);
intel_opregion_notify_encoder(encoder, true);
}
}
@@ -5575,7 +5533,8 @@ static void intel_encoders_disable(struct drm_crtc *crtc,
continue;
intel_opregion_notify_encoder(encoder, false);
- encoder->disable(encoder, old_crtc_state, old_conn_state);
+ if (encoder->disable)
+ encoder->disable(encoder, old_crtc_state, old_conn_state);
}
}
@@ -5646,37 +5605,37 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- if (intel_crtc->config->has_pch_encoder)
- intel_prepare_shared_dpll(intel_crtc);
+ if (pipe_config->has_pch_encoder)
+ intel_prepare_shared_dpll(pipe_config);
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- intel_dp_set_m_n(intel_crtc, M1_N1);
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ intel_dp_set_m_n(pipe_config, M1_N1);
- intel_set_pipe_timings(intel_crtc);
- intel_set_pipe_src_size(intel_crtc);
+ intel_set_pipe_timings(pipe_config);
+ intel_set_pipe_src_size(pipe_config);
- if (intel_crtc->config->has_pch_encoder) {
- intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config->fdi_m_n, NULL);
+ if (pipe_config->has_pch_encoder) {
+ intel_cpu_transcoder_set_m_n(pipe_config,
+ &pipe_config->fdi_m_n, NULL);
}
- ironlake_set_pipeconf(crtc);
+ ironlake_set_pipeconf(pipe_config);
intel_crtc->active = true;
intel_encoders_pre_enable(crtc, pipe_config, old_state);
- if (intel_crtc->config->has_pch_encoder) {
+ if (pipe_config->has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
* cpu pipes, hence this is separate from all the other fdi/pch
* enabling. */
- ironlake_fdi_pll_enable(intel_crtc);
+ ironlake_fdi_pll_enable(pipe_config);
} else {
assert_fdi_tx_disabled(dev_priv, pipe);
assert_fdi_rx_disabled(dev_priv, pipe);
}
- ironlake_pfit_enable(intel_crtc);
+ ironlake_pfit_enable(pipe_config);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
@@ -5685,10 +5644,10 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
+ dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
intel_enable_pipe(pipe_config);
- if (intel_crtc->config->has_pch_encoder)
+ if (pipe_config->has_pch_encoder)
ironlake_pch_enable(old_intel_state, pipe_config);
assert_vblank_disabled(crtc);
@@ -5705,7 +5664,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
* some interlaced HDMI modes. Let's do the double wait always
* in case there are more corner cases we don't know about.
*/
- if (intel_crtc->config->has_pch_encoder) {
+ if (pipe_config->has_pch_encoder) {
intel_wait_for_vblank(dev_priv, pipe);
intel_wait_for_vblank(dev_priv, pipe);
}
@@ -5739,10 +5698,9 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
enum pipe pipe = crtc->pipe;
uint32_t val;
- val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2);
-
- /* Program B credit equally to all pipes */
- val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
+ val = MBUS_DBOX_A_CREDIT(2);
+ val |= MBUS_DBOX_BW_CREDIT(1);
+ val |= MBUS_DBOX_B_CREDIT(8);
I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
}
@@ -5754,7 +5712,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
bool psl_clkgate_wa;
@@ -5765,37 +5723,34 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
- if (intel_crtc->config->shared_dpll)
- intel_enable_shared_dpll(intel_crtc);
-
- if (INTEL_GEN(dev_priv) >= 11)
- icl_map_plls_to_ports(crtc, pipe_config, old_state);
+ if (pipe_config->shared_dpll)
+ intel_enable_shared_dpll(pipe_config);
intel_encoders_pre_enable(crtc, pipe_config, old_state);
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- intel_dp_set_m_n(intel_crtc, M1_N1);
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ intel_dp_set_m_n(pipe_config, M1_N1);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_set_pipe_timings(intel_crtc);
+ intel_set_pipe_timings(pipe_config);
- intel_set_pipe_src_size(intel_crtc);
+ intel_set_pipe_src_size(pipe_config);
if (cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(cpu_transcoder)) {
I915_WRITE(PIPE_MULT(cpu_transcoder),
- intel_crtc->config->pixel_multiplier - 1);
+ pipe_config->pixel_multiplier - 1);
}
- if (intel_crtc->config->has_pch_encoder) {
- intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config->fdi_m_n, NULL);
+ if (pipe_config->has_pch_encoder) {
+ intel_cpu_transcoder_set_m_n(pipe_config,
+ &pipe_config->fdi_m_n, NULL);
}
if (!transcoder_is_dsi(cpu_transcoder))
- haswell_set_pipeconf(crtc);
+ haswell_set_pipeconf(pipe_config);
- haswell_set_pipemisc(crtc);
+ haswell_set_pipemisc(pipe_config);
intel_color_set_csc(&pipe_config->base);
@@ -5803,14 +5758,14 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
- intel_crtc->config->pch_pfit.enabled;
+ pipe_config->pch_pfit.enabled;
if (psl_clkgate_wa)
glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
if (INTEL_GEN(dev_priv) >= 9)
- skylake_pfit_enable(intel_crtc);
+ skylake_pfit_enable(pipe_config);
else
- ironlake_pfit_enable(intel_crtc);
+ ironlake_pfit_enable(pipe_config);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
@@ -5843,10 +5798,10 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (!transcoder_is_dsi(cpu_transcoder))
intel_enable_pipe(pipe_config);
- if (intel_crtc->config->has_pch_encoder)
+ if (pipe_config->has_pch_encoder)
lpt_pch_enable(old_intel_state, pipe_config);
- if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(pipe_config, true);
assert_vblank_disabled(crtc);
@@ -5868,15 +5823,15 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
}
}
-static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
+static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
- if (force || crtc->config->pch_pfit.enabled) {
+ if (old_crtc_state->pch_pfit.enabled) {
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_POS(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -5907,14 +5862,14 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_disable_pipe(old_crtc_state);
- ironlake_pfit_disable(intel_crtc, false);
+ ironlake_pfit_disable(old_crtc_state);
- if (intel_crtc->config->has_pch_encoder)
+ if (old_crtc_state->has_pch_encoder)
ironlake_fdi_disable(crtc);
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
- if (intel_crtc->config->has_pch_encoder) {
+ if (old_crtc_state->has_pch_encoder) {
ironlake_disable_pch_transcoder(dev_priv, pipe);
if (HAS_PCH_CPT(dev_priv)) {
@@ -5965,24 +5920,24 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(old_crtc_state);
+ intel_dsc_disable(old_crtc_state);
+
if (INTEL_GEN(dev_priv) >= 9)
skylake_scaler_disable(intel_crtc);
else
- ironlake_pfit_disable(intel_crtc, false);
+ ironlake_pfit_disable(old_crtc_state);
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
+ intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
}
-static void i9xx_pfit_enable(struct intel_crtc *crtc)
+static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *pipe_config = crtc->config;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (!pipe_config->gmch_pfit.control)
+ if (!crtc_state->gmch_pfit.control)
return;
/*
@@ -5992,8 +5947,8 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
assert_pipe_disabled(dev_priv, crtc->pipe);
- I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
- I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
+ I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
/* Border color in case we don't scale up to the full screen. Black by
* default, change to something else for debugging. */
@@ -6048,6 +6003,28 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
}
}
+enum intel_display_power_domain
+intel_aux_power_domain(struct intel_digital_port *dig_port)
+{
+ switch (dig_port->aux_ch) {
+ case AUX_CH_A:
+ return POWER_DOMAIN_AUX_A;
+ case AUX_CH_B:
+ return POWER_DOMAIN_AUX_B;
+ case AUX_CH_C:
+ return POWER_DOMAIN_AUX_C;
+ case AUX_CH_D:
+ return POWER_DOMAIN_AUX_D;
+ case AUX_CH_E:
+ return POWER_DOMAIN_AUX_E;
+ case AUX_CH_F:
+ return POWER_DOMAIN_AUX_F;
+ default:
+ MISSING_CASE(dig_port->aux_ch);
+ return POWER_DOMAIN_AUX_A;
+ }
+}
+
static u64 get_crtc_power_domains(struct drm_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
@@ -6127,20 +6104,18 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
if (WARN_ON(intel_crtc->active))
return;
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- intel_dp_set_m_n(intel_crtc, M1_N1);
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ intel_dp_set_m_n(pipe_config, M1_N1);
- intel_set_pipe_timings(intel_crtc);
- intel_set_pipe_src_size(intel_crtc);
+ intel_set_pipe_timings(pipe_config);
+ intel_set_pipe_src_size(pipe_config);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
I915_WRITE(CHV_CANVAS(pipe), 0);
}
- i9xx_set_pipeconf(intel_crtc);
+ i9xx_set_pipeconf(pipe_config);
intel_color_set_csc(&pipe_config->base);
@@ -6151,16 +6126,16 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
if (IS_CHERRYVIEW(dev_priv)) {
- chv_prepare_pll(intel_crtc, intel_crtc->config);
- chv_enable_pll(intel_crtc, intel_crtc->config);
+ chv_prepare_pll(intel_crtc, pipe_config);
+ chv_enable_pll(intel_crtc, pipe_config);
} else {
- vlv_prepare_pll(intel_crtc, intel_crtc->config);
- vlv_enable_pll(intel_crtc, intel_crtc->config);
+ vlv_prepare_pll(intel_crtc, pipe_config);
+ vlv_enable_pll(intel_crtc, pipe_config);
}
intel_encoders_pre_enable(crtc, pipe_config, old_state);
- i9xx_pfit_enable(intel_crtc);
+ i9xx_pfit_enable(pipe_config);
intel_color_load_luts(&pipe_config->base);
@@ -6174,13 +6149,13 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_enable(crtc, pipe_config, old_state);
}
-static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
+static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
- I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
+ I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
+ I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
}
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
@@ -6197,15 +6172,15 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
if (WARN_ON(intel_crtc->active))
return;
- i9xx_set_pll_dividers(intel_crtc);
+ i9xx_set_pll_dividers(pipe_config);
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- intel_dp_set_m_n(intel_crtc, M1_N1);
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ intel_dp_set_m_n(pipe_config, M1_N1);
- intel_set_pipe_timings(intel_crtc);
- intel_set_pipe_src_size(intel_crtc);
+ intel_set_pipe_timings(pipe_config);
+ intel_set_pipe_src_size(pipe_config);
- i9xx_set_pipeconf(intel_crtc);
+ i9xx_set_pipeconf(pipe_config);
intel_crtc->active = true;
@@ -6216,13 +6191,13 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
i9xx_enable_pll(intel_crtc, pipe_config);
- i9xx_pfit_enable(intel_crtc);
+ i9xx_pfit_enable(pipe_config);
intel_color_load_luts(&pipe_config->base);
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state,
- intel_crtc->config);
+ pipe_config);
else
intel_update_watermarks(intel_crtc);
intel_enable_pipe(pipe_config);
@@ -6233,12 +6208,12 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_enable(crtc, pipe_config, old_state);
}
-static void i9xx_pfit_disable(struct intel_crtc *crtc)
+static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (!crtc->config->gmch_pfit.control)
+ if (!old_crtc_state->gmch_pfit.control)
return;
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -6271,17 +6246,17 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_disable_pipe(old_crtc_state);
- i9xx_pfit_disable(intel_crtc);
+ i9xx_pfit_disable(old_crtc_state);
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
- if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
+ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
if (IS_CHERRYVIEW(dev_priv))
chv_disable_pll(dev_priv, pipe);
else if (IS_VALLEYVIEW(dev_priv))
vlv_disable_pll(dev_priv, pipe);
else
- i9xx_disable_pll(intel_crtc);
+ i9xx_disable_pll(old_crtc_state);
}
intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
@@ -6355,7 +6330,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
intel_fbc_disable(intel_crtc);
intel_update_watermarks(intel_crtc);
- intel_disable_shared_dpll(intel_crtc);
+ intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
domains = intel_crtc->enabled_power_domains;
for_each_power_domain(domain, domains)
@@ -6433,66 +6408,6 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
}
}
-int intel_connector_init(struct intel_connector *connector)
-{
- struct intel_digital_connector_state *conn_state;
-
- /*
- * Allocate enough memory to hold intel_digital_connector_state,
- * This might be a few bytes too many, but for connectors that don't
- * need it we'll free the state and allocate a smaller one on the first
- * succesful commit anyway.
- */
- conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
- if (!conn_state)
- return -ENOMEM;
-
- __drm_atomic_helper_connector_reset(&connector->base,
- &conn_state->base);
-
- return 0;
-}
-
-struct intel_connector *intel_connector_alloc(void)
-{
- struct intel_connector *connector;
-
- connector = kzalloc(sizeof *connector, GFP_KERNEL);
- if (!connector)
- return NULL;
-
- if (intel_connector_init(connector) < 0) {
- kfree(connector);
- return NULL;
- }
-
- return connector;
-}
-
-/*
- * Free the bits allocated by intel_connector_alloc.
- * This should only be used after intel_connector_alloc has returned
- * successfully, and before drm_connector_init returns successfully.
- * Otherwise the destroy callbacks for the connector and the state should
- * take care of proper cleanup/free
- */
-void intel_connector_free(struct intel_connector *connector)
-{
- kfree(to_intel_digital_connector_state(connector->base.state));
- kfree(connector);
-}
-
-/* Simple connector->get_hw_state implementation for encoders that support only
- * one connector and no cloning and hence the encoder state determines the state
- * of the connector. */
-bool intel_connector_get_hw_state(struct intel_connector *connector)
-{
- enum pipe pipe = 0;
- struct intel_encoder *encoder = connector->encoder;
-
- return encoder->get_hw_state(encoder, &pipe);
-}
-
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
if (crtc_state->base.enable && crtc_state->has_pch_encoder)
@@ -6603,6 +6518,9 @@ retry:
link_bw, &pipe_config->fdi_m_n, false);
ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
+ if (ret == -EDEADLK)
+ return ret;
+
if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
pipe_config->pipe_bpp -= 2*3;
DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
@@ -6759,7 +6677,9 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return -EINVAL;
}
- if (pipe_config->ycbcr420 && pipe_config->base.ctm) {
+ if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
+ pipe_config->base.ctm) {
/*
* There is only one pipe CSC unit per pipe, and we need that
* for output conversion from RGB->YCBCR. So if CTM is already
@@ -6834,7 +6754,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
}
void
-intel_link_compute_m_n(int bits_per_pixel, int nlanes,
+intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
bool constant_n)
@@ -6925,12 +6845,12 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
-static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
- struct intel_link_m_n *m_n)
+static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+ const struct intel_link_m_n *m_n)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
@@ -6938,25 +6858,39 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
-static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
- struct intel_link_m_n *m_n,
- struct intel_link_m_n *m2_n2)
+static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
+ enum transcoder transcoder)
{
+ if (IS_HASWELL(dev_priv))
+ return transcoder == TRANSCODER_EDP;
+
+ /*
+ * Strictly speaking some registers are available before
+ * gen7, but we only support DRRS on gen7+
+ */
+ return IS_GEN7(dev_priv) || IS_CHERRYVIEW(dev_priv);
+}
+
+static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+ const struct intel_link_m_n *m_n,
+ const struct intel_link_m_n *m2_n2)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int pipe = crtc->pipe;
- enum transcoder transcoder = crtc->config->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
+ enum transcoder transcoder = crtc_state->cpu_transcoder;
if (INTEL_GEN(dev_priv) >= 5) {
I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
- /* M2_N2 registers to be set only for gen < 8 (M2_N2 available
- * for gen < 8) and if DRRS is supported (to make sure the
- * registers are not unnecessarily accessed).
+ /*
+ * M2_N2 registers are set only if DRRS is supported
+ * (to make sure the registers are not unnecessarily accessed).
*/
- if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
- INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
+ if (m2_n2 && crtc_state->has_drrs &&
+ transcoder_has_m2_n2(dev_priv, transcoder)) {
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
@@ -6971,29 +6905,29 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
}
}
-void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
+void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
- struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
+ const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
if (m_n == M1_N1) {
- dp_m_n = &crtc->config->dp_m_n;
- dp_m2_n2 = &crtc->config->dp_m2_n2;
+ dp_m_n = &crtc_state->dp_m_n;
+ dp_m2_n2 = &crtc_state->dp_m2_n2;
} else if (m_n == M2_N2) {
/*
* M2_N2 registers are not supported. Hence m2_n2 divider value
* needs to be programmed into M1_N1.
*/
- dp_m_n = &crtc->config->dp_m2_n2;
+ dp_m_n = &crtc_state->dp_m2_n2;
} else {
DRM_ERROR("Unsupported divider value\n");
return;
}
- if (crtc->config->has_pch_encoder)
- intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
+ if (crtc_state->has_pch_encoder)
+ intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
else
- intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
+ intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
static void vlv_compute_dpll(struct intel_crtc *crtc,
@@ -7092,8 +7026,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
/* Set HBR and RBR LPF coefficients */
if (pipe_config->port_clock == 162000 ||
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
+ intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
+ intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x009f0003);
else
@@ -7120,7 +7054,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
- if (intel_crtc_has_dp_encoder(crtc->config))
+ if (intel_crtc_has_dp_encoder(pipe_config))
coreclk |= 0x01000000;
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
@@ -7399,12 +7333,13 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
crtc_state->dpll_hw_state.dpll = dpll;
}
-static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
+static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
uint32_t crtc_vtotal, crtc_vblank_end;
int vsyncshift = 0;
@@ -7418,7 +7353,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
crtc_vtotal -= 1;
crtc_vblank_end -= 1;
- if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
else
vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -7460,18 +7395,18 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
}
-static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
+static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
/* pipesrc controls the size that is scaled from, which should
* always be the user's requested size.
*/
I915_WRITE(PIPESRC(pipe),
- ((intel_crtc->config->pipe_src_w - 1) << 16) |
- (intel_crtc->config->pipe_src_h - 1));
+ ((crtc_state->pipe_src_w - 1) << 16) |
+ (crtc_state->pipe_src_h - 1));
}
static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -7547,29 +7482,30 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
drm_mode_set_name(mode);
}
-static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
+static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
uint32_t pipeconf;
pipeconf = 0;
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
- pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
+ pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
- if (intel_crtc->config->double_wide)
+ if (crtc_state->double_wide)
pipeconf |= PIPECONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
- if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
+ if (crtc_state->dither && crtc_state->pipe_bpp != 30)
pipeconf |= PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
- switch (intel_crtc->config->pipe_bpp) {
+ switch (crtc_state->pipe_bpp) {
case 18:
pipeconf |= PIPECONF_6BPC;
break;
@@ -7585,9 +7521,9 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
}
- if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_GEN(dev_priv) < 4 ||
- intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -7595,11 +7531,11 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
pipeconf |= PIPECONF_PROGRESSIVE;
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_crtc->config->limited_color_range)
+ crtc_state->limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
- I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
- POSTING_READ(PIPECONF(intel_crtc->pipe));
+ I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
+ POSTING_READ(PIPECONF(crtc->pipe));
}
static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
@@ -7882,8 +7818,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->tiling = I915_TILING_X;
fb->modifier = I915_FORMAT_MOD_X_TILED;
}
+
+ if (val & DISPPLANE_ROTATE_180)
+ plane_config->rotation = DRM_MODE_ROTATE_180;
}
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
+ val & DISPPLANE_MIRROR)
+ plane_config->rotation |= DRM_MODE_REFLECT_X;
+
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
fb->format = drm_format_info(fourcc);
@@ -7955,6 +7898,49 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
+static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
+
+ pipe_config->lspcon_downsampling = false;
+
+ if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
+ u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
+
+ if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
+ bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
+ bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
+
+ if (ycbcr420_enabled) {
+ /* We support 4:2:0 in full blend mode only */
+ if (!blend)
+ output = INTEL_OUTPUT_FORMAT_INVALID;
+ else if (!(IS_GEMINILAKE(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 10))
+ output = INTEL_OUTPUT_FORMAT_INVALID;
+ else
+ output = INTEL_OUTPUT_FORMAT_YCBCR420;
+ } else {
+ /*
+ * Currently there is no interface defined to
+ * check user preference between RGB/YCBCR444
+ * or YCBCR420. So the only possible case for
+ * YCBCR444 usage is driving YCBCR420 output
+ * with LSPCON, when pipe is configured for
+ * YCBCR444 output and LSPCON takes care of
+ * downsampling it.
+ */
+ pipe_config->lspcon_downsampling = true;
+ output = INTEL_OUTPUT_FORMAT_YCBCR444;
+ }
+ }
+ }
+
+ pipe_config->output_format = output;
+}
+
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -7967,6 +7953,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
@@ -8498,16 +8485,16 @@ void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
lpt_init_pch_refclk(dev_priv);
}
-static void ironlake_set_pipeconf(struct drm_crtc *crtc)
+static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
uint32_t val;
val = 0;
- switch (intel_crtc->config->pipe_bpp) {
+ switch (crtc_state->pipe_bpp) {
case 18:
val |= PIPECONF_6BPC;
break;
@@ -8525,32 +8512,32 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
BUG();
}
- if (intel_crtc->config->dither)
+ if (crtc_state->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
- if (intel_crtc->config->limited_color_range)
+ if (crtc_state->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
}
-static void haswell_set_pipeconf(struct drm_crtc *crtc)
+static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
- if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
+ if (IS_HASWELL(dev_priv) && crtc_state->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
@@ -8559,16 +8546,15 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
POSTING_READ(PIPECONF(cpu_transcoder));
}
-static void haswell_set_pipemisc(struct drm_crtc *crtc)
+static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *config = intel_crtc->config;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
u32 val = 0;
- switch (intel_crtc->config->pipe_bpp) {
+ switch (crtc_state->pipe_bpp) {
case 18:
val |= PIPEMISC_DITHER_6_BPC;
break;
@@ -8586,14 +8572,16 @@ static void haswell_set_pipemisc(struct drm_crtc *crtc)
BUG();
}
- if (intel_crtc->config->dither)
+ if (crtc_state->dither)
val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
- if (config->ycbcr420) {
- val |= PIPEMISC_OUTPUT_COLORSPACE_YUV |
- PIPEMISC_YUV420_ENABLE |
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+ val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ val |= PIPEMISC_YUV420_ENABLE |
PIPEMISC_YUV420_MODE_FULL_BLEND;
- }
I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
}
@@ -8804,12 +8792,8 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
- /* Read M2_N2 registers only for gen < 8 (M2_N2 available for
- * gen < 8) and if DRRS is supported (to make sure the
- * registers are not unnecessarily read).
- */
- if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
- crtc->config->has_drrs) {
+
+ if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
@@ -8952,6 +8936,29 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
goto error;
}
+ /*
+ * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
+ * while i915 HW rotation is clockwise; that's why they are swapped here.
+ */
+ switch (val & PLANE_CTL_ROTATE_MASK) {
+ case PLANE_CTL_ROTATE_0:
+ plane_config->rotation = DRM_MODE_ROTATE_0;
+ break;
+ case PLANE_CTL_ROTATE_90:
+ plane_config->rotation = DRM_MODE_ROTATE_270;
+ break;
+ case PLANE_CTL_ROTATE_180:
+ plane_config->rotation = DRM_MODE_ROTATE_180;
+ break;
+ case PLANE_CTL_ROTATE_270:
+ plane_config->rotation = DRM_MODE_ROTATE_90;
+ break;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ val & PLANE_CTL_FLIP_HORIZONTAL)
+ plane_config->rotation |= DRM_MODE_REFLECT_X;
+
base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
plane_config->base = base;
@@ -8962,7 +8969,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->width = ((val >> 0) & 0x1fff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, plane_id));
- stride_mult = intel_fb_stride_alignment(fb, 0);
+ stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
aligned_height = intel_fb_align_height(fb, 0, fb->height);
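
The rotation readout added in this hunk has to translate between two conventions: the DRM rotation property counts counter-clockwise (to match Xrandr) while the PLANE_CTL rotation field counts clockwise, so 90 and 270 trade places. A small standalone sketch of the angle mapping, using plain integers rather than the DRM_MODE_ROTATE_*/PLANE_CTL_ROTATE_* bit values:

#include <stdio.h>

/* Stand-in angles -- illustrative, not the DRM or PLANE_CTL encodings. */
enum { ROT_0 = 0, ROT_90 = 90, ROT_180 = 180, ROT_270 = 270 };

/* DRM rotation is counter-clockwise, the hardware field is clockwise,
 * so 90 and 270 swap in either direction; 0 and 180 map to themselves. */
static int hw_cw_to_drm_ccw(int hw_angle)
{
    return (360 - hw_angle) % 360;
}

int main(void)
{
    int angles[] = { ROT_0, ROT_90, ROT_180, ROT_270 };
    int i;

    for (i = 0; i < 4; i++)
        printf("hw %3d -> drm %3d\n", angles[i], hw_cw_to_drm_ccw(angles[i]));
    return 0;
}
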
@@ -9018,6 +9025,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
@@ -9325,10 +9333,12 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
to_intel_atomic_state(crtc_state->base.state);
- if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
+ IS_ICELAKE(dev_priv)) {
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
@@ -9366,30 +9376,17 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
u32 temp;
/* TODO: TBT pll not implemented. */
- switch (port) {
- case PORT_A:
- case PORT_B:
+ if (intel_port_is_combophy(dev_priv, port)) {
temp = I915_READ(DPCLKA_CFGCR0_ICL) &
DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
- if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1))
+ if (WARN_ON(!intel_dpll_is_combophy(id)))
return;
- break;
- case PORT_C:
- id = DPLL_ID_ICL_MGPLL1;
- break;
- case PORT_D:
- id = DPLL_ID_ICL_MGPLL2;
- break;
- case PORT_E:
- id = DPLL_ID_ICL_MGPLL3;
- break;
- case PORT_F:
- id = DPLL_ID_ICL_MGPLL4;
- break;
- default:
- MISSING_CASE(port);
+ } else if (intel_port_is_tc(dev_priv, port)) {
+ id = icl_port_to_mg_pll_id(port);
+ } else {
+ WARN(1, "Invalid port %x\n", port);
return;
}
@@ -9479,11 +9476,18 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
+ unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
+ unsigned long enabled_panel_transcoders = 0;
+ enum transcoder panel_transcoder;
u32 tmp;
+ if (IS_ICELAKE(dev_priv))
+ panel_transcoder_mask |=
+ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+
/*
* The pipe->transcoder mapping is fixed with the exception of the eDP
- * transcoder handled below.
+ * and DSI transcoders handled below.
*/
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -9491,29 +9495,49 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
* XXX: Do intel_display_power_get_if_enabled before reading this (for
* consistency and less surprising code; it's in always on power).
*/
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
- if (tmp & TRANS_DDI_FUNC_ENABLE) {
- enum pipe trans_edp_pipe;
+ for_each_set_bit(panel_transcoder, &panel_transcoder_mask, 32) {
+ enum pipe trans_pipe;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
+ if (!(tmp & TRANS_DDI_FUNC_ENABLE))
+ continue;
+
+ /*
+ * Log all enabled ones, only use the first one.
+ *
+ * FIXME: This won't work for two separate DSI displays.
+ */
+ enabled_panel_transcoders |= BIT(panel_transcoder);
+ if (enabled_panel_transcoders != BIT(panel_transcoder))
+ continue;
+
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
- WARN(1, "unknown pipe linked to edp transcoder\n");
+ WARN(1, "unknown pipe linked to transcoder %s\n",
+ transcoder_name(panel_transcoder));
/* fall through */
case TRANS_DDI_EDP_INPUT_A_ONOFF:
case TRANS_DDI_EDP_INPUT_A_ON:
- trans_edp_pipe = PIPE_A;
+ trans_pipe = PIPE_A;
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
- trans_edp_pipe = PIPE_B;
+ trans_pipe = PIPE_B;
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
- trans_edp_pipe = PIPE_C;
+ trans_pipe = PIPE_C;
break;
}
- if (trans_edp_pipe == crtc->pipe)
- pipe_config->cpu_transcoder = TRANSCODER_EDP;
+ if (trans_pipe == crtc->pipe)
+ pipe_config->cpu_transcoder = panel_transcoder;
}
+ /*
+ * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
+ */
+ WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
+ enabled_panel_transcoders != BIT(TRANSCODER_EDP));
+
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
@@ -9646,33 +9670,18 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
if (!active)
goto out;
- if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
+ if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
+ IS_ICELAKE(dev_priv)) {
haswell_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
}
intel_get_pipe_src_size(crtc, pipe_config);
+ intel_get_crtc_ycbcr_config(crtc, pipe_config);
pipe_config->gamma_mode =
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
- if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
- u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
- bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;
-
- if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- bool blend_mode_420 = tmp &
- PIPEMISC_YUV420_MODE_FULL_BLEND;
-
- pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE;
- if (pipe_config->ycbcr420 != clrspace_yuv ||
- pipe_config->ycbcr420 != blend_mode_420)
- DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp);
- } else if (clrspace_yuv) {
- DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n");
- }
- }
-
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
power_domain_mask |= BIT_ULL(power_domain);
@@ -9718,7 +9727,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 base;
- if (INTEL_INFO(dev_priv)->cursor_needs_physical)
+ if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
base = obj->phys_handle->busaddr;
else
base = intel_plane_ggtt_offset(plane_state);
@@ -9941,15 +9950,13 @@ static void i845_update_cursor(struct intel_plane *plane,
I915_WRITE_FW(CURPOS(PIPE_A), pos);
}
- POSTING_READ_FW(CURCNTR(PIPE_A));
-
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void i845_disable_cursor(struct intel_plane *plane,
- struct intel_crtc *crtc)
+ const struct intel_crtc_state *crtc_state)
{
- i845_update_cursor(plane, NULL, NULL);
+ i845_update_cursor(plane, crtc_state, NULL);
}
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
@@ -10140,8 +10147,8 @@ static void i9xx_update_cursor(struct intel_plane *plane,
* On some platforms writing CURCNTR first will also
* cause CURPOS to be armed by the CURBASE write.
* Without the CURCNTR write the CURPOS write would
- * arm itself. Thus we always start the full update
- * with a CURCNTR write.
+ * arm itself. Thus we always update CURCNTR before
+ * CURPOS.
*
* On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
@@ -10151,15 +10158,20 @@ static void i9xx_update_cursor(struct intel_plane *plane,
* cursor that doesn't appear to move, or even change
* shape. Thus we always write CURBASE.
*
- * CURCNTR and CUR_FBC_CTL are always
- * armed by the CURBASE write only.
+	 * The other registers are armed by the CURBASE write
+	 * except when the plane is getting enabled, at which time
+ * the CURCNTR write arms the update.
*/
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_write_cursor_wm(plane, crtc_state);
+
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
plane->cursor.cntl != cntl) {
- I915_WRITE_FW(CURCNTR(pipe), cntl);
if (HAS_CUR_FBC(dev_priv))
I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
+ I915_WRITE_FW(CURCNTR(pipe), cntl);
I915_WRITE_FW(CURPOS(pipe), pos);
I915_WRITE_FW(CURBASE(pipe), base);
@@ -10171,15 +10183,13 @@ static void i9xx_update_cursor(struct intel_plane *plane,
I915_WRITE_FW(CURBASE(pipe), base);
}
- POSTING_READ_FW(CURBASE(pipe));
-
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void i9xx_disable_cursor(struct intel_plane *plane,
- struct intel_crtc *crtc)
+ const struct intel_crtc_state *crtc_state)
{
- i9xx_update_cursor(plane, NULL, NULL);
+ i9xx_update_cursor(plane, crtc_state, NULL);
}
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
@@ -10777,14 +10787,40 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
pipe_config->fb_bits |= plane->frontbuffer_bit;
/*
+ * ILK/SNB DVSACNTR/Sprite Enable
+ * IVB SPR_CTL/Sprite Enable
+ * "When in Self Refresh Big FIFO mode, a write to enable the
+ * plane will be internally buffered and delayed while Big FIFO
+ * mode is exiting."
+ *
+ * Which means that enabling the sprite can take an extra frame
+ * when we start in big FIFO mode (LP1+). Thus we need to drop
+ * down to LP0 and wait for vblank in order to make sure the
+ * sprite gets enabled on the next vblank after the register write.
+ * Doing otherwise would risk enabling the sprite one frame after
+ * we've already signalled flip completion. We can resume LP1+
+ * once the sprite has been enabled.
+ *
+ *
* WaCxSRDisabledForSpriteScaling:ivb
+ * IVB SPR_SCALE/Scaling Enable
+ * "Low Power watermarks must be disabled for at least one
+ * frame before enabling sprite scaling, and kept disabled
+ * until sprite scaling is disabled."
*
- * cstate->update_wm was already set above, so this flag will
- * take effect when we commit and program watermarks.
+ * ILK/SNB DVSASCALE/Scaling Enable
+ * "When in Self Refresh Big FIFO mode, scaling enable will be
+ * masked off while Big FIFO mode is exiting."
+ *
+ * Despite the w/a only being listed for IVB we assume that
+ * the ILK/SNB note has similar ramifications, hence we apply
+ * the w/a on all three platforms.
*/
- if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
- needs_scaling(to_intel_plane_state(plane_state)) &&
- !needs_scaling(old_plane_state))
+ if (plane->id == PLANE_SPRITE0 &&
+ (IS_GEN5(dev_priv) || IS_GEN6(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv)) &&
+ (turn_on || (!needs_scaling(old_plane_state) &&
+ needs_scaling(to_intel_plane_state(plane_state)))))
pipe_config->disable_lp_wm = true;
return 0;
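
A compact way to read the new condition: LP1+ watermarks are dropped whenever sprite 0 on ILK/SNB/IVB is being turned on, or scaling is being switched on for it. The following is an illustrative standalone sketch of that predicate, with hypothetical names; it is not part of the patch.

#include <stdbool.h>
#include <stdio.h>

struct plane_update {
	bool is_sprite0;
	bool is_ilk_snb_or_ivb;
	bool turning_on;
	bool was_scaling;
	bool will_scale;
};

static bool must_drop_to_lp0(const struct plane_update *u)
{
	if (!u->is_sprite0 || !u->is_ilk_snb_or_ivb)
		return false;
	/* enabling the plane, or enabling scaling, needs LP1+ off for a frame */
	return u->turning_on || (!u->was_scaling && u->will_scale);
}

int main(void)
{
	struct plane_update u = {
		.is_sprite0 = true, .is_ilk_snb_or_ivb = true,
		.turning_on = false, .was_scaling = false, .will_scale = true,
	};

	printf("%d\n", must_drop_to_lp0(&u));	/* 1: scaling is being enabled */
	return 0;
}
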
@@ -10820,6 +10856,101 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state,
return true;
}
+static int icl_add_linked_planes(struct intel_atomic_state *state)
+{
+ struct intel_plane *plane, *linked;
+ struct intel_plane_state *plane_state, *linked_plane_state;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ linked = plane_state->linked_plane;
+
+ if (!linked)
+ continue;
+
+ linked_plane_state = intel_atomic_get_plane_state(state, linked);
+ if (IS_ERR(linked_plane_state))
+ return PTR_ERR(linked_plane_state);
+
+ WARN_ON(linked_plane_state->linked_plane != plane);
+ WARN_ON(linked_plane_state->slave == plane_state->slave);
+ }
+
+ return 0;
+}
+
+static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
+ struct intel_plane *plane, *linked;
+ struct intel_plane_state *plane_state;
+ int i;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+ /*
+ * Destroy all old plane links and make the slave plane invisible
+ * in the crtc_state->active_planes mask.
+ */
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
+ continue;
+
+ plane_state->linked_plane = NULL;
+ if (plane_state->slave && !plane_state->base.visible) {
+ crtc_state->active_planes &= ~BIT(plane->id);
+ crtc_state->update_planes |= BIT(plane->id);
+ }
+
+ plane_state->slave = false;
+ }
+
+ if (!crtc_state->nv12_planes)
+ return 0;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct intel_plane_state *linked_state = NULL;
+
+ if (plane->pipe != crtc->pipe ||
+ !(crtc_state->nv12_planes & BIT(plane->id)))
+ continue;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
+ if (!icl_is_nv12_y_plane(linked->id))
+ continue;
+
+ if (crtc_state->active_planes & BIT(linked->id))
+ continue;
+
+ linked_state = intel_atomic_get_plane_state(state, linked);
+ if (IS_ERR(linked_state))
+ return PTR_ERR(linked_state);
+
+ break;
+ }
+
+ if (!linked_state) {
+ DRM_DEBUG_KMS("Need %d free Y planes for NV12\n",
+ hweight8(crtc_state->nv12_planes));
+
+ return -EINVAL;
+ }
+
+ plane_state->linked_plane = linked;
+
+ linked_state->slave = true;
+ linked_state->linked_plane = plane;
+ crtc_state->active_planes |= BIT(linked->id);
+ crtc_state->update_planes |= BIT(linked->id);
+ DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
+ }
+
+ return 0;
+}
+
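
The pairing loop above boils down to: for every plane that scans out NV12, grab a free Y-capable plane on the same pipe, or fail the atomic check if none is left. Below is an illustrative standalone sketch of that selection step with a hypothetical bit layout; it is not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* pick the lowest-numbered Y-capable plane that is not already active */
static int pick_free_y_plane(uint32_t y_capable, uint32_t active)
{
	uint32_t free = y_capable & ~active;

	if (!free)
		return -1;	/* no Y plane left: the atomic check fails with -EINVAL */
	return __builtin_ctz(free);
}

int main(void)
{
	uint32_t y_capable = 0xf0;	/* planes 4..7 can act as Y planes (hypothetical) */
	uint32_t active = 0x13;		/* planes 0, 1 and 4 already in use */

	printf("linked Y plane: %d\n", pick_free_y_plane(y_capable, active));	/* 5 */
	return 0;
}
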
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
@@ -10828,7 +10959,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc_state);
- struct drm_atomic_state *state = crtc_state->state;
int ret;
bool mode_changed = needs_modeset(crtc_state);
@@ -10865,8 +10995,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
}
}
- if (dev_priv->display.compute_intermediate_wm &&
- !to_intel_atomic_state(state)->skip_intermediate_wm) {
+ if (dev_priv->display.compute_intermediate_wm) {
if (WARN_ON(!dev_priv->display.compute_pipe_wm))
return 0;
@@ -10882,9 +11011,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
return ret;
}
- } else if (dev_priv->display.compute_intermediate_wm) {
- if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
- pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
}
if (INTEL_GEN(dev_priv) >= 9) {
@@ -10892,6 +11018,8 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
ret = skl_update_scaler_crtc(pipe_config);
if (!ret)
+ ret = icl_check_nv12_planes(pipe_config);
+ if (!ret)
ret = skl_check_pipe_max_pixel_rate(intel_crtc,
pipe_config);
if (!ret)
@@ -10906,8 +11034,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
- .atomic_begin = intel_begin_crtc_commit,
- .atomic_flush = intel_finish_crtc_commit,
.atomic_check = intel_crtc_atomic_check,
};
@@ -10936,30 +11062,42 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
drm_connector_list_iter_end(&conn_iter);
}
-static void
-connected_sink_compute_bpp(struct intel_connector *connector,
- struct intel_crtc_state *pipe_config)
+static int
+compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
+ struct intel_crtc_state *pipe_config)
{
- const struct drm_display_info *info = &connector->base.display_info;
- int bpp = pipe_config->pipe_bpp;
+ struct drm_connector *connector = conn_state->connector;
+ const struct drm_display_info *info = &connector->display_info;
+ int bpp;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
- connector->base.base.id,
- connector->base.name);
-
- /* Don't use an invalid EDID bpc value */
- if (info->bpc != 0 && info->bpc * 3 < bpp) {
- DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
- bpp, info->bpc * 3);
- pipe_config->pipe_bpp = info->bpc * 3;
+ switch (conn_state->max_bpc) {
+ case 6 ... 7:
+ bpp = 6 * 3;
+ break;
+ case 8 ... 9:
+ bpp = 8 * 3;
+ break;
+ case 10 ... 11:
+ bpp = 10 * 3;
+ break;
+ case 12:
+ bpp = 12 * 3;
+ break;
+ default:
+ return -EINVAL;
}
- /* Clamp bpp to 8 on screens without EDID 1.4 */
- if (info->bpc == 0 && bpp > 24) {
- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
- bpp);
- pipe_config->pipe_bpp = 24;
+ if (bpp < pipe_config->pipe_bpp) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
+ "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
+ connector->base.id, connector->name,
+ bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
+ pipe_config->pipe_bpp);
+
+ pipe_config->pipe_bpp = bpp;
}
+
+ return 0;
}
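
The new max_bpc handling replaces EDID-based clamping with a simple range switch: the connector property value is rounded down to a supported bpc and only applied when it is lower than the platform default. A standalone sketch of that mapping is shown below; the case ranges are the same GCC extension the kernel already uses, the names are hypothetical, and the code is not part of the patch.

#include <stdio.h>

static int sink_max_bpp(int max_bpc)
{
	switch (max_bpc) {
	case 6 ... 7:
		return 6 * 3;
	case 8 ... 9:
		return 8 * 3;
	case 10 ... 11:
		return 10 * 3;
	case 12:
		return 12 * 3;
	default:
		return -1;	/* invalid property value */
	}
}

int main(void)
{
	int pipe_bpp = 30;		/* platform default, e.g. 10 bpc */
	int bpp = sink_max_bpp(8);	/* connector limited to 8 bpc */

	if (bpp > 0 && bpp < pipe_bpp)
		pipe_bpp = bpp;
	printf("pipe_bpp = %d\n", pipe_bpp);	/* 24 */
	return 0;
}
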
static int
@@ -10967,7 +11105,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct drm_atomic_state *state;
+ struct drm_atomic_state *state = pipe_config->base.state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int bpp, i;
@@ -10980,21 +11118,21 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
else
bpp = 8*3;
-
pipe_config->pipe_bpp = bpp;
- state = pipe_config->base.state;
-
- /* Clamp display bpp to EDID value */
+ /* Clamp display bpp to connector max bpp */
for_each_new_connector_in_state(state, connector, connector_state, i) {
+ int ret;
+
if (connector_state->crtc != &crtc->base)
continue;
- connected_sink_compute_bpp(to_intel_connector(connector),
- pipe_config);
+ ret = compute_sink_pipe_bpp(connector_state, pipe_config);
+ if (ret)
+ return ret;
}
- return bpp;
+ return 0;
}
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
@@ -11064,6 +11202,20 @@ static void snprintf_output_types(char *buf, size_t len,
WARN_ON_ONCE(output_types != 0);
}
+static const char * const output_format_str[] = {
+ [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
+ [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
+ [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
+ [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
+};
+
+static const char *output_formats(enum intel_output_format format)
+{
+ if (format >= ARRAY_SIZE(output_format_str))
+ format = INTEL_OUTPUT_FORMAT_INVALID;
+ return output_format_str[format];
+}
+
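
output_formats() is the usual bounds-checked string-table lookup: an out-of-range enum value falls back to the "Invalid" slot instead of indexing past the array. Minimal standalone sketch of the pattern, with ARRAY_SIZE defined locally for illustration; not part of the patch.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum fmt { FMT_INVALID, FMT_RGB, FMT_YCBCR420, FMT_YCBCR444 };

static const char * const fmt_str[] = {
	[FMT_INVALID]	= "Invalid",
	[FMT_RGB]	= "RGB",
	[FMT_YCBCR420]	= "YCBCR4:2:0",
	[FMT_YCBCR444]	= "YCBCR4:4:4",
};

static const char *fmt_name(unsigned int f)
{
	if (f >= ARRAY_SIZE(fmt_str))
		f = FMT_INVALID;
	return fmt_str[f];
}

int main(void)
{
	printf("%s %s\n", fmt_name(FMT_YCBCR420), fmt_name(99));	/* "YCBCR4:2:0 Invalid" */
	return 0;
}
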
static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
const char *context)
@@ -11083,6 +11235,9 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
buf, pipe_config->output_types);
+ DRM_DEBUG_KMS("output format: %s\n",
+ output_formats(pipe_config->output_format));
+
DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
transcoder_name(pipe_config->cpu_transcoder),
pipe_config->pipe_bpp, pipe_config->dither);
@@ -11092,9 +11247,6 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->fdi_lanes,
&pipe_config->fdi_m_n);
- if (pipe_config->ycbcr420)
- DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n");
-
if (intel_crtc_has_dp_encoder(pipe_config)) {
intel_dump_m_n_config(pipe_config, "dp m_n",
pipe_config->lane_count, &pipe_config->dp_m_n);
@@ -11283,7 +11435,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
struct intel_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
- int base_bpp, ret = -EINVAL;
+ int base_bpp, ret;
int i;
bool retry = true;
@@ -11305,10 +11457,12 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
(DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
- base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
- pipe_config);
- if (base_bpp < 0)
- goto fail;
+ ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
+ pipe_config);
+ if (ret)
+ return ret;
+
+ base_bpp = pipe_config->pipe_bpp;
/*
* Determine the real pipe dimensions. Note that stereo modes can
@@ -11330,7 +11484,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
- goto fail;
+ return -EINVAL;
}
/*
@@ -11366,7 +11520,7 @@ encoder_retry:
if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
DRM_DEBUG_KMS("Encoder config failure\n");
- goto fail;
+ return -EINVAL;
}
}
@@ -11377,16 +11531,16 @@ encoder_retry:
* pipe_config->pixel_multiplier;
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
+ if (ret == -EDEADLK)
+ return ret;
if (ret < 0) {
DRM_DEBUG_KMS("CRTC fixup failed\n");
- goto fail;
+ return ret;
}
if (ret == RETRY) {
- if (WARN(!retry, "loop in pipe configuration computation\n")) {
- ret = -EINVAL;
- goto fail;
- }
+ if (WARN(!retry, "loop in pipe configuration computation\n"))
+ return -EINVAL;
DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
retry = false;
@@ -11402,8 +11556,7 @@ encoder_retry:
DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
-fail:
- return ret;
+ return 0;
}
static bool intel_fuzzy_clock_check(int clock1, int clock2)
@@ -11672,6 +11825,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
PIPE_CONF_CHECK_I(pixel_multiplier);
+ PIPE_CONF_CHECK_I(output_format);
PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -11680,7 +11834,6 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
- PIPE_CONF_CHECK_BOOL(ycbcr420);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
@@ -11802,6 +11955,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
struct skl_pipe_wm hw_wm, *sw_wm;
struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
+ struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const enum pipe pipe = intel_crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
@@ -11812,6 +11967,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
skl_pipe_wm_get_hw_state(crtc, &hw_wm);
sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
+ skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
+
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
@@ -11854,8 +12011,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb.plane[pipe][plane];
- sw_ddb_entry = &sw_ddb->plane[pipe][plane];
+ hw_ddb_entry = &hw_ddb_y[plane];
+ sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
@@ -11904,8 +12061,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
- sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+ hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
+ sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
@@ -12189,8 +12346,9 @@ intel_modeset_verify_disabled(struct drm_device *dev,
verify_disabled_dpll_state(dev);
}
-static void update_scanline_offset(struct intel_crtc *crtc)
+static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/*
@@ -12221,7 +12379,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
* answer that's slightly in the future.
*/
if (IS_GEN2(dev_priv)) {
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
int vtotal;
vtotal = adjusted_mode->crtc_vtotal;
@@ -12230,7 +12388,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
crtc->scanline_offset = vtotal - 1;
} else if (HAS_DDI(dev_priv) &&
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
} else
crtc->scanline_offset = 1;
@@ -12513,6 +12671,8 @@ static int intel_atomic_check(struct drm_device *dev,
}
ret = intel_modeset_pipe_config(crtc, pipe_config);
+ if (ret == -EDEADLK)
+ return ret;
if (ret) {
intel_dump_pipe_config(to_intel_crtc(crtc),
pipe_config, "[failed]");
@@ -12544,6 +12704,10 @@ static int intel_atomic_check(struct drm_device *dev,
intel_state->cdclk.logical = dev_priv->cdclk.logical;
}
+ ret = icl_add_linked_planes(intel_state);
+ if (ret)
+ return ret;
+
ret = drm_atomic_helper_check_planes(dev, state);
if (ret)
return ret;
@@ -12583,7 +12747,7 @@ static void intel_update_crtc(struct drm_crtc *crtc,
to_intel_plane(crtc->primary));
if (modeset) {
- update_scanline_offset(intel_crtc);
+ update_scanline_offset(pipe_config);
dev_priv->display.crtc_enable(pipe_config, state);
/* vblanks work again, re-enable pipe CRC. */
@@ -12596,7 +12760,14 @@ static void intel_update_crtc(struct drm_crtc *crtc,
if (new_plane_state)
intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
- drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
+ intel_begin_crtc_commit(crtc, old_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+ else
+ i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+
+ intel_finish_crtc_commit(crtc, old_crtc_state);
}
static void intel_update_crtcs(struct drm_atomic_state *state)
@@ -12628,13 +12799,12 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
int i;
u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
-
- const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
+ struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
/* ignore allocations for crtc's that have been turned off. */
if (new_crtc_state->active)
- entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
+ entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
/* If 2nd DBuf slice required, enable it here */
if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
@@ -12660,14 +12830,13 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
if (updated & cmask || !cstate->base.active)
continue;
- if (skl_ddb_allocation_overlaps(dev_priv,
+ if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
entries,
- &cstate->wm.skl.ddb,
- i))
+ INTEL_INFO(dev_priv)->num_pipes, i))
continue;
updated |= cmask;
- entries[i] = &cstate->wm.skl.ddb;
+ entries[i] = cstate->wm.skl.ddb;
/*
	 * If this is an already active pipe, its DDB changed,
@@ -12757,8 +12926,9 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
struct drm_crtc *crtc;
- struct intel_crtc_state *intel_cstate;
+ struct intel_crtc *intel_crtc;
u64 put_domains[I915_MAX_PIPES] = {};
int i;
@@ -12770,24 +12940,25 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
+ new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
+ intel_crtc = to_intel_crtc(crtc);
if (needs_modeset(new_crtc_state) ||
to_intel_crtc_state(new_crtc_state)->update_pipe) {
- put_domains[to_intel_crtc(crtc)->pipe] =
+ put_domains[intel_crtc->pipe] =
modeset_get_crtc_power_domains(crtc,
- to_intel_crtc_state(new_crtc_state));
+ new_intel_crtc_state);
}
if (!needs_modeset(new_crtc_state))
continue;
- intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
- to_intel_crtc_state(new_crtc_state));
+ intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
if (old_crtc_state->active) {
- intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
+ intel_crtc_disable_planes(intel_state, intel_crtc);
/*
* We need to disable pipe CRC before disabling the pipe,
@@ -12795,10 +12966,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
*/
intel_crtc_disable_pipe_crc(intel_crtc);
- dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
+ dev_priv->display.crtc_disable(old_intel_crtc_state, state);
intel_crtc->active = false;
intel_fbc_disable(intel_crtc);
- intel_disable_shared_dpll(intel_crtc);
+ intel_disable_shared_dpll(old_intel_crtc_state);
/*
* Underruns don't always raise
@@ -12812,7 +12983,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
!HAS_GMCH_DISPLAY(dev_priv) &&
dev_priv->display.initial_watermarks)
dev_priv->display.initial_watermarks(intel_state,
- to_intel_crtc_state(new_crtc_state));
+ new_intel_crtc_state);
}
}
@@ -12871,11 +13042,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* TODO: Move this (and other cleanup) to an async worker eventually.
*/
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- intel_cstate = to_intel_crtc_state(new_crtc_state);
+ new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
if (dev_priv->display.optimize_watermarks)
dev_priv->display.optimize_watermarks(intel_state,
- intel_cstate);
+ new_intel_crtc_state);
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -13142,7 +13313,7 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
struct i915_vma *vma;
if (plane->id == PLANE_CURSOR &&
- INTEL_INFO(dev_priv)->cursor_needs_physical) {
+ INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int align = intel_cursor_alignment(dev_priv);
int err;
@@ -13258,13 +13429,12 @@ intel_prepare_plane_fb(struct drm_plane *plane,
ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
- fb_obj_bump_render_priority(obj);
-
mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
if (ret)
return ret;
+ fb_obj_bump_render_priority(obj);
intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
if (!new_state->fence) { /* implicit fencing */
@@ -13395,7 +13565,7 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
if (intel_cstate->update_pipe)
intel_update_pipe_config(old_intel_cstate, intel_cstate);
else if (INTEL_GEN(dev_priv) >= 9)
- skl_detach_scalers(intel_crtc);
+ skl_detach_scalers(intel_cstate);
out:
if (dev_priv->display.atomic_update_watermarks)
@@ -13497,56 +13667,6 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
}
}
-static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- struct intel_plane *plane = to_intel_plane(_plane);
-
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- break;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- if (!plane->has_ccs)
- return false;
- break;
- default:
- return false;
- }
-
- switch (format) {
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- if (is_ccs_modifier(modifier))
- return true;
- /* fall through */
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_NV12:
- if (modifier == I915_FORMAT_MOD_Yf_TILED)
- return true;
- /* fall through */
- case DRM_FORMAT_C8:
- if (modifier == DRM_FORMAT_MOD_LINEAR ||
- modifier == I915_FORMAT_MOD_X_TILED ||
- modifier == I915_FORMAT_MOD_Y_TILED)
- return true;
- /* fall through */
- default:
- return false;
- }
-}
-
static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
@@ -13554,18 +13674,7 @@ static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
format == DRM_FORMAT_ARGB8888;
}
-static struct drm_plane_funcs skl_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = skl_plane_format_mod_supported,
-};
-
-static struct drm_plane_funcs i965_plane_funcs = {
+static const struct drm_plane_funcs i965_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
@@ -13576,7 +13685,7 @@ static struct drm_plane_funcs i965_plane_funcs = {
.format_mod_supported = i965_plane_format_mod_supported,
};
-static struct drm_plane_funcs i8xx_plane_funcs = {
+static const struct drm_plane_funcs i8xx_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
@@ -13602,14 +13711,16 @@ intel_legacy_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state, *new_plane_state;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *old_fb;
- struct drm_crtc_state *crtc_state = crtc->state;
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->state);
+ struct intel_crtc_state *new_crtc_state;
/*
* When crtc is inactive or there is a modeset pending,
* wait for it to complete in the slowpath
*/
- if (!crtc_state->active || needs_modeset(crtc_state) ||
- to_intel_crtc_state(crtc_state)->update_pipe)
+ if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
+ crtc_state->update_pipe)
goto slow;
old_plane_state = plane->state;
@@ -13639,6 +13750,12 @@ intel_legacy_cursor_update(struct drm_plane *plane,
if (!new_plane_state)
return -ENOMEM;
+ new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
+ if (!new_crtc_state) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
drm_atomic_set_fb_for_plane(new_plane_state, fb);
new_plane_state->src_x = src_x;
@@ -13650,9 +13767,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
new_plane_state->crtc_w = crtc_w;
new_plane_state->crtc_h = crtc_h;
- ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
- to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */
- to_intel_plane_state(plane->state),
+ ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
+ to_intel_plane_state(old_plane_state),
to_intel_plane_state(new_plane_state));
if (ret)
goto out_free;
@@ -13674,14 +13790,25 @@ intel_legacy_cursor_update(struct drm_plane *plane,
/* Swap plane state */
plane->state = new_plane_state;
+ /*
+ * We cannot swap crtc_state as it may be in use by an atomic commit or
+ * page flip that's running simultaneously. If we swap crtc_state and
+ * destroy the old state, we will cause a use-after-free there.
+ *
+ * Only update active_planes, which is needed for our internal
+ * bookkeeping. Either value will do the right thing when updating
+ * planes atomically. If the cursor was part of the atomic update then
+ * we would have taken the slowpath.
+ */
+ crtc_state->active_planes = new_crtc_state->active_planes;
+
if (plane->state->visible) {
trace_intel_update_plane(plane, to_intel_crtc(crtc));
- intel_plane->update_plane(intel_plane,
- to_intel_crtc_state(crtc->state),
+ intel_plane->update_plane(intel_plane, crtc_state,
to_intel_plane_state(plane->state));
} else {
trace_intel_disable_plane(plane, to_intel_crtc(crtc));
- intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
+ intel_plane->disable_plane(intel_plane, crtc_state);
}
intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
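
The comment above states the key constraint of the cursor fast path: the live crtc_state cannot simply be swapped for the freshly checked copy, because a concurrent commit may still hold a reference to it, so only the one bookkeeping field is copied over. An illustrative standalone sketch of that "copy the field, keep the state" pattern follows, with a hypothetical struct; it is not part of the patch.

#include <stdint.h>
#include <stdio.h>

struct crtc_state {
	uint32_t active_planes;
	/* ... many other fields that must not be swapped from under a commit ... */
};

static void cursor_fastpath_commit(struct crtc_state *live,
				   const struct crtc_state *checked)
{
	/* the live state may still be referenced elsewhere; copy, don't swap */
	live->active_planes = checked->active_planes;
}

int main(void)
{
	struct crtc_state live = { .active_planes = 0x1 };
	struct crtc_state checked = live;

	checked.active_planes |= 0x4;	/* cursor plane becomes visible */
	cursor_fastpath_commit(&live, &checked);
	printf("0x%x\n", (unsigned int)live.active_planes);	/* 0x5 */
	return 0;
}
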
@@ -13689,6 +13816,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
+ if (new_crtc_state)
+ intel_crtc_destroy_state(crtc, &new_crtc_state->base);
if (ret)
intel_plane_destroy_state(plane, new_plane_state);
else
@@ -13729,176 +13858,90 @@ static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
return i9xx_plane == PLANE_A;
}
-static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
-{
- if (!HAS_FBC(dev_priv))
- return false;
-
- return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
-}
-
-bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
-{
- /*
- * FIXME: ICL requires two hardware planes for scanning out NV12
- * framebuffers. Do not advertize support until this is implemented.
- */
- if (INTEL_GEN(dev_priv) >= 11)
- return false;
-
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return false;
-
- if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
- return false;
-
- if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
- return false;
-
- return true;
-}
-
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct intel_plane *primary = NULL;
- struct intel_plane_state *state = NULL;
+ struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
- const uint32_t *intel_primary_formats;
unsigned int supported_rotations;
- unsigned int num_formats;
- const uint64_t *modifiers;
+ unsigned int possible_crtcs;
+ const u64 *modifiers;
+ const u32 *formats;
+ int num_formats;
int ret;
- primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (!primary) {
- ret = -ENOMEM;
- goto fail;
- }
-
- state = intel_create_plane_state(&primary->base);
- if (!state) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (INTEL_GEN(dev_priv) >= 9)
+ return skl_universal_plane_create(dev_priv, pipe,
+ PLANE_PRIMARY);
- primary->base.state = &state->base;
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
- if (INTEL_GEN(dev_priv) >= 9)
- state->scaler_id = -1;
- primary->pipe = pipe;
+ plane->pipe = pipe;
/*
* On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
* port is hooked to pipe B. Hence we want plane A feeding pipe B.
*/
if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
- primary->i9xx_plane = (enum i9xx_plane_id) !pipe;
+ plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
else
- primary->i9xx_plane = (enum i9xx_plane_id) pipe;
- primary->id = PLANE_PRIMARY;
- primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);
+ plane->i9xx_plane = (enum i9xx_plane_id) pipe;
+ plane->id = PLANE_PRIMARY;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
- if (INTEL_GEN(dev_priv) >= 9)
- primary->has_fbc = skl_plane_has_fbc(dev_priv,
- primary->pipe,
- primary->id);
- else
- primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
- primary->i9xx_plane);
-
- if (primary->has_fbc) {
+ plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
+ if (plane->has_fbc) {
struct intel_fbc *fbc = &dev_priv->fbc;
- fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
+ fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
}
- if (INTEL_GEN(dev_priv) >= 9) {
- primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
- PLANE_PRIMARY);
-
- if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
- intel_primary_formats = skl_pri_planar_formats;
- num_formats = ARRAY_SIZE(skl_pri_planar_formats);
- } else {
- intel_primary_formats = skl_primary_formats;
- num_formats = ARRAY_SIZE(skl_primary_formats);
- }
-
- if (primary->has_ccs)
- modifiers = skl_format_modifiers_ccs;
- else
- modifiers = skl_format_modifiers_noccs;
-
- primary->max_stride = skl_plane_max_stride;
- primary->update_plane = skl_update_plane;
- primary->disable_plane = skl_disable_plane;
- primary->get_hw_state = skl_plane_get_hw_state;
- primary->check_plane = skl_plane_check;
-
- plane_funcs = &skl_plane_funcs;
- } else if (INTEL_GEN(dev_priv) >= 4) {
- intel_primary_formats = i965_primary_formats;
+ if (INTEL_GEN(dev_priv) >= 4) {
+ formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
modifiers = i9xx_format_modifiers;
- primary->max_stride = i9xx_plane_max_stride;
- primary->update_plane = i9xx_update_plane;
- primary->disable_plane = i9xx_disable_plane;
- primary->get_hw_state = i9xx_plane_get_hw_state;
- primary->check_plane = i9xx_plane_check;
+ plane->max_stride = i9xx_plane_max_stride;
+ plane->update_plane = i9xx_update_plane;
+ plane->disable_plane = i9xx_disable_plane;
+ plane->get_hw_state = i9xx_plane_get_hw_state;
+ plane->check_plane = i9xx_plane_check;
plane_funcs = &i965_plane_funcs;
} else {
- intel_primary_formats = i8xx_primary_formats;
+ formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
modifiers = i9xx_format_modifiers;
- primary->max_stride = i9xx_plane_max_stride;
- primary->update_plane = i9xx_update_plane;
- primary->disable_plane = i9xx_disable_plane;
- primary->get_hw_state = i9xx_plane_get_hw_state;
- primary->check_plane = i9xx_plane_check;
+ plane->max_stride = i9xx_plane_max_stride;
+ plane->update_plane = i9xx_update_plane;
+ plane->disable_plane = i9xx_disable_plane;
+ plane->get_hw_state = i9xx_plane_get_hw_state;
+ plane->check_plane = i9xx_plane_check;
plane_funcs = &i8xx_plane_funcs;
}
- if (INTEL_GEN(dev_priv) >= 9)
- ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, plane_funcs,
- intel_primary_formats, num_formats,
- modifiers,
- DRM_PLANE_TYPE_PRIMARY,
- "plane 1%c", pipe_name(pipe));
- else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, plane_funcs,
- intel_primary_formats, num_formats,
- modifiers,
+ possible_crtcs = BIT(pipe);
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ possible_crtcs, plane_funcs,
+ formats, num_formats, modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
- ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, plane_funcs,
- intel_primary_formats, num_formats,
- modifiers,
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ possible_crtcs, plane_funcs,
+ formats, num_formats, modifiers,
DRM_PLANE_TYPE_PRIMARY,
"plane %c",
- plane_name(primary->i9xx_plane));
+ plane_name(plane->i9xx_plane));
if (ret)
goto fail;
- if (INTEL_GEN(dev_priv) >= 10) {
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
- DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
- DRM_MODE_REFLECT_X;
- } else if (INTEL_GEN(dev_priv) >= 9) {
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
- DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
- } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X;
@@ -13910,26 +13953,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
}
if (INTEL_GEN(dev_priv) >= 4)
- drm_plane_create_rotation_property(&primary->base,
+ drm_plane_create_rotation_property(&plane->base,
DRM_MODE_ROTATE_0,
supported_rotations);
- if (INTEL_GEN(dev_priv) >= 9)
- drm_plane_create_color_properties(&primary->base,
- BIT(DRM_COLOR_YCBCR_BT601) |
- BIT(DRM_COLOR_YCBCR_BT709),
- BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
- BIT(DRM_COLOR_YCBCR_FULL_RANGE),
- DRM_COLOR_YCBCR_BT709,
- DRM_COLOR_YCBCR_LIMITED_RANGE);
-
- drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
- return primary;
+ return plane;
fail:
- kfree(state);
- kfree(primary);
+ intel_plane_free(plane);
return ERR_PTR(ret);
}
@@ -13938,23 +13971,13 @@ static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct intel_plane *cursor = NULL;
- struct intel_plane_state *state = NULL;
+ unsigned int possible_crtcs;
+ struct intel_plane *cursor;
int ret;
- cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
- if (!cursor) {
- ret = -ENOMEM;
- goto fail;
- }
-
- state = intel_create_plane_state(&cursor->base);
- if (!state) {
- ret = -ENOMEM;
- goto fail;
- }
-
- cursor->base.state = &state->base;
+ cursor = intel_plane_alloc();
+ if (IS_ERR(cursor))
+ return cursor;
cursor->pipe = pipe;
cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
@@ -13981,8 +14004,10 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
cursor->cursor.size = ~0;
+ possible_crtcs = BIT(pipe);
+
ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
- 0, &intel_cursor_plane_funcs,
+ possible_crtcs, &intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
cursor_format_modifiers,
@@ -13997,16 +14022,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_180);
- if (INTEL_GEN(dev_priv) >= 9)
- state->scaler_id = -1;
-
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
return cursor;
fail:
- kfree(state);
- kfree(cursor);
+ intel_plane_free(cursor);
return ERR_PTR(ret);
}
@@ -14027,7 +14048,7 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
struct intel_scaler *scaler = &scaler_state->scalers[i];
scaler->in_use = 0;
- scaler->mode = PS_SCALER_MODE_DYN;
+ scaler->mode = 0;
}
scaler_state->scaler_id = -1;
@@ -14122,18 +14143,6 @@ fail:
return ret;
}
-enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
-{
- struct drm_device *dev = connector->base.dev;
-
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
-
- if (!connector->base.state->crtc)
- return INVALID_PIPE;
-
- return to_intel_crtc(connector->base.state->crtc)->pipe;
-}
-
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -14250,7 +14259,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_pps_init(dev_priv);
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return;
/*
@@ -14270,6 +14279,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_ddi_init(dev_priv, PORT_D);
intel_ddi_init(dev_priv, PORT_E);
intel_ddi_init(dev_priv, PORT_F);
+ icl_dsi_init(dev_priv);
} else if (IS_GEN9_LP(dev_priv)) {
/*
* FIXME: Broxton doesn't support port detection via the
@@ -14492,7 +14502,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
static
u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
- uint64_t fb_modifier, uint32_t pixel_format)
+ u32 pixel_format, u64 fb_modifier)
{
struct intel_crtc *crtc;
struct intel_plane *plane;
@@ -14514,7 +14524,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct drm_framebuffer *fb = &intel_fb->base;
- struct drm_format_name_buf format_name;
u32 pitch_limit;
unsigned int tiling, stride;
int ret = -EINVAL;
@@ -14545,33 +14554,14 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
}
- /* Passed in modifier sanity checking. */
- switch (mode_cmd->modifier[0]) {
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- break;
- default:
- DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
- goto err;
- }
- /* fall through */
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- if (INTEL_GEN(dev_priv) < 9) {
- DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
- mode_cmd->modifier[0]);
- goto err;
- }
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
- DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
+ if (!drm_any_plane_has_format(&dev_priv->drm,
+ mode_cmd->pixel_format,
+ mode_cmd->modifier[0])) {
+ struct drm_format_name_buf format_name;
+
+ DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name),
mode_cmd->modifier[0]);
goto err;
}
@@ -14586,8 +14576,8 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err;
}
- pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
- mode_cmd->pixel_format);
+ pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
+ mode_cmd->modifier[0]);
if (mode_cmd->pitches[0] > pitch_limit) {
DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
@@ -14606,69 +14596,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err;
}
- /* Reject formats not supported by any plane early. */
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- break;
- case DRM_FORMAT_XRGB1555:
- if (INTEL_GEN(dev_priv) > 3) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_ABGR8888:
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- INTEL_GEN(dev_priv) < 9) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- if (INTEL_GEN(dev_priv) < 4) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_ABGR2101010:
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_VYUY:
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_NV12:
- if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
- IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format,
- &format_name));
- goto err;
- }
- break;
- default:
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
-
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
if (mode_cmd->offsets[0] != 0)
goto err;
@@ -14940,174 +14867,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.update_crtcs = intel_update_crtcs;
}
-/*
- * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
- */
-static void quirk_ssc_force_disable(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
- DRM_INFO("applying lvds SSC disable quirk\n");
-}
-
-/*
- * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
- * brightness value
- */
-static void quirk_invert_brightness(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
- DRM_INFO("applying inverted panel brightness quirk\n");
-}
-
-/* Some VBT's incorrectly indicate no backlight is present */
-static void quirk_backlight_present(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
- DRM_INFO("applying backlight present quirk\n");
-}
-
-/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
- * which is 300 ms greater than eDP spec T12 min.
- */
-static void quirk_increase_t12_delay(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
- DRM_INFO("Applying T12 delay quirk\n");
-}
-
-/*
- * GeminiLake NUC HDMI outputs require additional off time
- * this allows the onboard retimer to correctly sync to signal
- */
-static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
- DRM_INFO("Applying Increase DDI Disabled quirk\n");
-}
-
-struct intel_quirk {
- int device;
- int subsystem_vendor;
- int subsystem_device;
- void (*hook)(struct drm_device *dev);
-};
-
-/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
-struct intel_dmi_quirk {
- void (*hook)(struct drm_device *dev);
- const struct dmi_system_id (*dmi_id_list)[];
-};
-
-static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
-{
- DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
- return 1;
-}
-
-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
- {
- .dmi_id_list = &(const struct dmi_system_id[]) {
- {
- .callback = intel_dmi_reverse_brightness,
- .ident = "NCR Corporation",
- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, ""),
- },
- },
- { } /* terminating entry */
- },
- .hook = quirk_invert_brightness,
- },
-};
-
-static struct intel_quirk intel_quirks[] = {
- /* Lenovo U160 cannot use SSC on LVDS */
- { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
-
- /* Sony Vaio Y cannot use SSC on LVDS */
- { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
-
- /* Acer Aspire 5734Z must invert backlight brightness */
- { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
-
- /* Acer/eMachines G725 */
- { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
-
- /* Acer/eMachines e725 */
- { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
-
- /* Acer/Packard Bell NCL20 */
- { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
-
- /* Acer Aspire 4736Z */
- { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
-
- /* Acer Aspire 5336 */
- { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
-
- /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
- { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
-
- /* Acer C720 Chromebook (Core i3 4005U) */
- { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
-
- /* Apple Macbook 2,1 (Core 2 T7400) */
- { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
-
- /* Apple Macbook 4,1 */
- { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
-
- /* Toshiba CB35 Chromebook (Celeron 2955U) */
- { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
-
- /* HP Chromebook 14 (Celeron 2955U) */
- { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
-
- /* Dell Chromebook 11 */
- { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
-
- /* Dell Chromebook 11 (2015 version) */
- { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
-
- /* Toshiba Satellite P50-C-18C */
- { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
-
- /* GeminiLake NUC */
- { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
- { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
- /* ASRock ITX*/
- { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
- { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
-};
-
-static void intel_init_quirks(struct drm_device *dev)
-{
- struct pci_dev *d = dev->pdev;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
- struct intel_quirk *q = &intel_quirks[i];
-
- if (d->device == q->device &&
- (d->subsystem_vendor == q->subsystem_vendor ||
- q->subsystem_vendor == PCI_ANY_ID) &&
- (d->subsystem_device == q->subsystem_device ||
- q->subsystem_device == PCI_ANY_ID))
- q->hook(dev);
- }
- for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
- if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
- intel_dmi_quirks[i].hook(dev);
- }
-}
-
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
@@ -15267,6 +15026,14 @@ retry:
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
goto out;
+
+ /*
+ * FIXME hack to force a LUT update to avoid the
+ * plane update forcing the pipe gamma on without
+ * having a proper LUT loaded. Remove once we
+ * have readout for pipe gamma enable.
+ */
+ crtc_state->color_mgmt_changed = true;
}
}
@@ -15313,7 +15080,9 @@ int intel_modeset_init(struct drm_device *dev)
INIT_WORK(&dev_priv->atomic_helper.free_work,
intel_atomic_helper_free_state_worker);
- intel_init_quirks(dev);
+ intel_init_quirks(dev_priv);
+
+ intel_fbc_init(dev_priv);
intel_init_pm(dev_priv);
@@ -15545,8 +15314,8 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
if (pipe == crtc->pipe)
continue;
- DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
- plane->base.name);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
+ plane->base.base.id, plane->base.name);
plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
intel_plane_disable_noatomic(plane_crtc, plane);
@@ -15587,7 +15356,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/* Clear any frame start delays used for debugging left by the BIOS */
if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
@@ -15597,7 +15367,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
}
- if (crtc->active) {
+ if (crtc_state->base.active) {
struct intel_plane *plane;
/* Disable everything but the primary plane */
@@ -15613,10 +15383,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
- if (crtc->active && !intel_crtc_has_encoders(crtc))
+ if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
intel_crtc_disable_noatomic(&crtc->base, ctx);
- if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
+ if (crtc_state->base.active || HAS_GMCH_DISPLAY(dev_priv)) {
/*
* We start out with underrun reporting disabled to avoid races.
* For correct bookkeeping mark this on active crtcs.
@@ -15647,6 +15417,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_connector *connector;
/* We need to check both for a crtc link (meaning that the
@@ -15670,7 +15441,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
encoder->base.base.id,
encoder->base.name);
- encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+ if (encoder->disable)
+ encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
if (encoder->post_disable)
encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
}
@@ -15687,6 +15459,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
/* notify opregion of the sanitized encoder state */
intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_sanitize_encoder_pll_mapping(encoder);
}
void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
@@ -15735,6 +15510,10 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
crtc_state = to_intel_crtc_state(crtc->base.state);
intel_set_plane_visible(crtc_state, plane_state, visible);
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
+ plane->base.base.id, plane->base.name,
+ enableddisabled(visible), pipe_name(pipe));
}
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -15887,7 +15666,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
drm_calc_timestamping_constants(&crtc->base,
&crtc_state->base.adjusted_mode);
- update_scanline_offset(crtc);
+ update_scanline_offset(crtc_state);
}
dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
@@ -15942,6 +15721,65 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv)
}
}
+static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
+ enum port port, i915_reg_t hdmi_reg)
+{
+ u32 val = I915_READ(hdmi_reg);
+
+ if (val & SDVO_ENABLE ||
+ (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
+ return;
+
+ DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
+ port_name(port));
+
+ val &= ~SDVO_PIPE_SEL_MASK;
+ val |= SDVO_PIPE_SEL(PIPE_A);
+
+ I915_WRITE(hdmi_reg, val);
+}
+
+static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
+ enum port port, i915_reg_t dp_reg)
+{
+ u32 val = I915_READ(dp_reg);
+
+ if (val & DP_PORT_EN ||
+ (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
+ return;
+
+ DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
+ port_name(port));
+
+ val &= ~DP_PIPE_SEL_MASK;
+ val |= DP_PIPE_SEL(PIPE_A);
+
+ I915_WRITE(dp_reg, val);
+}
+
+static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
+{
+ /*
+ * The BIOS may select transcoder B on some of the PCH
+	 * ports even if it doesn't enable the port. This would trip
+ * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
+ * Sanitize the transcoder select bits to prevent that. We
+ * assume that the BIOS never actually enabled the port,
+ * because if it did we'd actually have to toggle the port
+ * on and back off to make the transcoder A select stick
+	 * (see intel_dp_link_down(), intel_disable_hdmi(),
+ * intel_disable_sdvo()).
+ */
+ ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
+ ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
+ ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
+
+ /* PCH SDVOB multiplex with HDMIB */
+ ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
+ ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
+ ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
+}
+
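
The three DP and three HDMI helpers above share one read-modify-write pattern: leave the register alone if the port is enabled or already selects pipe/transcoder A, otherwise rewrite the select field. A standalone sketch of that pattern is shown below; the register layout here is hypothetical, not the real SDVO/DP bit definitions, and the code is not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define PORT_ENABLE	(1u << 31)
#define PIPE_SEL_MASK	(3u << 29)
#define PIPE_SEL(pipe)	((uint32_t)(pipe) << 29)

static uint32_t sanitize_pipe_select(uint32_t val)
{
	/* enabled ports and ports already on pipe A are left untouched */
	if (val & PORT_ENABLE || (val & PIPE_SEL_MASK) == PIPE_SEL(0))
		return val;

	val &= ~PIPE_SEL_MASK;
	val |= PIPE_SEL(0);	/* force pipe A */
	return val;
}

int main(void)
{
	printf("0x%08x\n", (unsigned int)sanitize_pipe_select(PIPE_SEL(1)));	/* 0x00000000 */
	return 0;
}
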
/* Scan out the current hw modeset state,
* and sanitizes it to the current state
*/
@@ -15951,6 +15789,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
struct intel_encoder *encoder;
int i;
@@ -15962,6 +15801,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
/* HW state is read out, now we need to sanitize this mess. */
get_encoder_power_domains(dev_priv);
+ if (HAS_PCH_IBX(dev_priv))
+ ibx_sanitize_pch_ports(dev_priv);
+
/*
* intel_sanitize_plane_mapping() may need to do vblank
* waits, so we need vblank interrupts restored beforehand.
@@ -15969,7 +15811,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
for_each_intel_crtc(&dev_priv->drm, crtc) {
drm_crtc_vblank_reset(&crtc->base);
- if (crtc->active)
+ if (crtc->base.state->active)
drm_crtc_vblank_on(&crtc->base);
}
@@ -15979,8 +15821,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_sanitize_encoder(encoder);
for_each_intel_crtc(&dev_priv->drm, crtc) {
+ crtc_state = to_intel_crtc_state(crtc->base.state);
intel_sanitize_crtc(crtc, ctx);
- intel_dump_pipe_config(crtc, crtc->config,
+ intel_dump_pipe_config(crtc, crtc_state,
"[setup_hw_state]");
}
@@ -16014,7 +15857,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
for_each_intel_crtc(dev, crtc) {
u64 put_domains;
- put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+ put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
if (WARN_ON(put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
@@ -16058,29 +15902,6 @@ void intel_display_resume(struct drm_device *dev)
drm_atomic_state_put(state);
}
-int intel_connector_register(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- int ret;
-
- ret = intel_backlight_device_register(intel_connector);
- if (ret)
- goto err;
-
- return 0;
-
-err:
- return ret;
-}
-
-void intel_connector_unregister(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- intel_backlight_device_unregister(intel_connector);
- intel_panel_destroy_backlight(connector);
-}
-
static void intel_hpd_poll_fini(struct drm_device *dev)
{
struct intel_connector *connector;
@@ -16091,9 +15912,9 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->modeset_retry_work.func)
cancel_work_sync(&connector->modeset_retry_work);
- if (connector->hdcp_shim) {
- cancel_delayed_work_sync(&connector->hdcp_check_work);
- cancel_work_sync(&connector->hdcp_prop_work);
+ if (connector->hdcp.shim) {
+ cancel_delayed_work_sync(&connector->hdcp.check_work);
+ cancel_work_sync(&connector->hdcp.prop_work);
}
}
drm_connector_list_iter_end(&conn_iter);
@@ -16133,18 +15954,13 @@ void intel_modeset_cleanup(struct drm_device *dev)
drm_mode_config_cleanup(dev);
- intel_cleanup_overlay(dev_priv);
+ intel_overlay_cleanup(dev_priv);
intel_teardown_gmbus(dev_priv);
destroy_workqueue(dev_priv->modeset_wq);
-}
-void intel_connector_attach_encoder(struct intel_connector *connector,
- struct intel_encoder *encoder)
-{
- connector->encoder = encoder;
- drm_connector_attach_encoder(&connector->base, &encoder->base);
+ intel_fbc_cleanup_cfb(dev_priv);
}
/*
@@ -16234,7 +16050,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
};
int i;
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return NULL;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 9fac67e31205..4262452963b3 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -43,6 +43,11 @@ enum i915_gpio {
GPIOM,
};
+/*
+ * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the
+ * rest have consecutive values and match the enum values of transcoders
+ * with a 1:1 transcoder -> pipe mapping.
+ */
enum pipe {
INVALID_PIPE = -1,
@@ -57,12 +62,25 @@ enum pipe {
#define pipe_name(p) ((p) + 'A')
enum transcoder {
- TRANSCODER_A = 0,
- TRANSCODER_B,
- TRANSCODER_C,
+ /*
+ * The following transcoders have a 1:1 transcoder -> pipe mapping,
+ * keep their values fixed: the code assumes that TRANSCODER_A=0, the
+ * rest have consecutive values and match the enum values of the pipes
+ * they map to.
+ */
+ TRANSCODER_A = PIPE_A,
+ TRANSCODER_B = PIPE_B,
+ TRANSCODER_C = PIPE_C,
+
+ /*
+ * The following transcoders can map to any pipe, their enum value
+ * doesn't need to stay fixed.
+ */
TRANSCODER_EDP,
- TRANSCODER_DSI_A,
- TRANSCODER_DSI_C,
+ TRANSCODER_DSI_0,
+ TRANSCODER_DSI_1,
+ TRANSCODER_DSI_A = TRANSCODER_DSI_0, /* legacy DSI */
+ TRANSCODER_DSI_C = TRANSCODER_DSI_1, /* legacy DSI */
I915_MAX_TRANSCODERS
};
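Because TRANSCODER_A..TRANSCODER_C now share their pipe's enum values, code that needs the transcoder for one of those pipes can in principle rely on a plain cast; a minimal sketch of that assumption (the helper name is hypothetical and not part of this patch):

	static inline enum transcoder pipe_to_transcoder(enum pipe pipe)
	{
		/* Valid only for pipes with a 1:1 transcoder mapping (A/B/C). */
		return (enum transcoder)pipe;
	}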
@@ -120,6 +138,9 @@ enum plane_id {
PLANE_SPRITE0,
PLANE_SPRITE1,
PLANE_SPRITE2,
+ PLANE_SPRITE3,
+ PLANE_SPRITE4,
+ PLANE_SPRITE5,
PLANE_CURSOR,
I915_MAX_PLANES,
@@ -221,6 +242,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_TRANSCODER_EDP_VDSC,
POWER_DOMAIN_TRANSCODER_DSI_A,
POWER_DOMAIN_TRANSCODER_DSI_C,
POWER_DOMAIN_PORT_DDI_A_LANES,
@@ -363,7 +385,7 @@ struct intel_link_m_n {
(__dev_priv)->power_domains.power_well_count; \
(__power_well)++)
-#define for_each_power_well_rev(__dev_priv, __power_well) \
+#define for_each_power_well_reverse(__dev_priv, __power_well) \
for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
(__dev_priv)->power_domains.power_well_count - 1; \
(__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
@@ -373,10 +395,18 @@ struct intel_link_m_n {
for_each_power_well(__dev_priv, __power_well) \
for_each_if((__power_well)->desc->domains & (__domain_mask))
-#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well_rev(__dev_priv, __power_well) \
+#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well_reverse(__dev_priv, __power_well) \
for_each_if((__power_well)->desc->domains & (__domain_mask))
+#define for_each_old_intel_plane_in_state(__state, plane, old_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), 1); \
+ (__i)++) \
+ for_each_if(plane)
+
#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_total_plane && \
@@ -402,10 +432,18 @@ struct intel_link_m_n {
(__i)++) \
for_each_if(plane)
-void intel_link_compute_m_n(int bpp, int nlanes,
+#define for_each_oldnew_intel_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_crtc && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(crtc)
+
+void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
bool constant_n);
-
bool is_ccs_modifier(u64 modifier);
#endif
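A minimal usage sketch of the new for_each_oldnew_intel_crtc_in_state() iterator, assuming an atomic commit path with an intel_atomic_state pointer named state (illustrative only, not part of this patch):

	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/* Compare the old and new state of each CRTC in the update. */
		if (old_crtc_state->base.active != new_crtc_state->base.active)
			DRM_DEBUG_KMS("[CRTC:%d] active change\n",
				      crtc->base.base.id);
	}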
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 13f9b56a9ce7..fdd2cbc56fa3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -45,6 +45,19 @@
#define DP_DPRX_ESI_LEN 14
+/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
+#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440
+#define DP_DSC_MIN_SUPPORTED_BPC 8
+#define DP_DSC_MAX_SUPPORTED_BPC 10
+
+/* DP DSC throughput values used for slice count calculations KPixels/s */
+#define DP_DSC_PEAK_PIXEL_RATE 2720000
+#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
+#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
+
+/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
+#define DP_DSC_FEC_OVERHEAD_FACTOR 976
+
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
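As a worked cross-check of the DSC constants above (arithmetic only): two small-joiner FIFOs of 640 x 6 bytes hold 2 * 640 * 6 * 8 = 61440 bits, consistent with DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER being divided by the horizontal width to yield bits per pixel, and the FEC overhead factor (100 - 2.4)/100 = 0.976 is stored scaled by 1000 as 976.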
@@ -93,6 +106,14 @@ static const struct dp_link_dpll chv_dpll[] = {
{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
+/* Constants for DP DSC configurations */
+static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
+
+/* With a single pipe configuration, the HW is capable of supporting a
+ * maximum of 4 slices per line.
+ */
+static const u8 valid_dsc_slicecount[] = {1, 2, 4};
+
/**
* intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
@@ -222,138 +243,6 @@ intel_dp_link_required(int pixel_clock, int bpp)
return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
-void icl_program_mg_dp_mode(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum port port = intel_dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 ln0, ln1, lane_info;
-
- if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
- return;
-
- ln0 = I915_READ(MG_DP_MODE(port, 0));
- ln1 = I915_READ(MG_DP_MODE(port, 1));
-
- switch (intel_dig_port->tc_type) {
- case TC_PORT_TYPEC:
- ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
- ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
-
- lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
- DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
- DP_LANE_ASSIGNMENT_SHIFT(tc_port);
-
- switch (lane_info) {
- case 0x1:
- case 0x4:
- break;
- case 0x2:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
- break;
- case 0x3:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- case 0x8:
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
- break;
- case 0xC:
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- case 0xF:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- default:
- MISSING_CASE(lane_info);
- }
- break;
-
- case TC_PORT_LEGACY:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
- break;
-
- default:
- MISSING_CASE(intel_dig_port->tc_type);
- return;
- }
-
- I915_WRITE(MG_DP_MODE(port, 0), ln0);
- I915_WRITE(MG_DP_MODE(port, 1), ln1);
-}
-
-void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
- u32 val;
- int i;
-
- if (tc_port == PORT_TC_NONE)
- return;
-
- for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
- val = I915_READ(mg_regs[i]);
- val |= MG_DP_MODE_CFG_TR2PWR_GATING |
- MG_DP_MODE_CFG_TRPWR_GATING |
- MG_DP_MODE_CFG_CLNPWR_GATING |
- MG_DP_MODE_CFG_DIGPWR_GATING |
- MG_DP_MODE_CFG_GAONPWR_GATING;
- I915_WRITE(mg_regs[i], val);
- }
-
- val = I915_READ(MG_MISC_SUS0(tc_port));
- val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
- MG_MISC_SUS0_CFG_TR2PWR_GATING |
- MG_MISC_SUS0_CFG_CL2PWR_GATING |
- MG_MISC_SUS0_CFG_GAONPWR_GATING |
- MG_MISC_SUS0_CFG_TRPWR_GATING |
- MG_MISC_SUS0_CFG_CL1PWR_GATING |
- MG_MISC_SUS0_CFG_DGPWR_GATING;
- I915_WRITE(MG_MISC_SUS0(tc_port), val);
-}
-
-void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
- u32 val;
- int i;
-
- if (tc_port == PORT_TC_NONE)
- return;
-
- for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
- val = I915_READ(mg_regs[i]);
- val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
- MG_DP_MODE_CFG_TRPWR_GATING |
- MG_DP_MODE_CFG_CLNPWR_GATING |
- MG_DP_MODE_CFG_DIGPWR_GATING |
- MG_DP_MODE_CFG_GAONPWR_GATING);
- I915_WRITE(mg_regs[i], val);
- }
-
- val = I915_READ(MG_MISC_SUS0(tc_port));
- val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
- MG_MISC_SUS0_CFG_TR2PWR_GATING |
- MG_MISC_SUS0_CFG_CL2PWR_GATING |
- MG_MISC_SUS0_CFG_GAONPWR_GATING |
- MG_MISC_SUS0_CFG_TRPWR_GATING |
- MG_MISC_SUS0_CFG_CL1PWR_GATING |
- MG_MISC_SUS0_CFG_DGPWR_GATING);
- I915_WRITE(MG_MISC_SUS0(tc_port), val);
-}
-
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
@@ -455,7 +344,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 10) {
source_rates = cnl_rates;
size = ARRAY_SIZE(cnl_rates);
- if (INTEL_GEN(dev_priv) == 10)
+ if (IS_GEN10(dev_priv))
max_rate = cnl_max_source_rate(intel_dp);
else
max_rate = icl_max_source_rate(intel_dp);
@@ -616,9 +505,12 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
int max_dotclk;
+ u16 dsc_max_output_bpp = 0;
+ u8 dsc_slice_count = 0;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -641,7 +533,33 @@ intel_dp_mode_valid(struct drm_connector *connector,
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(target_clock, 18);
- if (mode_rate > max_rate || target_clock > max_dotclk)
+ /*
+ * Output bpp is stored in 6.4 format so right shift by 4 to get the
+ * integer value since we support only integer values of bpp.
+ */
+ if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
+ drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
+ if (intel_dp_is_edp(intel_dp)) {
+ dsc_max_output_bpp =
+ drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
+ dsc_slice_count =
+ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+ true);
+ } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
+ dsc_max_output_bpp =
+ intel_dp_dsc_get_output_bpp(max_link_clock,
+ max_lanes,
+ target_clock,
+ mode->hdisplay) >> 4;
+ dsc_slice_count =
+ intel_dp_dsc_get_slice_count(intel_dp,
+ target_clock,
+ mode->hdisplay);
+ }
+ }
+
+ if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
+ target_clock > max_dotclk)
return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
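For illustration of the 6.4 (U6.4) fixed-point handling above: a raw sink value of 0x140 encodes 20.0 bpp, and 0x140 >> 4 = 20 is the integer bpp used here; any fractional bits are simply discarded.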
@@ -690,7 +608,8 @@ static void pps_lock(struct intel_dp *intel_dp)
* See intel_power_sequencer_reset() why we need
* a power domain reference here.
*/
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv,
+ intel_aux_power_domain(dp_to_dig_port(intel_dp)));
mutex_lock(&dev_priv->pps_mutex);
}
@@ -701,7 +620,8 @@ static void pps_unlock(struct intel_dp *intel_dp)
mutex_unlock(&dev_priv->pps_mutex);
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(dp_to_dig_port(intel_dp)));
}
static void
@@ -1156,6 +1076,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
if (index)
return 0;
@@ -1165,7 +1086,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* like to run at 2MHz. So, take the cdclk or PCH rawclk value and
* divide by 2000 and use that
*/
- if (intel_dp->aux_ch == AUX_CH_A)
+ if (dig_port->aux_ch == AUX_CH_A)
return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
else
return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
@@ -1174,8 +1095,9 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
+ if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
@@ -1503,80 +1425,12 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return ret;
}
-static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp)
-{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[port];
- enum aux_ch aux_ch;
-
- if (!info->alternate_aux_channel) {
- aux_ch = (enum aux_ch) port;
-
- DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
- aux_ch_name(aux_ch), port_name(port));
- return aux_ch;
- }
-
- switch (info->alternate_aux_channel) {
- case DP_AUX_A:
- aux_ch = AUX_CH_A;
- break;
- case DP_AUX_B:
- aux_ch = AUX_CH_B;
- break;
- case DP_AUX_C:
- aux_ch = AUX_CH_C;
- break;
- case DP_AUX_D:
- aux_ch = AUX_CH_D;
- break;
- case DP_AUX_E:
- aux_ch = AUX_CH_E;
- break;
- case DP_AUX_F:
- aux_ch = AUX_CH_F;
- break;
- default:
- MISSING_CASE(info->alternate_aux_channel);
- aux_ch = AUX_CH_A;
- break;
- }
-
- DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
- aux_ch_name(aux_ch), port_name(port));
-
- return aux_ch;
-}
-
-static enum intel_display_power_domain
-intel_aux_power_domain(struct intel_dp *intel_dp)
-{
- switch (intel_dp->aux_ch) {
- case AUX_CH_A:
- return POWER_DOMAIN_AUX_A;
- case AUX_CH_B:
- return POWER_DOMAIN_AUX_B;
- case AUX_CH_C:
- return POWER_DOMAIN_AUX_C;
- case AUX_CH_D:
- return POWER_DOMAIN_AUX_D;
- case AUX_CH_E:
- return POWER_DOMAIN_AUX_E;
- case AUX_CH_F:
- return POWER_DOMAIN_AUX_F;
- default:
- MISSING_CASE(intel_dp->aux_ch);
- return POWER_DOMAIN_AUX_A;
- }
-}
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_B:
@@ -1592,7 +1446,8 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_B:
@@ -1608,7 +1463,8 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_A:
@@ -1626,7 +1482,8 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_A:
@@ -1644,7 +1501,8 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_A:
@@ -1663,7 +1521,8 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_A:
@@ -1689,10 +1548,8 @@ static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
-
- intel_dp->aux_ch = intel_aux_ch(intel_dp);
- intel_dp->aux_power_domain = intel_aux_power_domain(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
if (INTEL_GEN(dev_priv) >= 9) {
intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
@@ -1853,6 +1710,41 @@ struct link_config_limits {
int min_bpp, max_bpp;
};
+static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return INTEL_GEN(dev_priv) >= 11 &&
+ pipe_config->cpu_transcoder != TRANSCODER_A;
+}
+
+static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
+ drm_dp_sink_supports_fec(intel_dp->fec_capable);
+}
+
+static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return INTEL_GEN(dev_priv) >= 10 &&
+ pipe_config->cpu_transcoder != TRANSCODER_A;
+}
+
+static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
+ return false;
+
+ return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
+ drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
+}
+
static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config)
{
@@ -1951,14 +1843,158 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return false;
}
+/* Optimize link config in order: max bpp, min lanes, min clock */
+static bool
+intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ const struct link_config_limits *limits)
+{
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ int bpp, clock, lane_count;
+ int mode_rate, link_clock, link_avail;
+
+ for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+ mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+ bpp);
+
+ for (lane_count = limits->min_lane_count;
+ lane_count <= limits->max_lane_count;
+ lane_count <<= 1) {
+ for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
+ link_clock = intel_dp->common_rates[clock];
+ link_avail = intel_dp_max_data_rate(link_clock,
+ lane_count);
+
+ if (mode_rate <= link_avail) {
+ pipe_config->lane_count = lane_count;
+ pipe_config->pipe_bpp = bpp;
+ pipe_config->port_clock = link_clock;
+
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
+{
+ int i, num_bpc;
+ u8 dsc_bpc[3] = {0};
+
+ num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
+ dsc_bpc);
+ for (i = 0; i < num_bpc; i++) {
+ if (dsc_max_bpc >= dsc_bpc[i])
+ return dsc_bpc[i] * 3;
+ }
+
+ return 0;
+}
+
+static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ u8 dsc_max_bpc;
+ int pipe_bpp;
+
+ if (!intel_dp_supports_dsc(intel_dp, pipe_config))
+ return false;
+
+ dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
+ conn_state->max_requested_bpc);
+
+ pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
+ if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
+ DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
+ return false;
+ }
+
+ /*
+ * For now enable DSC for max bpp, max link rate, max lane count.
+ * Optimize this later for the minimum possible link rate/lane count
+ * with DSC enabled for the requested mode.
+ */
+ pipe_config->pipe_bpp = pipe_bpp;
+ pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
+ pipe_config->lane_count = limits->max_lane_count;
+
+ if (intel_dp_is_edp(intel_dp)) {
+ pipe_config->dsc_params.compressed_bpp =
+ min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
+ pipe_config->pipe_bpp);
+ pipe_config->dsc_params.slice_count =
+ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+ true);
+ } else {
+ u16 dsc_max_output_bpp;
+ u8 dsc_dp_slice_count;
+
+ dsc_max_output_bpp =
+ intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay);
+ dsc_dp_slice_count =
+ intel_dp_dsc_get_slice_count(intel_dp,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay);
+ if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
+ DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
+ return false;
+ }
+ pipe_config->dsc_params.compressed_bpp = min_t(u16,
+ dsc_max_output_bpp >> 4,
+ pipe_config->pipe_bpp);
+ pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
+ }
+ /*
+ * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
+ * is greater than the maximum Cdclock and if slice count is even
+ * then we need to use 2 VDSC instances.
+ */
+ if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
+ if (pipe_config->dsc_params.slice_count > 1) {
+ pipe_config->dsc_params.dsc_split = true;
+ } else {
+ DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
+ return false;
+ }
+ }
+ if (intel_dp_compute_dsc_params(intel_dp, pipe_config) < 0) {
+ DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
+ "Compressed BPP = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc_params.compressed_bpp);
+ return false;
+ }
+ pipe_config->dsc_params.compression_enable = true;
+ DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
+ "Compressed Bpp = %d Slice Count = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc_params.compressed_bpp,
+ pipe_config->dsc_params.slice_count);
+
+ return true;
+}
+
static bool
intel_dp_compute_link_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct link_config_limits limits;
int common_len;
+ bool ret;
common_len = intel_dp_common_len_rate_limit(intel_dp,
intel_dp->max_link_rate);
@@ -1975,13 +2011,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
limits.min_bpp = 6 * 3;
limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
- if (intel_dp_is_edp(intel_dp)) {
+ if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
/*
* Use the maximum clock and number of lanes the eDP panel
- * advertizes being capable of. The panels are generally
- * designed to support only a single clock and lane
- * configuration, and typically these values correspond to the
- * native resolution of the panel.
+ * advertises being capable of. The eDP 1.3 and earlier panels
+ * are generally designed to support only a single clock and
+ * lane configuration, and typically these values correspond to
+ * the native resolution of the panel. With eDP 1.4 rate select
+ * and DSC, this is decreasingly the case, and we need to be
+ * able to select less than maximum link config.
*/
limits.min_lane_count = limits.max_lane_count;
limits.min_clock = limits.max_clock;
@@ -1995,23 +2033,52 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp->common_rates[limits.max_clock],
limits.max_bpp, adjusted_mode->crtc_clock);
- /*
- * Optimize for slow and wide. This is the place to add alternative
- * optimization policy.
- */
- if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits))
- return false;
-
- DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
- pipe_config->lane_count, pipe_config->port_clock,
- pipe_config->pipe_bpp);
-
- DRM_DEBUG_KMS("DP link rate required %i available %i\n",
- intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->pipe_bpp),
- intel_dp_max_data_rate(pipe_config->port_clock,
- pipe_config->lane_count));
+ if (intel_dp_is_edp(intel_dp))
+ /*
+ * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
+ * section A.1: "It is recommended that the minimum number of
+ * lanes be used, using the minimum link rate allowed for that
+ * lane configuration."
+ *
+ * Note that we use the max clock and lane count for eDP 1.3 and
+ * earlier, and fast vs. wide is irrelevant.
+ */
+ ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
+ &limits);
+ else
+ /* Optimize for slow and wide. */
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
+ &limits);
+
+ /* enable compression if the mode doesn't fit available BW */
+ if (!ret) {
+ if (!intel_dp_dsc_compute_config(intel_dp, pipe_config,
+ conn_state, &limits))
+ return false;
+ }
+
+ if (pipe_config->dsc_params.compression_enable) {
+ DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp,
+ pipe_config->dsc_params.compressed_bpp);
+
+ DRM_DEBUG_KMS("DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->dsc_params.compressed_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
+ } else {
+ DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp);
+ DRM_DEBUG_KMS("DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->pipe_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
+ }
return true;
}
@@ -2023,6 +2090,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
enum port port = encoder->port;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
@@ -2034,6 +2102,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
pipe_config->has_pch_encoder = true;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ if (lspcon->active)
+ lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
+
pipe_config->has_drrs = false;
if (IS_G4X(dev_priv) || port == PORT_A)
pipe_config->has_audio = false;
@@ -2072,7 +2144,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
- if (!intel_dp_compute_link_config(encoder, pipe_config))
+ pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
+ intel_dp_supports_fec(intel_dp, pipe_config);
+
+ if (!intel_dp_compute_link_config(encoder, pipe_config, conn_state))
return false;
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
@@ -2090,11 +2165,20 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
}
- intel_link_compute_m_n(pipe_config->pipe_bpp, pipe_config->lane_count,
- adjusted_mode->crtc_clock,
- pipe_config->port_clock,
- &pipe_config->dp_m_n,
- constant_n);
+ if (!pipe_config->dsc_params.compression_enable)
+ intel_link_compute_m_n(pipe_config->pipe_bpp,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n,
+ constant_n);
+ else
+ intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n,
+ constant_n);
if (intel_connector->panel.downclock_mode != NULL &&
dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -2338,7 +2422,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
if (edp_have_panel_vdd(intel_dp))
return need_to_disable;
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv,
+ intel_aux_power_domain(intel_dig_port));
DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
port_name(intel_dig_port->base.port));
@@ -2424,7 +2509,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if ((pp & PANEL_POWER_ON) == 0)
intel_dp->panel_power_off_time = ktime_get_boottime();
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(intel_dig_port));
}
static void edp_panel_vdd_work(struct work_struct *__work)
@@ -2537,6 +2623,7 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
static void edp_panel_off(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2546,10 +2633,10 @@ static void edp_panel_off(struct intel_dp *intel_dp)
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ port_name(dig_port->base.port));
WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ port_name(dig_port->base.port));
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -2568,7 +2655,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
intel_dp->panel_power_off_time = ktime_get_boottime();
/* We got a reference when we enabled the VDD. */
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port));
}
void intel_edp_panel_off(struct intel_dp *intel_dp)
@@ -2788,6 +2875,22 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable)
+{
+ int ret;
+
+ if (!crtc_state->dsc_params.compression_enable)
+ return;
+
+ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
+ enable ? DP_DECOMPRESSION_EN : 0);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
+ enable ? "enable" : "disable");
+}
+
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
@@ -3900,6 +4003,40 @@ intel_dp_read_dpcd(struct intel_dp *intel_dp)
return intel_dp->dpcd[DP_DPCD_REV] != 0;
}
+static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
+{
+ /*
+ * Clear the cached register set to avoid using stale values
+ * for the sinks that do not support DSC.
+ */
+ memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
+
+ /* Clear fec_capable to avoid using stale values */
+ intel_dp->fec_capable = 0;
+
+ /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
+ intel_dp->edp_dpcd[0] >= DP_EDP_14) {
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
+ intel_dp->dsc_dpcd,
+ sizeof(intel_dp->dsc_dpcd)) < 0)
+ DRM_ERROR("Failed to read DPCD register 0x%x\n",
+ DP_DSC_SUPPORT);
+
+ DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
+ (int)sizeof(intel_dp->dsc_dpcd),
+ intel_dp->dsc_dpcd);
+
+ /* FEC is supported only on DP 1.4 */
+ if (!intel_dp_is_edp(intel_dp) &&
+ drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
+ &intel_dp->fec_capable) < 0)
+ DRM_ERROR("Failed to read FEC DPCD register\n");
+
+ DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
+ }
+}
+
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
@@ -3976,6 +4113,10 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
intel_dp_set_common_rates(intel_dp);
+ /* Read the eDP DSC DPCD registers */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ intel_dp_get_dsc_sink_cap(intel_dp);
+
return true;
}
@@ -3983,8 +4124,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
- u8 sink_count;
-
if (!intel_dp_read_dpcd(intel_dp))
return false;
@@ -3994,25 +4133,35 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
intel_dp_set_common_rates(intel_dp);
}
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &sink_count) <= 0)
- return false;
-
/*
- * Sink count can change between short pulse hpd hence
- * a member variable in intel_dp will track any changes
- * between short pulse interrupts.
+ * Some eDP panels do not set a valid value for the sink count, which is
+ * why we don't bother reading it here or in intel_edp_init_dpcd().
*/
- intel_dp->sink_count = DP_GET_SINK_COUNT(sink_count);
+ if (!intel_dp_is_edp(intel_dp)) {
+ u8 count;
+ ssize_t r;
- /*
- * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
- * a dongle is present but no display. Unless we require to know
- * if a dongle is present or not, we don't need to update
- * downstream port information. So, an early return here saves
- * time from performing other operations which are not required.
- */
- if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count)
- return false;
+ r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
+ if (r < 1)
+ return false;
+
+ /*
+ * Sink count can change between short pulse hpd hence
+ * a member variable in intel_dp will track any changes
+ * between short pulse interrupts.
+ */
+ intel_dp->sink_count = DP_GET_SINK_COUNT(count);
+
+ /*
+ * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
+ * a dongle is present but no display. Unless we require to know
+ * if a dongle is present or not, we don't need to update
+ * downstream port information. So, an early return here saves
+ * time from performing other operations which are not required.
+ */
+ if (!intel_dp->sink_count)
+ return false;
+ }
if (!drm_dp_is_branch(intel_dp->dpcd))
return true; /* native DP sink */
@@ -4029,16 +4178,10 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
}
static bool
-intel_dp_can_mst(struct intel_dp *intel_dp)
+intel_dp_sink_can_mst(struct intel_dp *intel_dp)
{
u8 mstm_cap;
- if (!i915_modparams.enable_dp_mst)
- return false;
-
- if (!intel_dp->can_mst)
- return false;
-
if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
return false;
@@ -4048,34 +4191,36 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
return mstm_cap & DP_MST_CAP;
}
+static bool
+intel_dp_can_mst(struct intel_dp *intel_dp)
+{
+ return i915_modparams.enable_dp_mst &&
+ intel_dp->can_mst &&
+ intel_dp_sink_can_mst(intel_dp);
+}
+
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
- if (!i915_modparams.enable_dp_mst)
- return;
+ struct intel_encoder *encoder =
+ &dp_to_dig_port(intel_dp)->base;
+ bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
+
+ DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
+ port_name(encoder->port), yesno(intel_dp->can_mst),
+ yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
if (!intel_dp->can_mst)
return;
- intel_dp->is_mst = intel_dp_can_mst(intel_dp);
-
- if (intel_dp->is_mst)
- DRM_DEBUG_KMS("Sink is MST capable\n");
- else
- DRM_DEBUG_KMS("Sink is not MST capable\n");
+ intel_dp->is_mst = sink_can_mst &&
+ i915_modparams.enable_dp_mst;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
}
static bool
-intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
-{
- return drm_dp_dpcd_readb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
- sink_irq_vector) == 1;
-}
-
-static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
@@ -4083,6 +4228,91 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
DP_DPRX_ESI_LEN;
}
+u16 intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
+ int mode_clock, int mode_hdisplay)
+{
+ u16 bits_per_pixel, max_bpp_small_joiner_ram;
+ int i;
+
+ /*
+ * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
+ * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
+ * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
+ * for MST -> TimeSlotsPerMTP has to be calculated
+ */
+ bits_per_pixel = (link_clock * lane_count * 8 *
+ DP_DSC_FEC_OVERHEAD_FACTOR) /
+ mode_clock;
+
+ /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
+ max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
+ mode_hdisplay;
+
+ /*
+ * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
+ * check, output bpp from small joiner RAM check)
+ */
+ bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
+
+ /* Error out if the max bpp is less than smallest allowed valid bpp */
+ if (bits_per_pixel < valid_dsc_bpp[0]) {
+ DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
+ return 0;
+ }
+
+ /* Find the nearest match in the array of known BPPs from VESA */
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
+ if (bits_per_pixel < valid_dsc_bpp[i + 1])
+ break;
+ }
+ bits_per_pixel = valid_dsc_bpp[i];
+
+ /*
+ * Compressed BPP is in U6.4 format, so multiply by 16; for Gen 11 the
+ * fractional part is 0.
+ */
+ return bits_per_pixel << 4;
+}
+
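A worked example of the small-joiner bound above, assuming a 3840-pixel-wide mode and that the link-bandwidth term is not the limiting factor: 61440 / 3840 = 16 bpp; the loop over valid_dsc_bpp then snaps that down to 15, and the function returns 15 << 4 = 240 in U6.4 format.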
+u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+ int mode_clock,
+ int mode_hdisplay)
+{
+ u8 min_slice_count, i;
+ int max_slice_width;
+
+ if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
+ min_slice_count = DIV_ROUND_UP(mode_clock,
+ DP_DSC_MAX_ENC_THROUGHPUT_0);
+ else
+ min_slice_count = DIV_ROUND_UP(mode_clock,
+ DP_DSC_MAX_ENC_THROUGHPUT_1);
+
+ max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
+ if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
+ DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
+ max_slice_width);
+ return 0;
+ }
+ /* Also take into account max slice width */
+ min_slice_count = min_t(uint8_t, min_slice_count,
+ DIV_ROUND_UP(mode_hdisplay,
+ max_slice_width));
+
+ /* Find the closest match to the valid slice count values */
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
+ if (valid_dsc_slicecount[i] >
+ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+ false))
+ break;
+ if (min_slice_count <= valid_dsc_slicecount[i])
+ return valid_dsc_slicecount[i];
+ }
+
+ DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
+ return 0;
+}
+
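A worked example of the slice-count computation above, assuming a 3840x2160 mode with a 533250 kHz pixel clock and a sink whose maximum slice width is 2560 and which supports at least 2 slices: 533250 <= DP_DSC_PEAK_PIXEL_RATE, so min_slice_count = DIV_ROUND_UP(533250, 340000) = 2; the width-derived count DIV_ROUND_UP(3840, 2560) is also 2, and the matching entry in valid_dsc_slicecount is 2.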
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
int status = 0;
@@ -4341,6 +4571,17 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
if (!intel_dp->link_trained)
return false;
+ /*
+ * While the PSR source HW is enabled it controls the main-link,
+ * enabling and disabling the sending of frames, so trying to do a
+ * retrain will fail because the link may not be on, or it could mix
+ * training patterns and frame data at the same time, causing the
+ * retrain to fail.
+ * Also, when exiting PSR, the HW will retrain the link anyway, fixing
+ * any link status error.
+ */
+ if (intel_psr_enabled(intel_dp))
+ return false;
+
if (!intel_dp_get_link_status(intel_dp, link_status))
return false;
@@ -4403,7 +4644,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
/* Suppress underruns caused by re-training */
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
- if (crtc->config->has_pch_encoder)
+ if (crtc_state->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), false);
@@ -4414,7 +4655,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
intel_wait_for_vblank(dev_priv, crtc->pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
- if (crtc->config->has_pch_encoder)
+ if (crtc_state->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), true);
@@ -4462,6 +4703,29 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder,
return changed;
}
+static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
+{
+ u8 val;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
+ return;
+
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
+
+ if (val & DP_AUTOMATED_TEST_REQUEST)
+ intel_dp_handle_test_request(intel_dp);
+
+ if (val & DP_CP_IRQ)
+ intel_hdcp_check_link(intel_dp->attached_connector);
+
+ if (val & DP_SINK_SPECIFIC_IRQ)
+ DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
+}
+
/*
* According to DP spec
* 5.1.2:
@@ -4479,7 +4743,6 @@ static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u8 sink_irq_vector = 0;
u8 old_sink_count = intel_dp->sink_count;
bool ret;
@@ -4502,20 +4765,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
return false;
}
- /* Try to read the source of the interrupt */
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
- intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
- sink_irq_vector != 0) {
- /* Clear interrupt source */
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_DEVICE_SERVICE_IRQ_VECTOR,
- sink_irq_vector);
-
- if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
- intel_dp_handle_test_request(intel_dp);
- if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
- DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
- }
+ intel_dp_check_service_irq(intel_dp);
/* Handle CEC interrupts, if any */
drm_dp_cec_irq(&intel_dp->aux);
@@ -4810,6 +5060,9 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
type_str);
}
+static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dig_port);
+
/*
* This function implements the first part of the Connect Flow described by our
* specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
@@ -4864,9 +5117,7 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
if (dig_port->tc_type == TC_PORT_TYPEC &&
!(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
- val = I915_READ(PORT_TX_DFLEXDPCSSS);
- val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
- I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
+ icl_tc_phy_disconnect(dev_priv, dig_port);
return false;
}
@@ -4881,21 +5132,24 @@ static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
struct intel_digital_port *dig_port)
{
enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
- u32 val;
- if (dig_port->tc_type != TC_PORT_LEGACY &&
- dig_port->tc_type != TC_PORT_TYPEC)
+ if (dig_port->tc_type == TC_PORT_UNKNOWN)
return;
/*
- * This function may be called many times in a row without an HPD event
- * in between, so try to avoid the write when we can.
+ * The TBT disconnection flow is to read the live status, which was
+ * already done in the caller.
*/
- val = I915_READ(PORT_TX_DFLEXDPCSSS);
- if (val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)) {
+ if (dig_port->tc_type == TC_PORT_TYPEC ||
+ dig_port->tc_type == TC_PORT_LEGACY) {
+ u32 val;
+
+ val = I915_READ(PORT_TX_DFLEXDPCSSS);
val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
}
+
+ dig_port->tc_type = TC_PORT_UNKNOWN;
}
/*
@@ -4945,19 +5199,14 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- switch (encoder->hpd_pin) {
- case HPD_PORT_A:
- case HPD_PORT_B:
+ if (intel_port_is_combophy(dev_priv, encoder->port))
return icl_combo_port_connected(dev_priv, dig_port);
- case HPD_PORT_C:
- case HPD_PORT_D:
- case HPD_PORT_E:
- case HPD_PORT_F:
+ else if (intel_port_is_tc(dev_priv, encoder->port))
return icl_tc_port_connected(dev_priv, dig_port);
- default:
+ else
MISSING_CASE(encoder->hpd_pin);
- return false;
- }
+
+ return false;
}
/*
@@ -4982,20 +5231,23 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
return g4x_digital_port_connected(encoder);
}
- if (IS_GEN5(dev_priv))
- return ilk_digital_port_connected(encoder);
- else if (IS_GEN6(dev_priv))
- return snb_digital_port_connected(encoder);
- else if (IS_GEN7(dev_priv))
- return ivb_digital_port_connected(encoder);
- else if (IS_GEN8(dev_priv))
- return bdw_digital_port_connected(encoder);
+ if (INTEL_GEN(dev_priv) >= 11)
+ return icl_digital_port_connected(encoder);
+ else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv))
+ return spt_digital_port_connected(encoder);
else if (IS_GEN9_LP(dev_priv))
return bxt_digital_port_connected(encoder);
- else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
- return spt_digital_port_connected(encoder);
- else
- return icl_digital_port_connected(encoder);
+ else if (IS_GEN8(dev_priv))
+ return bdw_digital_port_connected(encoder);
+ else if (IS_GEN7(dev_priv))
+ return ivb_digital_port_connected(encoder);
+ else if (IS_GEN6(dev_priv))
+ return snb_digital_port_connected(encoder);
+ else if (IS_GEN5(dev_priv))
+ return ilk_digital_port_connected(encoder);
+
+ MISSING_CASE(INTEL_GEN(dev_priv));
+ return false;
}
static struct edid *
@@ -5042,28 +5294,35 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
}
static int
-intel_dp_long_pulse(struct intel_connector *connector,
- struct drm_modeset_acquire_ctx *ctx)
+intel_dp_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
enum drm_connector_status status;
- u8 sink_irq_vector = 0;
+ enum intel_display_power_domain aux_domain =
+ intel_aux_power_domain(dig_port);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv, aux_domain);
/* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
- else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base))
+ else if (intel_digital_port_connected(encoder))
status = intel_dp_detect_dpcd(intel_dp);
else
status = connector_status_disconnected;
if (status == connector_status_disconnected) {
memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
+ memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
if (intel_dp->is_mst) {
DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
@@ -5089,6 +5348,10 @@ intel_dp_long_pulse(struct intel_connector *connector,
intel_dp_print_rates(intel_dp);
+ /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
+ if (INTEL_GEN(dev_priv) >= 11)
+ intel_dp_get_dsc_sink_cap(intel_dp);
+
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
@@ -5109,9 +5372,13 @@ intel_dp_long_pulse(struct intel_connector *connector,
* with an IRQ_HPD, so force a link status check.
*/
if (!intel_dp_is_edp(intel_dp)) {
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int ret;
- intel_dp_retrain_link(encoder, ctx);
+ ret = intel_dp_retrain_link(encoder, ctx);
+ if (ret) {
+ intel_display_power_put(dev_priv, aux_domain);
+ return ret;
+ }
}
/*
@@ -5123,61 +5390,17 @@ intel_dp_long_pulse(struct intel_connector *connector,
intel_dp->aux.i2c_defer_count = 0;
intel_dp_set_edid(intel_dp);
- if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
+ if (intel_dp_is_edp(intel_dp) ||
+ to_intel_connector(connector)->detect_edid)
status = connector_status_connected;
- intel_dp->detect_done = true;
- /* Try to read the source of the interrupt */
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
- intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
- sink_irq_vector != 0) {
- /* Clear interrupt source */
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_DEVICE_SERVICE_IRQ_VECTOR,
- sink_irq_vector);
-
- if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
- intel_dp_handle_test_request(intel_dp);
- if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
- DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
- }
+ intel_dp_check_service_irq(intel_dp);
out:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
- return status;
-}
-
-static int
-intel_dp_detect(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx,
- bool force)
-{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- int status = connector->status;
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
-
- /* If full detect is not performed yet, do a full detect */
- if (!intel_dp->detect_done) {
- struct drm_crtc *crtc;
- int ret;
-
- crtc = connector->state->crtc;
- if (crtc) {
- ret = drm_modeset_lock(&crtc->mutex, ctx);
- if (ret)
- return ret;
- }
-
- status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
- }
-
- intel_dp->detect_done = false;
-
+ intel_display_power_put(dev_priv, aux_domain);
return status;
}
@@ -5185,8 +5408,11 @@ static void
intel_dp_force(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &dig_port->base;
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+ enum intel_display_power_domain aux_domain =
+ intel_aux_power_domain(dig_port);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@@ -5195,11 +5421,11 @@ intel_dp_force(struct drm_connector *connector)
if (connector->status != connector_status_connected)
return;
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv, aux_domain);
intel_dp_set_edid(intel_dp);
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv, aux_domain);
}
static int intel_dp_get_modes(struct drm_connector *connector)
@@ -5264,27 +5490,6 @@ intel_dp_connector_unregister(struct drm_connector *connector)
intel_connector_unregister(connector);
}
-static void
-intel_dp_connector_destroy(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- kfree(intel_connector->detect_edid);
-
- if (!IS_ERR_OR_NULL(intel_connector->edid))
- kfree(intel_connector->edid);
-
- /*
- * Can't call intel_dp_is_edp() since the encoder may have been
- * destroyed already.
- */
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- intel_panel_fini(&intel_connector->panel);
-
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
@@ -5348,7 +5553,8 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
an, DRM_HDCP_AN_LEN);
if (dpcd_ret != DRM_HDCP_AN_LEN) {
- DRM_ERROR("Failed to write An over DP/AUX (%zd)\n", dpcd_ret);
+ DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
+ dpcd_ret);
return dpcd_ret >= 0 ? -EIO : dpcd_ret;
}
@@ -5364,10 +5570,10 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
rxbuf, sizeof(rxbuf),
DP_AUX_CH_CTL_AUX_AKSV_SELECT);
if (ret < 0) {
- DRM_ERROR("Write Aksv over DP/AUX failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
return ret;
} else if (ret == 0) {
- DRM_ERROR("Aksv write over DP/AUX was empty\n");
+ DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
return -EIO;
}
@@ -5382,7 +5588,7 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret != DRM_HDCP_KSV_LEN) {
- DRM_ERROR("Read Bksv from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -5400,7 +5606,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret != DRM_HDCP_BSTATUS_LEN) {
- DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -5415,7 +5621,7 @@ int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
bcaps, 1);
if (ret != 1) {
- DRM_ERROR("Read bcaps from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
@@ -5445,7 +5651,7 @@ int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret != DRM_HDCP_RI_LEN) {
- DRM_ERROR("Read Ri' from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -5460,7 +5666,7 @@ int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
- DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
*ksv_ready = bstatus & DP_BSTATUS_READY;
@@ -5482,8 +5688,8 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
ksv_fifo + i * DRM_HDCP_KSV_LEN,
len);
if (ret != len) {
- DRM_ERROR("Read ksv[%d] from DP/AUX failed (%zd)\n", i,
- ret);
+ DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
+ i, ret);
return ret >= 0 ? -EIO : ret;
}
}
@@ -5503,7 +5709,7 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
DP_AUX_HDCP_V_PRIME(i), part,
DRM_HDCP_V_PRIME_PART_LEN);
if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
- DRM_ERROR("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
+ DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -5526,7 +5732,7 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
- DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
return false;
}
@@ -5565,6 +5771,7 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -5578,7 +5785,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
* indefinitely.
*/
DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
edp_panel_vdd_schedule_off(intel_dp);
}
@@ -5631,7 +5838,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_dp_connector_register,
.early_unregister = intel_dp_connector_unregister,
- .destroy = intel_dp_connector_destroy,
+ .destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
@@ -5673,11 +5880,11 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
if (long_hpd) {
intel_dp->reset_link_params = true;
- intel_dp->detect_done = false;
return IRQ_NONE;
}
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv,
+ intel_aux_power_domain(intel_dig_port));
if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
@@ -5690,7 +5897,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
- intel_dp->detect_done = false;
goto put_power;
}
}
@@ -5700,19 +5906,15 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
handled = intel_dp_short_pulse(intel_dp);
- /* Short pulse can signify loss of hdcp authentication */
- intel_hdcp_check_link(intel_dp->attached_connector);
-
- if (!handled) {
- intel_dp->detect_done = false;
+ if (!handled)
goto put_power;
- }
}
ret = IRQ_HANDLED;
put_power:
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(intel_dig_port));
return ret;
}
@@ -5743,6 +5945,10 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
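+ /* "max bpc" property: 6-10 bpc on GMCH platforms, 6-12 bpc on gen5+ */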
+ if (HAS_GMCH_DISPLAY(dev_priv))
+ drm_connector_attach_max_bpc_property(connector, 6, 10);
+ else if (INTEL_GEN(dev_priv) >= 5)
+ drm_connector_attach_max_bpc_property(connector, 6, 12);
if (intel_dp_is_edp(intel_dp)) {
u32 allowed_scalers;
@@ -6099,10 +6305,10 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
switch (index) {
case DRRS_HIGH_RR:
- intel_dp_set_m_n(intel_crtc, M1_N1);
+ intel_dp_set_m_n(crtc_state, M1_N1);
break;
case DRRS_LOW_RR:
- intel_dp_set_m_n(intel_crtc, M2_N2);
+ intel_dp_set_m_n(crtc_state, M2_N2);
break;
case DRRS_MAX_RR:
default:
@@ -6422,6 +6628,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!intel_dp_is_edp(intel_dp))
return true;
+ INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
+
/*
* On IBX/CPT we may get here with LVDS already registered. Since the
* driver uses the only internal power sequencer available for both
@@ -6514,6 +6722,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_connector->panel.backlight.power = intel_edp_backlight_power;
intel_panel_setup_backlight(connector, pipe);
+ if (fixed_mode)
+ drm_connector_init_panel_orientation_property(
+ connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
+
return true;
out_vdd_off:
@@ -6624,9 +6836,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_aux_init(intel_dp);
- INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
- edp_panel_vdd_work);
-
intel_connector_attach_encoder(intel_connector, intel_encoder);
if (HAS_DDI(dev_priv))
@@ -6743,6 +6952,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
if (port != PORT_A)
intel_infoframe_init(intel_dig_port);
+ intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index a911691dbd0f..4de247ddf05f 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -51,6 +51,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
bpp = 24;
if (intel_dp->compliance.test_data.bpc) {
@@ -208,12 +209,25 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
- if (intel_dp->active_mst_links == 0 &&
- intel_dig_port->base.pre_pll_enable)
+ if (intel_dp->active_mst_links == 0)
intel_dig_port->base.pre_pll_enable(&intel_dig_port->base,
pipe_config, NULL);
}
+static void intel_mst_post_pll_disable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
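+ /* Only tear down the port PLL once the last MST stream has been disabled */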
+ if (intel_dp->active_mst_links == 0)
+ intel_dig_port->base.post_pll_disable(&intel_dig_port->base,
+ old_crtc_state,
+ old_conn_state);
+}
+
static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
@@ -335,24 +349,12 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
intel_connector->port);
}
-static void
-intel_dp_mst_connector_destroy(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- if (!IS_ERR_OR_NULL(intel_connector->edid))
- kfree(intel_connector->edid);
-
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_dp_mst_connector_destroy,
+ .destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
@@ -560,6 +562,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
+ intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
intel_encoder->enable = intel_mst_enable_dp;
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 00b3ab656b06..3c7f10d17658 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -748,7 +748,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
- if (crtc->config->lane_count > 2) {
+ if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
@@ -765,7 +765,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
val |= DPIO_PCS_CLK_SOFT_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
- if (crtc->config->lane_count > 2) {
+ if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index e6cac9225536..d513ca875c67 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -126,16 +126,16 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
/**
* intel_prepare_shared_dpll - call a dpll's prepare hook
- * @crtc: CRTC which has a shared dpll
+ * @crtc_state: state of the CRTC which has a shared dpll
*
* This calls the PLL's prepare hook if it has one and if the PLL is not
* already enabled. The prepare hook is platform specific.
*/
-void intel_prepare_shared_dpll(struct intel_crtc *crtc)
+void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
if (WARN_ON(pll == NULL))
return;
@@ -154,15 +154,15 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
/**
* intel_enable_shared_dpll - enable a CRTC's shared DPLL
- * @crtc: CRTC which has a shared DPLL
+ * @crtc_state: state of the CRTC which has a shared DPLL
*
* Enable the shared DPLL used by @crtc.
*/
-void intel_enable_shared_dpll(struct intel_crtc *crtc)
+void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
unsigned int old_mask;
@@ -199,14 +199,15 @@ out:
/**
* intel_disable_shared_dpll - disable a CRTC's shared DPLL
- * @crtc: CRTC which has a shared DPLL
+ * @crtc_state: state of the CRTC which has a shared DPLL
*
* Disable the shared DPLL used by @crtc.
*/
-void intel_disable_shared_dpll(struct intel_crtc *crtc)
+void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
/* PCH only available on ILK+ */
@@ -409,14 +410,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
-
- /* Make sure no transcoder isn't still depending on us. */
- for_each_intel_crtc(dev, crtc) {
- if (crtc->config->shared_dpll == pll)
- assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
- }
I915_WRITE(PCH_DPLL(id), 0);
POSTING_READ(PCH_DPLL(id));
@@ -2530,7 +2523,8 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
if (intel_port_is_tc(dev_priv, encoder->port))
ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params);
else
ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params);
@@ -2628,11 +2622,16 @@ static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
}
-static enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
+enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
{
return port - PORT_C + DPLL_ID_ICL_MGPLL1;
}
+bool intel_dpll_is_combophy(enum intel_dpll_id id)
+{
+ return id == DPLL_ID_ICL_DPLL0 || id == DPLL_ID_ICL_DPLL1;
+}
+
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
uint32_t *target_dco_khz,
struct intel_dpll_hw_state *state)
@@ -2874,8 +2873,8 @@ static struct intel_shared_dpll *
icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *intel_dig_port;
struct intel_shared_dpll *pll;
struct intel_dpll_hw_state pll_state = {};
enum port port = encoder->port;
@@ -2883,18 +2882,21 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
int clock = crtc_state->port_clock;
bool ret;
- switch (port) {
- case PORT_A:
- case PORT_B:
+ if (intel_port_is_combophy(dev_priv, port)) {
min = DPLL_ID_ICL_DPLL0;
max = DPLL_ID_ICL_DPLL1;
ret = icl_calc_dpll_state(crtc_state, encoder, clock,
&pll_state);
- break;
- case PORT_C:
- case PORT_D:
- case PORT_E:
- case PORT_F:
+ } else if (intel_port_is_tc(dev_priv, port)) {
+ if (encoder->type == INTEL_OUTPUT_DP_MST) {
+ struct intel_dp_mst_encoder *mst_encoder;
+
+ mst_encoder = enc_to_mst(&encoder->base);
+ intel_dig_port = mst_encoder->primary;
+ } else {
+ intel_dig_port = enc_to_dig_port(&encoder->base);
+ }
+
if (intel_dig_port->tc_type == TC_PORT_TBT) {
min = DPLL_ID_ICL_TBTPLL;
max = min;
@@ -2906,8 +2908,7 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
&pll_state);
}
- break;
- default:
+ } else {
MISSING_CASE(port);
return NULL;
}
@@ -2932,21 +2933,16 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
{
- switch (id) {
- default:
- MISSING_CASE(id);
- /* fall through */
- case DPLL_ID_ICL_DPLL0:
- case DPLL_ID_ICL_DPLL1:
+ if (intel_dpll_is_combophy(id))
return CNL_DPLL_ENABLE(id);
- case DPLL_ID_ICL_TBTPLL:
+ else if (id == DPLL_ID_ICL_TBTPLL)
return TBT_PLL_ENABLE;
- case DPLL_ID_ICL_MGPLL1:
- case DPLL_ID_ICL_MGPLL2:
- case DPLL_ID_ICL_MGPLL3:
- case DPLL_ID_ICL_MGPLL4:
+ else
+ /*
+ * TODO: Make MG_PLL macros use
+ * tc port id instead of port id
+ */
return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id));
- }
}
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2965,17 +2961,11 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!(val & PLL_ENABLE))
goto out;
- switch (id) {
- case DPLL_ID_ICL_DPLL0:
- case DPLL_ID_ICL_DPLL1:
- case DPLL_ID_ICL_TBTPLL:
+ if (intel_dpll_is_combophy(id) ||
+ id == DPLL_ID_ICL_TBTPLL) {
hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
- break;
- case DPLL_ID_ICL_MGPLL1:
- case DPLL_ID_ICL_MGPLL2:
- case DPLL_ID_ICL_MGPLL3:
- case DPLL_ID_ICL_MGPLL4:
+ } else {
port = icl_mg_pll_id_to_port(id);
hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port));
hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
@@ -3013,9 +3003,6 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
- break;
- default:
- MISSING_CASE(id);
}
ret = true;
@@ -3104,21 +3091,10 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
PLL_POWER_STATE, 1))
DRM_ERROR("PLL %d Power not enabled\n", id);
- switch (id) {
- case DPLL_ID_ICL_DPLL0:
- case DPLL_ID_ICL_DPLL1:
- case DPLL_ID_ICL_TBTPLL:
+ if (intel_dpll_is_combophy(id) || id == DPLL_ID_ICL_TBTPLL)
icl_dpll_write(dev_priv, pll);
- break;
- case DPLL_ID_ICL_MGPLL1:
- case DPLL_ID_ICL_MGPLL2:
- case DPLL_ID_ICL_MGPLL3:
- case DPLL_ID_ICL_MGPLL4:
+ else
icl_mg_pll_write(dev_priv, pll);
- break;
- default:
- MISSING_CASE(id);
- }
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index bf0de8a4dc63..a033d8f06d4a 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -334,9 +334,9 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
struct intel_crtc *crtc,
struct drm_atomic_state *state);
-void intel_prepare_shared_dpll(struct intel_crtc *crtc);
-void intel_enable_shared_dpll(struct intel_crtc *crtc);
-void intel_disable_shared_dpll(struct intel_crtc *crtc);
+void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
+void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
+void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
@@ -345,5 +345,7 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
uint32_t pll_id);
int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
+enum intel_dpll_id icl_port_to_mg_pll_id(enum port port);
+bool intel_dpll_is_combophy(enum intel_dpll_id id);
#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8b298e5f012d..f94a04b4ad87 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -381,6 +381,15 @@ struct intel_hdcp_shim {
bool *hdcp_capable);
};
+struct intel_hdcp {
+ const struct intel_hdcp_shim *shim;
+ /* Mutex for hdcp state of the connector */
+ struct mutex mutex;
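+ /* Content protection property value, protected by mutex */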
+ u64 value;
+ struct delayed_work check_work;
+ struct work_struct prop_work;
+};
+
struct intel_connector {
struct drm_connector base;
/*
@@ -413,11 +422,7 @@ struct intel_connector {
/* Work struct to schedule a uevent on link train failure */
struct work_struct modeset_retry_work;
- const struct intel_hdcp_shim *hdcp_shim;
- struct mutex hdcp_mutex;
- uint64_t hdcp_value; /* protected by hdcp_mutex */
- struct delayed_work hdcp_check_work;
- struct work_struct hdcp_prop_work;
+ struct intel_hdcp hdcp;
};
struct intel_digital_connector_state {
@@ -539,6 +544,26 @@ struct intel_plane_state {
*/
int scaler_id;
+ /*
+ * linked_plane:
+ *
+ * ICL planar formats require 2 planes that are updated as pairs.
+ * This member is used to make sure the other plane is also updated
+ * when required, and for update_slave() to find the correct
+ * plane_state to pass as argument.
+ */
+ struct intel_plane *linked_plane;
+
+ /*
+ * slave:
+ * If set, don't update this plane directly; the linked plane's state
+ * is used to update this plane during the atomic commit, via the
+ * update_slave() callback.
+ *
+ * It's also used by the watermark code to ignore wm calculations on
+ * this plane. They're calculated by the linked plane's wm code.
+ */
+ u32 slave;
+
struct drm_intel_sprite_colorkey ckey;
};
@@ -547,6 +572,7 @@ struct intel_initial_plane_config {
unsigned int tiling;
int size;
u32 base;
+ u8 rotation;
};
#define SKL_MIN_SRC_W 8
@@ -680,6 +706,8 @@ struct intel_crtc_wm_state {
/* gen9+ only needs 1-step wm programming */
struct skl_pipe_wm optimal;
struct skl_ddb_entry ddb;
+ struct skl_ddb_entry plane_ddb_y[I915_MAX_PLANES];
+ struct skl_ddb_entry plane_ddb_uv[I915_MAX_PLANES];
} skl;
struct {
@@ -712,6 +740,13 @@ struct intel_crtc_wm_state {
bool need_postvbl_update;
};
+enum intel_output_format {
+ INTEL_OUTPUT_FORMAT_INVALID,
+ INTEL_OUTPUT_FORMAT_RGB,
+ INTEL_OUTPUT_FORMAT_YCBCR420,
+ INTEL_OUTPUT_FORMAT_YCBCR444,
+};
+
struct intel_crtc_state {
struct drm_crtc_state base;
@@ -893,14 +928,32 @@ struct intel_crtc_state {
u8 active_planes;
u8 nv12_planes;
+ /* bitmask of planes that will be updated during the commit */
+ u8 update_planes;
+
/* HDMI scrambling status */
bool hdmi_scrambling;
/* HDMI High TMDS char rate ratio */
bool hdmi_high_tmds_clock_ratio;
- /* output format is YCBCR 4:2:0 */
- bool ycbcr420;
+ /* Output format RGB/YCBCR etc */
+ enum intel_output_format output_format;
+
+ /* Output downscaling is done in the LSPCON device */
+ bool lspcon_downsampling;
+
+ /* Display Stream Compression (DSC) state */
+ struct {
+ bool compression_enable;
+ bool dsc_split;
+ u16 compressed_bpp;
+ u8 slice_count;
+ } dsc_params;
+ struct drm_dsc_config dp_dsc_cfg;
+
+ /* Forward Error Correction (FEC) state */
+ bool fec_enable;
};
struct intel_crtc {
@@ -973,8 +1026,11 @@ struct intel_plane {
void (*update_plane)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
+ void (*update_slave)(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane,
- struct intel_crtc *crtc);
+ const struct intel_crtc_state *crtc_state);
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
@@ -1070,13 +1126,13 @@ struct intel_dp {
bool link_mst;
bool link_trained;
bool has_audio;
- bool detect_done;
bool reset_link_params;
- enum aux_ch aux_ch;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
+ u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
+ u8 fec_capable;
/* source rates */
int num_source_rates;
const int *source_rates;
@@ -1094,7 +1150,6 @@ struct intel_dp {
/* sink or branch descriptor */
struct drm_dp_desc desc;
struct drm_dp_aux aux;
- enum intel_display_power_domain aux_power_domain;
uint8_t train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
@@ -1156,9 +1211,15 @@ struct intel_dp {
struct intel_dp_compliance compliance;
};
+enum lspcon_vendor {
+ LSPCON_VENDOR_MCA,
+ LSPCON_VENDOR_PARADE
+};
+
struct intel_lspcon {
bool active;
enum drm_lspcon_mode mode;
+ enum lspcon_vendor vendor;
};
struct intel_digital_port {
@@ -1170,18 +1231,20 @@ struct intel_digital_port {
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
uint8_t max_lanes;
+ /* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
+ enum aux_ch aux_ch;
enum intel_display_power_domain ddi_io_power_domain;
enum tc_port_type tc_type;
- void (*write_infoframe)(struct drm_encoder *encoder,
+ void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len);
- void (*set_infoframes)(struct drm_encoder *encoder,
+ void (*set_infoframes)(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
- bool (*infoframe_enabled)(struct drm_encoder *encoder,
+ bool (*infoframe_enabled)(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
};
@@ -1281,6 +1344,12 @@ enc_to_dig_port(struct drm_encoder *encoder)
return NULL;
}
+static inline struct intel_digital_port *
+conn_to_dig_port(struct intel_connector *connector)
+{
+ return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
+}
+
static inline struct intel_dp_mst_encoder *
enc_to_mst(struct drm_encoder *encoder)
{
@@ -1306,6 +1375,12 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
}
}
+static inline struct intel_lspcon *
+enc_to_intel_lspcon(struct drm_encoder *encoder)
+{
+ return &enc_to_dig_port(encoder)->lspcon;
+}
+
static inline struct intel_digital_port *
dp_to_dig_port(struct intel_dp *intel_dp)
{
@@ -1331,6 +1406,27 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
}
static inline struct intel_plane_state *
+intel_atomic_get_plane_state(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ struct drm_plane_state *ret =
+ drm_atomic_get_plane_state(&state->base, &plane->base);
+
+ if (IS_ERR(ret))
+ return ERR_CAST(ret);
+
+ return to_intel_plane_state(ret);
+}
+
+static inline struct intel_plane_state *
+intel_atomic_get_old_plane_state(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ return to_intel_plane_state(drm_atomic_get_old_plane_state(&state->base,
+ &plane->base));
+}
+
+static inline struct intel_plane_state *
intel_atomic_get_new_plane_state(struct intel_atomic_state *state,
struct intel_plane *plane)
{
@@ -1438,12 +1534,9 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
u8 voltage_swing);
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
-void icl_map_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state);
-void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state);
+void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
+int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id pll_id);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
int color_plane, unsigned int height);
@@ -1488,7 +1581,6 @@ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
-void intel_update_rawclk(struct drm_i915_private *dev_priv);
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
@@ -1509,20 +1601,12 @@ void intel_mark_idle(struct drm_i915_private *dev_priv);
int intel_display_suspend(struct drm_device *dev);
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
-int intel_connector_init(struct intel_connector *);
-struct intel_connector *intel_connector_alloc(void);
-void intel_connector_free(struct intel_connector *connector);
-bool intel_connector_get_hw_state(struct intel_connector *connector);
-void intel_connector_attach_encoder(struct intel_connector *connector,
- struct intel_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port);
bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
enum port port);
-
-enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
@@ -1628,9 +1712,11 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void gen9_enable_dc5(struct drm_i915_private *dev_priv);
unsigned int skl_cdclk_get_vco(unsigned int freq);
+void skl_enable_dc6(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
-void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
+void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
+ enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
struct dpll *best_clock);
@@ -1641,6 +1727,8 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
+enum intel_display_power_domain
+intel_aux_power_domain(struct intel_digital_port *dig_port);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
@@ -1670,6 +1758,24 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
+/* intel_connector.c */
+int intel_connector_init(struct intel_connector *connector);
+struct intel_connector *intel_connector_alloc(void);
+void intel_connector_free(struct intel_connector *connector);
+void intel_connector_destroy(struct drm_connector *connector);
+int intel_connector_register(struct drm_connector *connector);
+void intel_connector_unregister(struct drm_connector *connector);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+enum pipe intel_connector_get_pipe(struct intel_connector *connector);
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+void intel_attach_aspect_ratio_property(struct drm_connector *connector);
+
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
void intel_csr_load_program(struct drm_i915_private *);
@@ -1695,6 +1801,9 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp);
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
@@ -1728,9 +1837,6 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
-void icl_program_mg_dp_mode(struct intel_dp *intel_dp);
-void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port);
-void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port);
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1748,6 +1854,16 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
+uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
+ int mode_clock, int mode_hdisplay);
+uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
+ int mode_hdisplay);
+
+/* intel_vdsc.c */
+int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config);
+enum intel_display_power_domain
+intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
@@ -1768,6 +1884,9 @@ void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
/* vlv_dsi.c */
void vlv_dsi_init(struct drm_i915_private *dev_priv);
+/* icl_dsi.c */
+void icl_dsi_init(struct drm_i915_private *dev_priv);
+
/* intel_dsi_dcs_backlight.c */
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1858,7 +1977,6 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
-
/* intel_lvds.c */
bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t lvds_reg, enum pipe *pipe);
@@ -1866,19 +1984,9 @@ void intel_lvds_init(struct drm_i915_private *dev_priv);
struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
bool intel_is_dual_link_lvds(struct drm_device *dev);
-
-/* intel_modes.c */
-int intel_connector_update_modes(struct drm_connector *connector,
- struct edid *edid);
-int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-void intel_attach_force_audio_property(struct drm_connector *connector);
-void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
-void intel_attach_aspect_ratio_property(struct drm_connector *connector);
-
-
/* intel_overlay.c */
-void intel_setup_overlay(struct drm_i915_private *dev_priv);
-void intel_cleanup_overlay(struct drm_i915_private *dev_priv);
+void intel_overlay_setup(struct drm_i915_private *dev_priv);
+void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
int intel_overlay_switch_off(struct intel_overlay *overlay);
int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1907,7 +2015,6 @@ int intel_panel_setup_backlight(struct drm_connector *connector,
void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
-void intel_panel_destroy_backlight(struct drm_connector *connector);
extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_i915_private *dev_priv,
struct drm_display_mode *fixed_mode,
@@ -1936,6 +2043,7 @@ int intel_hdcp_enable(struct intel_connector *connector);
int intel_hdcp_disable(struct intel_connector *connector);
int intel_hdcp_check_link(struct intel_connector *connector);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
+bool intel_hdcp_capable(struct intel_connector *connector);
/* intel_psr.c */
#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
@@ -1961,12 +2069,18 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
u32 *out_value);
+bool intel_psr_enabled(struct intel_dp *intel_dp);
+
+/* intel_quirks.c */
+void intel_init_quirks(struct drm_i915_private *dev_priv);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
+void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void icl_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_power_domains_enable(struct drm_i915_private *dev_priv);
void intel_power_domains_disable(struct drm_i915_private *dev_priv);
@@ -2090,6 +2204,9 @@ void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
@@ -2101,10 +2218,13 @@ int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2);
-bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
- const struct skl_ddb_entry **entries,
- const struct skl_ddb_entry *ddb,
- int ignore);
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+ const struct skl_ddb_entry entries[],
+ int num_entries, int ignore_idx);
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
bool ilk_disable_lp_wm(struct drm_device *dev);
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
struct intel_crtc_state *cstate);
@@ -2127,23 +2247,29 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
-void skl_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
-bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe);
-bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id);
-bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id);
-unsigned int skl_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation);
-int skl_plane_check(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state);
int intel_plane_check_stride(const struct intel_plane_state *plane_state);
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
+struct intel_plane *
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id);
+
+static inline bool icl_is_nv12_y_plane(enum plane_id id)
+{
+ /* No gen check needed; these planes only exist on gen11 */
+ if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
+ return true;
+
+ return false;
+}
+
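+/* The primary plane and sprites 0 and 1 are the gen11 HDR-capable planes */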
+static inline bool icl_is_hdr_plane(struct intel_plane *plane)
+{
+ if (INTEL_GEN(to_i915(plane->base.dev)) < 11)
+ return false;
+
+ return plane->id < PLANE_SPRITE2;
+}
/* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv);
@@ -2185,11 +2311,16 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state);
/* intel_atomic_plane.c */
-struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
+struct intel_plane *intel_plane_alloc(void);
+void intel_plane_free(struct intel_plane *plane);
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
@@ -2205,6 +2336,18 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state);
bool lspcon_init(struct intel_digital_port *intel_dig_port);
void lspcon_resume(struct intel_lspcon *lspcon);
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
+void lspcon_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *buf, ssize_t len);
+void lspcon_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
+void lspcon_ycbcr420_config(struct drm_connector *connector,
+ struct intel_crtc_state *crtc_state);
/* intel_pipe_crc.c */
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
new file mode 100644
index 000000000000..5fec02aceaed
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include "intel_dsi.h"
+
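+/* Per-lane link bitrate in kbps (intel_dsi->pclk is the pixel clock in kHz) */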
+int intel_dsi_bitrate(const struct intel_dsi *intel_dsi)
+{
+ int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+ if (WARN_ON(bpp < 0))
+ bpp = 16;
+
+ return intel_dsi->pclk * bpp / intel_dsi->lane_count;
+}
+
+int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi)
+{
+ switch (intel_dsi->escape_clk_div) {
+ default:
+ case 0:
+ return 50;
+ case 1:
+ return 100;
+ case 2:
+ return 200;
+ }
+}
+
+int intel_dsi_get_modes(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *mode;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!intel_connector->panel.fixed_mode) {
+ DRM_DEBUG_KMS("no fixed mode\n");
+ return 0;
+ }
+
+ mode = drm_mode_duplicate(connector->dev,
+ intel_connector->panel.fixed_mode);
+ if (!mode) {
+ DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+ return 0;
+ }
+
+ drm_mode_probed_add(connector, mode);
+ return 1;
+}
+
+enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+ if (fixed_mode->clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
+ }
+
+ return MODE_OK;
+}
+
+struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+ const struct mipi_dsi_host_ops *funcs,
+ enum port port)
+{
+ struct intel_dsi_host *host;
+ struct mipi_dsi_device *device;
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return NULL;
+
+ host->base.ops = funcs;
+ host->intel_dsi = intel_dsi;
+ host->port = port;
+
+ /*
+ * We should call mipi_dsi_host_register(&host->base) here, but we don't
+ * have a host->dev, and we don't have OF stuff either. So just use the
+ * dsi framework as a library and hope for the best. Create the dsi
+ * devices by ourselves here too. Need to be careful though, because we
+ * don't initialize any of the driver model devices here.
+ */
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device) {
+ kfree(host);
+ return NULL;
+ }
+
+ device->host = &host->base;
+ host->device = device;
+
+ return host;
+}
+
+enum drm_panel_orientation
+intel_dsi_get_panel_orientation(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum drm_panel_orientation orientation;
+
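+ /* The DSI-specific VBT orientation takes precedence over the global one */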
+ orientation = dev_priv->vbt.dsi.orientation;
+ if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ return orientation;
+
+ orientation = dev_priv->vbt.orientation;
+ if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ return orientation;
+
+ return DRM_MODE_PANEL_ORIENTATION_NORMAL;
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index ad7c1cb32983..d968f1f13e09 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -81,14 +81,21 @@ struct intel_dsi {
u16 dcs_backlight_ports;
u16 dcs_cabc_ports;
+ /* RGB or BGR */
+ bool bgr_enabled;
+
u8 pixel_overlap;
u32 port_bits;
u32 bw_timer;
u32 dphy_reg;
+
+ /* data lanes dphy timing */
+ u32 dphy_data_lane_reg;
u32 video_frmt_cfg_bits;
u16 lp_byte_clk;
/* timeouts in byte clocks */
+ u16 hs_tx_timeout;
u16 lp_rx_timeout;
u16 turn_arnd_val;
u16 rst_timer_val;
@@ -129,9 +136,36 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
return container_of(encoder, struct intel_dsi, base.base);
}
+static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
+{
+ return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
+}
+
+static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
+{
+ return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
+}
+
+static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
+{
+ return enc_to_intel_dsi(&encoder->base)->ports;
+}
+
+/* intel_dsi.c */
+int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
+int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi);
+enum drm_panel_orientation
+intel_dsi_get_panel_orientation(struct intel_connector *connector);
+
/* vlv_dsi.c */
void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
+int intel_dsi_get_modes(struct drm_connector *connector);
+enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+ const struct mipi_dsi_host_ops *funcs,
+ enum port port);
/* vlv_dsi_pll.c */
int vlv_dsi_pll_compute(struct intel_encoder *encoder,
@@ -158,5 +192,6 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
+void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index ac83d6b89ae0..a1a8b3790e61 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -103,6 +103,18 @@ static struct gpio_map vlv_gpio_table[] = {
#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4)
#define CHV_GPIO_CFGLOCK (1 << 31)
+/* ICL DSI Display GPIO Pins */
+#define ICL_GPIO_DDSP_HPD_A 0
+#define ICL_GPIO_L_VDDEN_1 1
+#define ICL_GPIO_L_BKLTEN_1 2
+#define ICL_GPIO_DDPA_CTRLCLK_1 3
+#define ICL_GPIO_DDPA_CTRLDATA_1 4
+#define ICL_GPIO_DDSP_HPD_B 5
+#define ICL_GPIO_L_VDDEN_2 6
+#define ICL_GPIO_L_BKLTEN_2 7
+#define ICL_GPIO_DDPA_CTRLCLK_2 8
+#define ICL_GPIO_DDPA_CTRLDATA_2 9
+
static inline enum port intel_dsi_seq_port_to_port(u8 port)
{
return port ? PORT_C : PORT_A;
@@ -111,6 +123,7 @@ static inline enum port intel_dsi_seq_port_to_port(u8 port)
static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
const u8 *data)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
struct mipi_dsi_device *dsi_device;
u8 type, flags, seq_port;
u16 len;
@@ -181,7 +194,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
break;
}
- vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
+ if (!IS_ICELAKE(dev_priv))
+ vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
data += len;
@@ -322,6 +336,12 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
gpiod_set_value(gpio_desc, value);
}
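+/* XXX: execution of VBT GPIO elements is not implemented for ICL yet */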
+static void icl_exec_gpio(struct drm_i915_private *dev_priv,
+ u8 gpio_source, u8 gpio_index, bool value)
+{
+ DRM_DEBUG_KMS("Skipping ICL GPIO element execution\n");
+}
+
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
@@ -345,7 +365,9 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
/* pull up/down */
value = *data++ & 1;
- if (IS_VALLEYVIEW(dev_priv))
+ if (IS_ICELAKE(dev_priv))
+ icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ else if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
else if (IS_CHERRYVIEW(dev_priv))
chv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
@@ -481,6 +503,17 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
}
}
+void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+
+ /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
+ if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
+ return;
+
+ msleep(msec);
+}
+
int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
{
struct intel_connector *connector = intel_dsi->attached_connector;
@@ -499,110 +532,125 @@ int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
return 1;
}
-bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
+#define ICL_PREPARE_CNT_MAX 0x7
+#define ICL_CLK_ZERO_CNT_MAX 0xf
+#define ICL_TRAIL_CNT_MAX 0x7
+#define ICL_TCLK_PRE_CNT_MAX 0x3
+#define ICL_TCLK_POST_CNT_MAX 0x7
+#define ICL_HS_ZERO_CNT_MAX 0xf
+#define ICL_EXIT_ZERO_CNT_MAX 0x7
+
+static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
- struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
- struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
- u32 bpp;
- u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
- u32 ui_num, ui_den;
+ u32 tlpx_ns;
u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
u32 ths_prepare_ns, tclk_trail_ns;
- u32 tclk_prepare_clkzero, ths_prepare_hszero;
- u32 lp_to_hs_switch, hs_to_lp_switch;
- u32 pclk, computed_ddr;
- u32 mul;
- u16 burst_mode_ratio;
- enum port port;
-
- DRM_DEBUG_KMS("\n");
-
- intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
- intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
- intel_dsi->lane_count = mipi_config->lane_cnt + 1;
- intel_dsi->pixel_format =
- pixel_format_from_register_bits(
- mipi_config->videomode_color_format << 7);
- bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 hs_zero_cnt;
+ u32 tclk_pre_cnt, tclk_post_cnt;
- intel_dsi->dual_link = mipi_config->dual_link;
- intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
- intel_dsi->operation_mode = mipi_config->is_cmd_mode;
- intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
- intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
- intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
- intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
- intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
- intel_dsi->init_count = mipi_config->master_init_timer;
- intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
- intel_dsi->video_frmt_cfg_bits =
- mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+ tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
- pclk = mode->clock;
+ tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
+ ths_prepare_ns = max(mipi_config->ths_prepare,
+ mipi_config->tclk_prepare);
- /* In dual link mode each port needs half of pixel clock */
- if (intel_dsi->dual_link) {
- pclk = pclk / 2;
+ /*
+ * prepare cnt in escape clocks
+ * this field is a fixed-point value in 1.2 format, i.e. the most
+ * significant bit is the integer part and the two least significant
+ * bits are the fraction bits, so the field can represent a range of
+ * 0.25 to 1.75 in steps of 0.25
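+ *
+ * worked example (hypothetical VBT timings): ths_prepare_ns = 60 and
+ * tlpx_ns = 50 give DIV_ROUND_UP(60 * 4, 50) = 5 = 0b101, i.e.
+ * 1.25 escape clocks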
+ */
+ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
+ if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
+ DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt);
+ prepare_cnt = ICL_PREPARE_CNT_MAX;
+ }
- /* we can enable pixel_overlap if needed by panel. In this
- * case we need to increase the pixelclock for extra pixels
- */
- if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
- pclk += DIV_ROUND_UP(mode->vtotal *
- intel_dsi->pixel_overlap *
- 60, 1000);
- }
+ /* clk zero count in escape clocks */
+ clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
+ ths_prepare_ns, tlpx_ns);
+ if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
+ clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
}
- /* Burst Mode Ratio
- * Target ddr frequency from VBT / non burst ddr freq
- * multiply by 100 to preserve remainder
- */
- if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
- if (mipi_config->target_burst_mode_freq) {
- computed_ddr = (pclk * bpp) / intel_dsi->lane_count;
+ /* trail cnt in escape clocks */
+ trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
+ if (trail_cnt > ICL_TRAIL_CNT_MAX) {
+ DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt);
+ trail_cnt = ICL_TRAIL_CNT_MAX;
+ }
- if (mipi_config->target_burst_mode_freq <
- computed_ddr) {
- DRM_ERROR("Burst mode freq is less than computed\n");
- return false;
- }
+ /* tclk pre count in escape clocks */
+ tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
+ if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
+ DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
+ tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
+ }
- burst_mode_ratio = DIV_ROUND_UP(
- mipi_config->target_burst_mode_freq * 100,
- computed_ddr);
+ /* tclk post count in escape clocks */
+ tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
+ if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
+ DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt);
+ tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
+ }
- pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
- } else {
- DRM_ERROR("Burst mode target is not set\n");
- return false;
- }
- } else
- burst_mode_ratio = 100;
+ /* hs zero cnt in escape clocks */
+ hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
+ ths_prepare_ns, tlpx_ns);
+ if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt);
+ hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
+ }
- intel_dsi->burst_mode_ratio = burst_mode_ratio;
- intel_dsi->pclk = pclk;
+ /* hs exit zero cnt in escape clocks */
+ exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
+ if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt);
+ exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
+ }
- bitrate = (pclk * bpp) / intel_dsi->lane_count;
+ /* clock lane dphy timings */
+ intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
+ CLK_PREPARE(prepare_cnt) |
+ CLK_ZERO_OVERRIDE |
+ CLK_ZERO(clk_zero_cnt) |
+ CLK_PRE_OVERRIDE |
+ CLK_PRE(tclk_pre_cnt) |
+ CLK_POST_OVERRIDE |
+ CLK_POST(tclk_post_cnt) |
+ CLK_TRAIL_OVERRIDE |
+ CLK_TRAIL(trail_cnt));
+
+ /* data lanes dphy timings */
+ intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
+ HS_PREPARE(prepare_cnt) |
+ HS_ZERO_OVERRIDE |
+ HS_ZERO(hs_zero_cnt) |
+ HS_TRAIL_OVERRIDE |
+ HS_TRAIL(trail_cnt) |
+ HS_EXIT_OVERRIDE |
+ HS_EXIT(exit_zero_cnt));
+}
- switch (intel_dsi->escape_clk_div) {
- case 0:
- tlpx_ns = 50;
- break;
- case 1:
- tlpx_ns = 100;
- break;
+static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ u32 tlpx_ns, extra_byte_count, tlpx_ui;
+ u32 ui_num, ui_den;
+ u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
+ u32 ths_prepare_ns, tclk_trail_ns;
+ u32 tclk_prepare_clkzero, ths_prepare_hszero;
+ u32 lp_to_hs_switch, hs_to_lp_switch;
+ u32 mul;
- case 2:
- tlpx_ns = 200;
- break;
- default:
- tlpx_ns = 50;
- break;
- }
+ tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
switch (intel_dsi->lane_count) {
case 1:
@@ -620,7 +668,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
/* in Kbps */
ui_num = NS_KHZ_RATIO;
- ui_den = bitrate;
+ ui_den = intel_dsi_bitrate(intel_dsi);
tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
ths_prepare_hszero = mipi_config->ths_prepare_hszero;
@@ -746,6 +794,88 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
8);
intel_dsi->clk_hs_to_lp_count += extra_byte_count;
+}
+
+bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
+ struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
+ u16 burst_mode_ratio;
+ enum port port;
+
+ DRM_DEBUG_KMS("\n");
+
+ intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
+ intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
+ intel_dsi->lane_count = mipi_config->lane_cnt + 1;
+ intel_dsi->pixel_format =
+ pixel_format_from_register_bits(
+ mipi_config->videomode_color_format << 7);
+
+ intel_dsi->dual_link = mipi_config->dual_link;
+ intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
+ intel_dsi->operation_mode = mipi_config->is_cmd_mode;
+ intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
+ intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
+ intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
+ intel_dsi->hs_tx_timeout = mipi_config->hs_tx_timeout;
+ intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
+ intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
+ intel_dsi->init_count = mipi_config->master_init_timer;
+ intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
+ intel_dsi->video_frmt_cfg_bits =
+ mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+ intel_dsi->bgr_enabled = mipi_config->rgb_flip;
+
+ /* Starting point, adjusted depending on dual link and burst mode */
+ intel_dsi->pclk = mode->clock;
+
+ /* In dual link mode each port needs half of pixel clock */
+ if (intel_dsi->dual_link) {
+ intel_dsi->pclk /= 2;
+
+ /* we can enable pixel_overlap if needed by panel. In this
+ * case we need to increase the pixelclock for extra pixels
+ */
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ intel_dsi->pclk += DIV_ROUND_UP(mode->vtotal * intel_dsi->pixel_overlap * 60, 1000);
+ }
+ }
+
+ /* Burst Mode Ratio
+ * Target ddr frequency from VBT / non burst ddr freq
+ * multiply by 100 to preserve remainder
+ */
+ if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+ if (mipi_config->target_burst_mode_freq) {
+ u32 bitrate = intel_dsi_bitrate(intel_dsi);
+
+ if (mipi_config->target_burst_mode_freq < bitrate) {
+ DRM_ERROR("Burst mode freq is less than computed\n");
+ return false;
+ }
+
+ burst_mode_ratio = DIV_ROUND_UP(
+ mipi_config->target_burst_mode_freq * 100,
+ bitrate);
+
+ intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
+ } else {
+ DRM_ERROR("Burst mode target is not set\n");
+ return false;
+ }
+ } else
+ burst_mode_ratio = 100;
+
+ intel_dsi->burst_mode_ratio = burst_mode_ratio;
+
+ if (IS_ICELAKE(dev_priv))
+ icl_dphy_param_init(intel_dsi);
+ else
+ vlv_dphy_param_init(intel_dsi);
DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 4e142ff49708..0042a7f69387 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -256,6 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
return true;
}
@@ -333,18 +334,11 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
return 0;
}
-static void intel_dvo_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
- intel_panel_fini(&to_intel_connector(connector)->panel);
- kfree(connector);
-}
-
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.detect = intel_dvo_detect,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_dvo_destroy,
+ .destroy = intel_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 217ed3ee1cab..af2873403009 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -273,13 +273,13 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
- if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
+ if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
return -EINVAL;
- if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
+ if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
return -EINVAL;
- if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
+ if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
return -EINVAL;
GEM_BUG_ON(dev_priv->engine[id]);
@@ -335,7 +335,10 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
WARN_ON(ring_mask == 0);
WARN_ON(ring_mask &
- GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
+ GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
+
+ if (i915_inject_load_failure())
+ return -ENODEV;
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
if (!HAS_ENGINE(dev_priv, i))
@@ -399,7 +402,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
err = -EINVAL;
err_id = id;
- if (GEM_WARN_ON(!init))
+ if (GEM_DEBUG_WARN_ON(!init))
goto cleanup;
err = init(engine);
@@ -463,7 +466,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
struct intel_engine_execlists * const execlists = &engine->execlists;
execlists->port_mask = 1;
- BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
+ GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
execlists->queue_priority = INT_MIN;
@@ -482,7 +485,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
i915_timeline_init(engine->i915, &engine->timeline, engine->name);
- lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE);
+ i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
intel_engine_init_execlist(engine);
intel_engine_init_hangcheck(engine);
@@ -490,46 +493,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_cmd_parser(engine);
}
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
- unsigned int size)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int ret;
-
- WARN_ON(engine->scratch);
-
- obj = i915_gem_object_create_stolen(engine->i915, size);
- if (!obj)
- obj = i915_gem_object_create_internal(engine->i915, size);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate scratch page\n");
- return PTR_ERR(obj);
- }
-
- vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unref;
- }
-
- ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
- if (ret)
- goto err_unref;
-
- engine->scratch = vma;
- return 0;
-
-err_unref:
- i915_gem_object_put(obj);
- return ret;
-}
-
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
-{
- i915_vma_unpin_and_release(&engine->scratch, 0);
-}
-
static void cleanup_status_page(struct intel_engine_cs *engine)
{
if (HWS_NEEDS_PHYSICAL(engine->i915)) {
@@ -704,8 +667,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- intel_engine_cleanup_scratch(engine);
-
cleanup_status_page(engine);
intel_engine_fini_breadcrumbs(engine);
@@ -720,6 +681,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
__intel_context_unpin(i915->kernel_context, engine);
i915_timeline_fini(&engine->timeline);
+
+ intel_wa_list_free(&engine->ctx_wa_list);
+ intel_wa_list_free(&engine->wa_list);
+ intel_wa_list_free(&engine->whitelist);
}
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
@@ -809,7 +774,7 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
u32 slice = fls(sseu->slice_mask);
u32 subslice = fls(sseu->subslice_mask[slice]);
- if (INTEL_GEN(dev_priv) == 10)
+ if (IS_GEN10(dev_priv))
mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
GEN8_MCR_SUBSLICE(subslice);
else if (INTEL_GEN(dev_priv) >= 11)
@@ -1534,10 +1499,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
count = 0;
drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
- struct i915_priolist *p =
- rb_entry(rb, typeof(*p), node);
+ struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+ int i;
- list_for_each_entry(rq, &p->requests, sched.link) {
+ priolist_for_each_request(rq, p, i) {
if (count++ < MAX_REQUESTS_TO_SHOW - 1)
print_request(m, rq, "\t\tQ ");
else
@@ -1559,8 +1524,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
- drm_printf(m, "\t%s [%d] waiting for %x\n",
- w->tsk->comm, w->tsk->pid, w->seqno);
+ drm_printf(m, "\t%s [%d:%c] waiting for %x\n",
+ w->tsk->comm, w->tsk->pid,
+ task_state_to_char(w->tsk),
+ w->seqno);
}
spin_unlock(&b->rb_lock);
local_irq_restore(flags);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 74d425c700ef..f23570c44323 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -84,7 +84,7 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
int lines;
intel_fbc_get_plane_source_size(cache, NULL, &lines);
- if (INTEL_GEN(dev_priv) == 7)
+ if (IS_GEN7(dev_priv))
lines = min(lines, 2048);
else if (INTEL_GEN(dev_priv) >= 8)
lines = min(lines, 2560);
@@ -674,6 +674,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->plane.adjusted_y = plane_state->color_plane[0].y;
cache->plane.y = plane_state->base.src.y1 >> 16;
+ cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode;
+
if (!cache->plane.visible)
return;
@@ -748,6 +750,12 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
+ if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
+ cache->fb.format->has_alpha) {
+ fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
+ return false;
+ }
+
/* WaFbcExceedCdClockThreshold:hsw,bdw */
if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
@@ -1301,7 +1309,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
fbc->active = false;
if (need_fbc_vtd_wa(dev_priv))
- mkwrite_device_info(dev_priv)->has_fbc = false;
+ mkwrite_device_info(dev_priv)->display.has_fbc = false;
i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index f99332972b7a..fb5bb5b32a60 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -593,7 +593,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
* pipe. Note we need to use the selected fb's pitch and bpp
* rather than the current pipe's, since they differ.
*/
- cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay;
+ cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.format->cpp[0];
if (fb->base.pitches[0] < cur_size) {
DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
@@ -603,13 +603,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
break;
}
- cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
+ cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
- intel_crtc->config->base.adjusted_mode.crtc_hdisplay,
- intel_crtc->config->base.adjusted_mode.crtc_vdisplay,
+ crtc->state->adjusted_mode.crtc_hdisplay,
+ crtc->state->adjusted_mode.crtc_vdisplay,
fb->base.format->cpp[0] * 8,
cur_size);
@@ -672,7 +672,7 @@ int intel_fbdev_init(struct drm_device *dev)
struct intel_fbdev *ifbdev;
int ret;
- if (WARN_ON(INTEL_INFO(dev_priv)->num_pipes == 0))
+ if (WARN_ON(!HAS_DISPLAY(dev_priv)))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 230aea69385d..8660af3fd755 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -50,7 +50,8 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
unsigned int i;
guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
- guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;
+ guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
+ BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
for (i = 0; i < guc->send_regs.count; i++) {
fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
@@ -521,6 +522,44 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
+/*
+ * The ENTER/EXIT_S_STATE actions queue the save/restore operation in GuC FW and
+ * then return, so waiting on the H2G is not enough to guarantee GuC is done.
+ * When all the processing is done, GuC writes INTEL_GUC_SLEEP_STATE_SUCCESS to
+ * scratch register 14, so we can poll on that. Note that GuC does not ensure
+ * that the value in the register is different from
+ * INTEL_GUC_SLEEP_STATE_SUCCESS while the action is in progress so we need to
+ * take care of that ourselves as well.
+ */
+static int guc_sleep_state_action(struct intel_guc *guc,
+ const u32 *action, u32 len)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ int ret;
+ u32 status;
+
+ I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);
+
+ ret = intel_guc_send(guc, action, len);
+ if (ret)
+ return ret;
+
+ ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14),
+ INTEL_GUC_SLEEP_STATE_INVALID_MASK,
+ 0, 0, 10, &status);
+ if (ret)
+ return ret;
+
+ if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
+ DRM_ERROR("GuC failed to change sleep state. "
+ "action=0x%x, err=%u\n",
+ action[0], status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/**
* intel_guc_suspend() - notify GuC entering suspend state
* @guc: the guc
@@ -533,7 +572,7 @@ int intel_guc_suspend(struct intel_guc *guc)
intel_guc_ggtt_offset(guc, guc->shared_data)
};
- return intel_guc_send(guc, data, ARRAY_SIZE(data));
+ return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
}
/**
@@ -571,7 +610,7 @@ int intel_guc_resume(struct intel_guc *guc)
intel_guc_ggtt_offset(guc, guc->shared_data)
};
- return intel_guc_send(guc, data, ARRAY_SIZE(data));
+ return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
}
/**
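
guc_sleep_state_action() above pre-loads scratch register 14 with a value the firmware never reports, sends the H2G action, and then polls until the sentinel is replaced by a real status. The following stripped-down user-space sketch shows the same write-sentinel/poll/check sequence; the register accessors and the bounded retry loop are stand-ins for the driver's MMIO helpers and __intel_wait_for_register().

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/*
 * Stand-ins for the driver's MMIO accessors; in i915 these would be
 * I915_WRITE()/I915_READ() on SOFT_SCRATCH(14).
 */
static uint32_t scratch14;
static void write_scratch14(uint32_t v) { scratch14 = v; }
static uint32_t read_scratch14(void)    { return scratch14; }

#define SLEEP_STATE_SUCCESS		0x0u
#define SLEEP_STATE_INVALID_MASK	0x80000000u

static int sleep_state_action(void)
{
	int tries;

	/*
	 * Pre-load a value the firmware will never report, so a stale
	 * SUCCESS left over from a previous cycle cannot satisfy the poll.
	 */
	write_scratch14(SLEEP_STATE_INVALID_MASK);

	/* send the H2G action here; simulated as an immediate completion */
	write_scratch14(SLEEP_STATE_SUCCESS);

	/* poll until the sentinel bit clears, i.e. the FW wrote a status */
	for (tries = 0; tries < 10; tries++) {
		uint32_t status = read_scratch14();

		if (!(status & SLEEP_STATE_INVALID_MASK))
			return status == SLEEP_STATE_SUCCESS ? 0 : -EIO;
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("sleep_state_action() = %d\n", sleep_state_action());
	return 0;
}
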
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index ad42faf48c46..0f1c4f9ebfd8 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -95,6 +95,11 @@ struct intel_guc {
void (*notify)(struct intel_guc *guc);
};
+static inline bool intel_guc_is_alive(struct intel_guc *guc)
+{
+ return intel_uc_fw_is_loaded(&guc->fw);
+}
+
static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index a9e6fcce467c..a67144ee5ceb 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -78,7 +78,8 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw)
guc_fw->major_ver_wanted = KBL_FW_MAJOR;
guc_fw->minor_ver_wanted = KBL_FW_MINOR;
} else {
- DRM_WARN("%s: No firmware known for this platform!\n",
+ dev_info(dev_priv->drm.dev,
+ "%s: No firmware known for this platform!\n",
intel_uc_fw_type_repr(guc_fw->type));
}
}
@@ -125,66 +126,26 @@ static void guc_prepare_xfer(struct intel_guc *guc)
}
/* Copy RSA signature from the fw image to HW for verification */
-static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
+static void guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_uc_fw *guc_fw = &guc->fw;
- struct sg_table *sg = vma->pages;
u32 rsa[UOS_RSA_SCRATCH_COUNT];
int i;
- if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa),
- guc_fw->rsa_offset) != sizeof(rsa))
- return -EINVAL;
+ sg_pcopy_to_buffer(vma->pages->sgl, vma->pages->nents,
+ rsa, sizeof(rsa), guc->fw.rsa_offset);
for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
-
- return 0;
}
-/*
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Architecturally, the DMA engine is bidirectional, and can potentially even
- * transfer between GTT locations. This functionality is left out of the API
- * for now as there is no need for it.
- */
-static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
+static bool guc_xfer_completed(struct intel_guc *guc, u32 *status)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_uc_fw *guc_fw = &guc->fw;
- unsigned long offset;
- u32 status;
- int ret;
-
- /*
- * The header plus uCode will be copied to WOPCM via DMA, excluding any
- * other components
- */
- I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
-
- /* Set the source address for the new blob */
- offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
- I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
- I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
- /*
- * Set the DMA destination. Current uCode expects the code to be
- * loaded at 8k; locations below this are used for the stack.
- */
- I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
- I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
-
- /* Finally start the DMA */
- I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
-
- /* Wait for DMA to finish */
- ret = __intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0,
- 2, 100, &status);
- DRM_DEBUG_DRIVER("GuC DMA status %#x\n", status);
-
- return ret;
+ /* Did we complete the xfer? */
+ *status = I915_READ(DMA_CTRL);
+ return !(*status & START_DMA);
}
/*
@@ -217,8 +178,8 @@ static int guc_wait_ucode(struct intel_guc *guc)
* NB: Docs recommend not using the interrupt for completion.
* Measurements indicate this should take no more than 20ms, so a
* timeout here indicates that the GuC has failed and is unusable.
- * (Higher levels of the driver will attempt to fall back to
- * execlist mode if this happens.)
+ * (Higher levels of the driver may decide to reset the GuC and
+ * attempt the ucode load again if this happens.)
*/
ret = wait_for(guc_ready(guc, &status), 100);
DRM_DEBUG_DRIVER("GuC status %#x\n", status);
@@ -228,10 +189,52 @@ static int guc_wait_ucode(struct intel_guc *guc)
ret = -ENOEXEC;
}
+ if (ret == 0 && !guc_xfer_completed(guc, &status)) {
+ DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n",
+ status);
+ ret = -ENXIO;
+ }
+
return ret;
}
/*
+ * Transfer the firmware image to RAM for execution by the microcontroller.
+ *
+ * Architecturally, the DMA engine is bidirectional, and can potentially even
+ * transfer between GTT locations. This functionality is left out of the API
+ * for now as there is no need for it.
+ */
+static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uc_fw *guc_fw = &guc->fw;
+ unsigned long offset;
+
+ /*
+ * The header plus uCode will be copied to WOPCM via DMA, excluding any
+ * other components
+ */
+ I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
+
+ /* Set the source address for the new blob */
+ offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
+ I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
+ I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+
+ /*
+ * Set the DMA destination. Current uCode expects the code to be
+ * loaded at 8k; locations below this are used for the stack.
+ */
+ I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
+ I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+ /* Finally start the DMA */
+ I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
+
+ return guc_wait_ucode(guc);
+}
+/*
* Load the GuC firmware blob into the MinuteIA.
*/
static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
@@ -251,17 +254,9 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
* by the DMA engine in one operation, whereas the RSA signature is
* loaded via MMIO.
*/
- ret = guc_xfer_rsa(guc, vma);
- if (ret)
- DRM_WARN("GuC firmware signature xfer error %d\n", ret);
+ guc_xfer_rsa(guc, vma);
ret = guc_xfer_ucode(guc, vma);
- if (ret)
- DRM_WARN("GuC firmware code xfer error %d\n", ret);
-
- ret = guc_wait_ucode(guc);
- if (ret)
- DRM_ERROR("GuC firmware xfer error %d\n", ret);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
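
The reworked load path starts the DMA, waits for the GuC to report ready in guc_wait_ucode(), and then additionally confirms via guc_xfer_completed() that hardware has cleared the self-clearing START_DMA bit. A toy sketch of that completion check follows; the register and bit position are simulated, not the real DMA_CTRL layout.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Bit position is illustrative; it only mimics a self-clearing START bit. */
#define START_DMA	(1u << 0)

/* fake DMA control register for the sketch */
static uint32_t dma_ctrl;

/* the transfer is done once hardware has cleared the START bit */
static bool xfer_completed(uint32_t *status)
{
	*status = dma_ctrl;
	return !(*status & START_DMA);
}

int main(void)
{
	uint32_t status;

	dma_ctrl = START_DMA;			/* DMA kicked off */
	printf("in flight: done=%d\n", xfer_completed(&status));

	dma_ctrl &= ~START_DMA;			/* hardware finished the copy */
	printf("finished:  done=%d status=%#x\n", xfer_completed(&status),
	       (unsigned int)status);
	return 0;
}
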
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 8382d591c784..b2f5148f4f17 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -39,6 +39,11 @@
#define GUC_VIDEO_ENGINE2 4
#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
+#define GUC_DOORBELL_INVALID 256
+
+#define GUC_DB_SIZE (PAGE_SIZE)
+#define GUC_WQ_SIZE (PAGE_SIZE * 2)
+
/* Work queue item header definitions */
#define WQ_STATUS_ACTIVE 1
#define WQ_STATUS_SUSPENDED 2
@@ -59,9 +64,6 @@
#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
-#define GUC_DOORBELL_ENABLED 1
-#define GUC_DOORBELL_DISABLED 0
-
#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0)
#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1)
#define GUC_STAGE_DESC_ATTR_KERNEL BIT(2)
@@ -219,26 +221,6 @@ struct uc_css_header {
u32 header_info;
} __packed;
-struct guc_doorbell_info {
- u32 db_status;
- u32 cookie;
- u32 reserved[14];
-} __packed;
-
-union guc_doorbell_qw {
- struct {
- u32 db_status;
- u32 cookie;
- };
- u64 value_qw;
-} __packed;
-
-#define GUC_NUM_DOORBELLS 256
-#define GUC_DOORBELL_INVALID (GUC_NUM_DOORBELLS)
-
-#define GUC_DB_SIZE (PAGE_SIZE)
-#define GUC_WQ_SIZE (PAGE_SIZE * 2)
-
/* Work item for submitting workloads into work queue of GuC. */
struct guc_wq_item {
u32 header;
@@ -601,7 +583,9 @@ struct guc_shared_ctx_data {
* registers, where first register holds data treated as message header,
* and other registers are used to hold message payload.
*
- * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8
+ * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8,
+ * but no H2G command takes more than 8 parameters and the GuC FW
+ * itself uses an 8-element array to store the H2G message.
*
* +-----------+---------+---------+---------+
* | MMIO[0] | MMIO[1] | ... | MMIO[n] |
@@ -633,6 +617,8 @@ struct guc_shared_ctx_data {
* field.
*/
+#define GUC_MAX_MMIO_MSG_LEN 8
+
#define INTEL_GUC_MSG_TYPE_SHIFT 28
#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT)
#define INTEL_GUC_MSG_DATA_SHIFT 16
@@ -687,6 +673,13 @@ enum intel_guc_report_status {
INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
};
+enum intel_guc_sleep_state_status {
+ INTEL_GUC_SLEEP_STATE_SUCCESS = 0x0,
+ INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x1,
+ INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x2
+#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
+};
+
#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0)
#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4
#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
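
The comment above describes H2G messages sent through software scratch registers, with the first register carrying a header whose top nibble is the message type (INTEL_GUC_MSG_TYPE_SHIFT/MASK) and further data packed from bit 16 up, limited to GUC_MAX_MMIO_MSG_LEN registers. A small sketch of that bit packing follows; the action value and payload are made up, and the semantics of the data field beyond its position are not taken from this header.

#include <stdio.h>
#include <assert.h>

/* Field layout as in the header above; the action code below is made up. */
#define MSG_TYPE_SHIFT		28
#define MSG_TYPE_MASK		(0xFu << MSG_TYPE_SHIFT)
#define MSG_DATA_SHIFT		16
#define MAX_MMIO_MSG_LEN	8

/* first dword of a message: type in the top nibble, data packed below it */
static unsigned int h2g_header(unsigned int type, unsigned int data)
{
	return ((type << MSG_TYPE_SHIFT) & MSG_TYPE_MASK) |
	       (data << MSG_DATA_SHIFT);
}

int main(void)
{
	unsigned int msg[MAX_MMIO_MSG_LEN] = { h2g_header(0x1, 0x30), 0xdeadbeef };

	assert(((msg[0] & MSG_TYPE_MASK) >> MSG_TYPE_SHIFT) == 0x1);
	printf("header=%#010x payload[1]=%#x\n", msg[0], msg[1]);
	return 0;
}
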
diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h
index d86084742a4a..57e7ad522c2f 100644
--- a/drivers/gpu/drm/i915/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/intel_guc_reg.h
@@ -104,6 +104,18 @@
#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
#define GUC_SEND_TRIGGER (1<<0)
+#define GUC_NUM_DOORBELLS 256
+
+/* format of the HW-monitored doorbell cacheline */
+struct guc_doorbell_info {
+ u32 db_status;
+#define GUC_DOORBELL_DISABLED 0
+#define GUC_DOORBELL_ENABLED 1
+
+ u32 cookie;
+ u32 reserved[14];
+} __packed;
+
#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8)
#define GEN8_DRB_VALID (1<<0)
#define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4)
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index a81f04d46e87..1570dcbe249c 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -192,7 +192,15 @@ static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
return client->vaddr + client->doorbell_offset;
}
-static void __create_doorbell(struct intel_guc_client *client)
+static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
+ return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
+}
+
+static void __init_doorbell(struct intel_guc_client *client)
{
struct guc_doorbell_info *doorbell;
@@ -201,21 +209,19 @@ static void __create_doorbell(struct intel_guc_client *client)
doorbell->cookie = 0;
}
-static void __destroy_doorbell(struct intel_guc_client *client)
+static void __fini_doorbell(struct intel_guc_client *client)
{
- struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
struct guc_doorbell_info *doorbell;
u16 db_id = client->doorbell_id;
doorbell = __get_doorbell(client);
doorbell->db_status = GUC_DOORBELL_DISABLED;
- doorbell->cookie = 0;
/* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
* to go to zero after updating db_status before we call the GuC to
* release the doorbell
*/
- if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10))
+ if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10))
WARN_ONCE(true, "Doorbell never became invalid after disable\n");
}
@@ -227,11 +233,11 @@ static int create_doorbell(struct intel_guc_client *client)
return -ENODEV; /* internal setup error, should never happen */
__update_doorbell_desc(client, client->doorbell_id);
- __create_doorbell(client);
+ __init_doorbell(client);
ret = __guc_allocate_doorbell(client->guc, client->stage_id);
if (ret) {
- __destroy_doorbell(client);
+ __fini_doorbell(client);
__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
client->stage_id, ret);
@@ -247,7 +253,7 @@ static int destroy_doorbell(struct intel_guc_client *client)
GEM_BUG_ON(!has_doorbell(client));
- __destroy_doorbell(client);
+ __fini_doorbell(client);
ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
if (ret)
DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
@@ -282,8 +288,7 @@ __get_process_desc(struct intel_guc_client *client)
/*
* Initialise the process descriptor shared with the GuC firmware.
*/
-static void guc_proc_desc_init(struct intel_guc *guc,
- struct intel_guc_client *client)
+static void guc_proc_desc_init(struct intel_guc_client *client)
{
struct guc_process_desc *desc;
@@ -304,6 +309,14 @@ static void guc_proc_desc_init(struct intel_guc *guc,
desc->priority = client->priority;
}
+static void guc_proc_desc_fini(struct intel_guc_client *client)
+{
+ struct guc_process_desc *desc;
+
+ desc = __get_process_desc(client);
+ memset(desc, 0, sizeof(*desc));
+}
+
static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
struct i915_vma *vma;
@@ -341,9 +354,9 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
* data structures relating to this client (doorbell, process descriptor,
* write queue, etc).
*/
-static void guc_stage_desc_init(struct intel_guc *guc,
- struct intel_guc_client *client)
+static void guc_stage_desc_init(struct intel_guc_client *client)
{
+ struct intel_guc *guc = client->guc;
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *engine;
struct i915_gem_context *ctx = client->owner;
@@ -424,8 +437,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
desc->desc_private = ptr_to_u64(client);
}
-static void guc_stage_desc_fini(struct intel_guc *guc,
- struct intel_guc_client *client)
+static void guc_stage_desc_fini(struct intel_guc_client *client)
{
struct guc_stage_desc *desc;
@@ -486,14 +498,6 @@ static void guc_wq_item_append(struct intel_guc_client *client,
WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}
-static void guc_reset_wq(struct intel_guc_client *client)
-{
- struct guc_process_desc *desc = __get_process_desc(client);
-
- desc->head = 0;
- desc->tail = 0;
-}
-
static void guc_ring_doorbell(struct intel_guc_client *client)
{
struct guc_doorbell_info *db;
@@ -746,30 +750,28 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
+ int i;
- list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+ priolist_for_each_request_consume(rq, rn, p, i) {
if (last && rq->hw_context != last->hw_context) {
- if (port == last_port) {
- __list_del_many(&p->requests,
- &rq->sched.link);
+ if (port == last_port)
goto done;
- }
if (submit)
port_assign(port, last);
port++;
}
- INIT_LIST_HEAD(&rq->sched.link);
+ list_del_init(&rq->sched.link);
__i915_request_submit(rq);
trace_i915_request_in(rq, port_index(port, execlists));
+
last = rq;
submit = true;
}
rb_erase_cached(&p->node, &execlists->queue);
- INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
@@ -791,19 +793,8 @@ done:
static void guc_dequeue(struct intel_engine_cs *engine)
{
- unsigned long flags;
- bool submit;
-
- local_irq_save(flags);
-
- spin_lock(&engine->timeline.lock);
- submit = __guc_dequeue(engine);
- spin_unlock(&engine->timeline.lock);
-
- if (submit)
+ if (__guc_dequeue(engine))
guc_submit(engine);
-
- local_irq_restore(flags);
}
static void guc_submission_tasklet(unsigned long data)
@@ -812,6 +803,9 @@ static void guc_submission_tasklet(unsigned long data)
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
struct i915_request *rq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
rq = port_request(port);
while (rq && i915_request_completed(rq)) {
@@ -835,6 +829,8 @@ static void guc_submission_tasklet(unsigned long data)
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
guc_dequeue(engine);
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static struct i915_request *
@@ -877,72 +873,31 @@ guc_reset_prepare(struct intel_engine_cs *engine)
/* Check that a doorbell register is in the expected state */
static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 drbregl;
bool valid;
- GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
+ GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
- drbregl = I915_READ(GEN8_DRBREGL(db_id));
- valid = drbregl & GEN8_DRB_VALID;
+ valid = __doorbell_valid(guc, db_id);
if (test_bit(db_id, guc->doorbell_bitmap) == valid)
return true;
- DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n",
- db_id, drbregl, yesno(valid));
+ DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n",
+ db_id, yesno(valid));
return false;
}
static bool guc_verify_doorbells(struct intel_guc *guc)
{
+ bool doorbells_ok = true;
u16 db_id;
for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
if (!doorbell_ok(guc, db_id))
- return false;
-
- return true;
-}
-
-static int guc_clients_doorbell_init(struct intel_guc *guc)
-{
- int ret;
-
- ret = create_doorbell(guc->execbuf_client);
- if (ret)
- return ret;
-
- if (guc->preempt_client) {
- ret = create_doorbell(guc->preempt_client);
- if (ret) {
- destroy_doorbell(guc->execbuf_client);
- return ret;
- }
- }
-
- return 0;
-}
-
-static void guc_clients_doorbell_fini(struct intel_guc *guc)
-{
- /*
- * By the time we're here, GuC has already been reset.
- * Instead of trying (in vain) to communicate with it, let's just
- * cleanup the doorbell HW and our internal state.
- */
- if (guc->preempt_client) {
- __destroy_doorbell(guc->preempt_client);
- __update_doorbell_desc(guc->preempt_client,
- GUC_DOORBELL_INVALID);
- }
+ doorbells_ok = false;
- if (guc->execbuf_client) {
- __destroy_doorbell(guc->execbuf_client);
- __update_doorbell_desc(guc->execbuf_client,
- GUC_DOORBELL_INVALID);
- }
+ return doorbells_ok;
}
/**
@@ -1005,6 +960,10 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
}
client->vaddr = vaddr;
+ ret = reserve_doorbell(client);
+ if (ret)
+ goto err_vaddr;
+
client->doorbell_offset = __select_cacheline(guc);
/*
@@ -1017,13 +976,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
else
client->proc_desc_offset = (GUC_DB_SIZE / 2);
- guc_proc_desc_init(guc, client);
- guc_stage_desc_init(guc, client);
-
- ret = reserve_doorbell(client);
- if (ret)
- goto err_vaddr;
-
DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
priority, client, client->engines, client->stage_id);
DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
@@ -1045,7 +997,6 @@ err_client:
static void guc_client_free(struct intel_guc_client *client)
{
unreserve_doorbell(client);
- guc_stage_desc_fini(client->guc, client);
i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
ida_simple_remove(&client->guc->stage_ids, client->stage_id);
kfree(client);
@@ -1112,6 +1063,69 @@ static void guc_clients_destroy(struct intel_guc *guc)
guc_client_free(client);
}
+static int __guc_client_enable(struct intel_guc_client *client)
+{
+ int ret;
+
+ guc_proc_desc_init(client);
+ guc_stage_desc_init(client);
+
+ ret = create_doorbell(client);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ guc_stage_desc_fini(client);
+ guc_proc_desc_fini(client);
+ return ret;
+}
+
+static void __guc_client_disable(struct intel_guc_client *client)
+{
+ /*
+ * By the time we're here, GuC may have already been reset. If that is
+ * the case, instead of trying (in vain) to communicate with it, let's
+ * just cleanup the doorbell HW and our internal state.
+ */
+ if (intel_guc_is_alive(client->guc))
+ destroy_doorbell(client);
+ else
+ __fini_doorbell(client);
+
+ guc_stage_desc_fini(client);
+ guc_proc_desc_fini(client);
+}
+
+static int guc_clients_enable(struct intel_guc *guc)
+{
+ int ret;
+
+ ret = __guc_client_enable(guc->execbuf_client);
+ if (ret)
+ return ret;
+
+ if (guc->preempt_client) {
+ ret = __guc_client_enable(guc->preempt_client);
+ if (ret) {
+ __guc_client_disable(guc->execbuf_client);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void guc_clients_disable(struct intel_guc *guc)
+{
+ if (guc->preempt_client)
+ __guc_client_disable(guc->preempt_client);
+
+ if (guc->execbuf_client)
+ __guc_client_disable(guc->execbuf_client);
+}
+
/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
@@ -1295,15 +1309,11 @@ int intel_guc_submission_enable(struct intel_guc *guc)
GEM_BUG_ON(!guc->execbuf_client);
- guc_reset_wq(guc->execbuf_client);
- if (guc->preempt_client)
- guc_reset_wq(guc->preempt_client);
-
err = intel_guc_sample_forcewake(guc);
if (err)
return err;
- err = guc_clients_doorbell_init(guc);
+ err = guc_clients_enable(guc);
if (err)
return err;
@@ -1325,7 +1335,7 @@ void intel_guc_submission_disable(struct intel_guc *guc)
GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
guc_interrupts_release(dev_priv);
- guc_clients_doorbell_fini(guc);
+ guc_clients_disable(guc);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
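
guc_clients_enable()/guc_clients_disable() above bring each client's descriptors and doorbell up as a unit and roll the execbuf client back if enabling the optional preempt client fails. Below is a self-contained sketch of that enable-both-or-roll-back pattern with toy clients; the failure condition is simulated rather than a real doorbell error.

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

/*
 * Toy client; the real driver pairs a process descriptor, a stage
 * descriptor and a doorbell per client.
 */
struct client {
	const char *name;
	bool enabled;
};

static int client_enable(struct client *c)
{
	/* descriptor setup would go here; a missing name simulates failure */
	if (!c->name)
		return -ENODEV;
	c->enabled = true;
	printf("enabled %s\n", c->name);
	return 0;
}

static void client_disable(struct client *c)
{
	if (!c->enabled)
		return;
	c->enabled = false;
	printf("disabled %s\n", c->name);
}

/* enable both clients or roll back the first, as guc_clients_enable() does */
static int clients_enable(struct client *execbuf, struct client *preempt)
{
	int ret = client_enable(execbuf);

	if (ret)
		return ret;

	if (preempt) {
		ret = client_enable(preempt);
		if (ret) {
			client_disable(execbuf);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	struct client eb = { "execbuf", false };
	struct client pr = { NULL, false };	/* will fail -> triggers rollback */

	printf("ret=%d\n", clients_enable(&eb, &pr));
	return 0;
}
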
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 26e48fc95543..1bf487f94254 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -16,6 +16,62 @@
#define KEY_LOAD_TRIES 5
+static
+bool intel_hdcp_is_ksv_valid(u8 *ksv)
+{
+ int i, ones = 0;
+ /* KSV has 20 1's and 20 0's */
+ for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
+ ones += hweight8(ksv[i]);
+ if (ones != 20)
+ return false;
+
+ return true;
+}
+
+static
+int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
+ const struct intel_hdcp_shim *shim, u8 *bksv)
+{
+ int ret, i, tries = 2;
+
+ /* HDCP spec states that we must retry the bksv if it is invalid */
+ for (i = 0; i < tries; i++) {
+ ret = shim->read_bksv(intel_dig_port, bksv);
+ if (ret)
+ return ret;
+ if (intel_hdcp_is_ksv_valid(bksv))
+ break;
+ }
+ if (i == tries) {
+ DRM_DEBUG_KMS("Bksv is invalid\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* Is HDCP1.4 capable on Platform and Sink */
+bool intel_hdcp_capable(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ const struct intel_hdcp_shim *shim = connector->hdcp.shim;
+ bool capable = false;
+ u8 bksv[5];
+
+ if (!shim)
+ return capable;
+
+ if (shim->hdcp_capable) {
+ shim->hdcp_capable(intel_dig_port, &capable);
+ } else {
+ if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
+ capable = true;
+ }
+
+ return capable;
+}
+
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
const struct intel_hdcp_shim *shim)
{
@@ -168,18 +224,6 @@ u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
}
static
-bool intel_hdcp_is_ksv_valid(u8 *ksv)
-{
- int i, ones = 0;
- /* KSV has 20 1's and 20 0's */
- for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
- ones += hweight8(ksv[i]);
- if (ones != 20)
- return false;
- return true;
-}
-
-static
int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
const struct intel_hdcp_shim *shim,
u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
@@ -383,7 +427,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
HDCP_SHA1_COMPLETE,
HDCP_SHA1_COMPLETE, 1)) {
- DRM_DEBUG_KMS("Timed out waiting for SHA1 complete\n");
+ DRM_ERROR("Timed out waiting for SHA1 complete\n");
return -ETIMEDOUT;
}
if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
@@ -404,7 +448,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
if (ret) {
- DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
+ DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
return ret;
}
@@ -414,7 +458,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
- DRM_ERROR("Max Topology Limit Exceeded\n");
+ DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
return -EPERM;
}
@@ -450,7 +494,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
}
if (i == tries) {
- DRM_ERROR("V Prime validation failed.(%d)\n", ret);
+ DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
goto err;
}
@@ -499,7 +543,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
if (ret)
return ret;
if (!hdcp_capable) {
- DRM_ERROR("Panel is not HDCP capable\n");
+ DRM_DEBUG_KMS("Panel is not HDCP capable\n");
return -EINVAL;
}
}
@@ -527,18 +571,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
memset(&bksv, 0, sizeof(bksv));
- /* HDCP spec states that we must retry the bksv if it is invalid */
- for (i = 0; i < tries; i++) {
- ret = shim->read_bksv(intel_dig_port, bksv.shim);
- if (ret)
- return ret;
- if (intel_hdcp_is_ksv_valid(bksv.shim))
- break;
- }
- if (i == tries) {
- DRM_ERROR("HDCP failed, Bksv is invalid\n");
- return -ENODEV;
- }
+ ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
+ if (ret < 0)
+ return ret;
I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
@@ -594,8 +629,8 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
}
if (i == tries) {
- DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
- I915_READ(PORT_HDCP_STATUS(port)));
+ DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
+ I915_READ(PORT_HDCP_STATUS(port)));
return -ETIMEDOUT;
}
@@ -618,14 +653,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
return 0;
}
-static
-struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
-{
- return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
-}
-
static int _intel_hdcp_disable(struct intel_connector *connector)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
@@ -641,7 +671,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
return -ETIMEDOUT;
}
- ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false);
+ ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
if (ret) {
DRM_ERROR("Failed to disable HDCP signalling\n");
return ret;
@@ -653,6 +683,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
static int _intel_hdcp_enable(struct intel_connector *connector)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
int i, ret, tries = 3;
@@ -677,8 +708,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
/* In case of authentication failures, HDCP spec expects reauth. */
for (i = 0; i < tries; i++) {
- ret = intel_hdcp_auth(conn_to_dig_port(connector),
- connector->hdcp_shim);
+ ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim);
if (!ret)
return 0;
@@ -688,42 +718,50 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
_intel_hdcp_disable(connector);
}
- DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret);
+ DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
return ret;
}
+static inline
+struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
+{
+ return container_of(hdcp, struct intel_connector, hdcp);
+}
+
static void intel_hdcp_check_work(struct work_struct *work)
{
- struct intel_connector *connector = container_of(to_delayed_work(work),
- struct intel_connector,
- hdcp_check_work);
+ struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
+ struct intel_hdcp,
+ check_work);
+ struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+
if (!intel_hdcp_check_link(connector))
- schedule_delayed_work(&connector->hdcp_check_work,
+ schedule_delayed_work(&hdcp->check_work,
DRM_HDCP_CHECK_PERIOD_MS);
}
static void intel_hdcp_prop_work(struct work_struct *work)
{
- struct intel_connector *connector = container_of(work,
- struct intel_connector,
- hdcp_prop_work);
+ struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
+ prop_work);
+ struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
struct drm_device *dev = connector->base.dev;
struct drm_connector_state *state;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- mutex_lock(&connector->hdcp_mutex);
+ mutex_lock(&hdcp->mutex);
/*
* This worker is only used to flip between ENABLED/DESIRED. Either of
- * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED,
+ * those to UNDESIRED is handled by core. If value == UNDESIRED,
* we're running just after hdcp has been disabled, so just exit
*/
- if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
state = connector->base.state;
- state->content_protection = connector->hdcp_value;
+ state->content_protection = hdcp->value;
}
- mutex_unlock(&connector->hdcp_mutex);
+ mutex_unlock(&hdcp->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
@@ -735,8 +773,9 @@ bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
}
int intel_hdcp_init(struct intel_connector *connector,
- const struct intel_hdcp_shim *hdcp_shim)
+ const struct intel_hdcp_shim *shim)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
ret = drm_connector_attach_content_protection_property(
@@ -744,51 +783,53 @@ int intel_hdcp_init(struct intel_connector *connector,
if (ret)
return ret;
- connector->hdcp_shim = hdcp_shim;
- mutex_init(&connector->hdcp_mutex);
- INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work);
- INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work);
+ hdcp->shim = shim;
+ mutex_init(&hdcp->mutex);
+ INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
+ INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
return 0;
}
int intel_hdcp_enable(struct intel_connector *connector)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- if (!connector->hdcp_shim)
+ if (!hdcp->shim)
return -ENOENT;
- mutex_lock(&connector->hdcp_mutex);
+ mutex_lock(&hdcp->mutex);
ret = _intel_hdcp_enable(connector);
if (ret)
goto out;
- connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
- schedule_work(&connector->hdcp_prop_work);
- schedule_delayed_work(&connector->hdcp_check_work,
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
+ schedule_delayed_work(&hdcp->check_work,
DRM_HDCP_CHECK_PERIOD_MS);
out:
- mutex_unlock(&connector->hdcp_mutex);
+ mutex_unlock(&hdcp->mutex);
return ret;
}
int intel_hdcp_disable(struct intel_connector *connector)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
int ret = 0;
- if (!connector->hdcp_shim)
+ if (!hdcp->shim)
return -ENOENT;
- mutex_lock(&connector->hdcp_mutex);
+ mutex_lock(&hdcp->mutex);
- if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
ret = _intel_hdcp_disable(connector);
}
- mutex_unlock(&connector->hdcp_mutex);
- cancel_delayed_work_sync(&connector->hdcp_check_work);
+ mutex_unlock(&hdcp->mutex);
+ cancel_delayed_work_sync(&hdcp->check_work);
return ret;
}
@@ -828,17 +869,18 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
/* Implements Part 3 of the HDCP authorization procedure */
int intel_hdcp_check_link(struct intel_connector *connector)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
int ret = 0;
- if (!connector->hdcp_shim)
+ if (!hdcp->shim)
return -ENOENT;
- mutex_lock(&connector->hdcp_mutex);
+ mutex_lock(&hdcp->mutex);
- if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
goto out;
if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
@@ -846,17 +888,15 @@ int intel_hdcp_check_link(struct intel_connector *connector)
connector->base.name, connector->base.base.id,
I915_READ(PORT_HDCP_STATUS(port)));
ret = -ENXIO;
- connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&connector->hdcp_prop_work);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
goto out;
}
- if (connector->hdcp_shim->check_link(intel_dig_port)) {
- if (connector->hdcp_value !=
- DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- connector->hdcp_value =
- DRM_MODE_CONTENT_PROTECTION_ENABLED;
- schedule_work(&connector->hdcp_prop_work);
+ if (hdcp->shim->check_link(intel_dig_port)) {
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
}
goto out;
}
@@ -867,20 +907,20 @@ int intel_hdcp_check_link(struct intel_connector *connector)
ret = _intel_hdcp_disable(connector);
if (ret) {
DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
- connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&connector->hdcp_prop_work);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
goto out;
}
ret = _intel_hdcp_enable(connector);
if (ret) {
- DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
- connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&connector->hdcp_prop_work);
+ DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
goto out;
}
out:
- mutex_unlock(&connector->hdcp_mutex);
+ mutex_unlock(&hdcp->mutex);
return ret;
}
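
intel_hdcp_is_ksv_valid(), now shared by intel_hdcp_read_valid_bksv() and intel_hdcp_capable(), accepts a KSV only if its 40 bits contain exactly twenty ones. A user-space sketch of the same check follows, with __builtin_popcount() standing in for the kernel's hweight8().

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define KSV_LEN	5	/* DRM_HDCP_KSV_LEN in the kernel */

/*
 * Same rule as intel_hdcp_is_ksv_valid(): a 40-bit KSV must contain
 * exactly twenty 1s (and therefore twenty 0s).
 */
static bool ksv_valid(const uint8_t ksv[KSV_LEN])
{
	int i, ones = 0;

	for (i = 0; i < KSV_LEN; i++)
		ones += __builtin_popcount(ksv[i]);

	return ones == 20;
}

int main(void)
{
	/* 0x0f five times -> 4 set bits per byte, 20 in total -> valid */
	const uint8_t good[KSV_LEN] = { 0x0f, 0x0f, 0x0f, 0x0f, 0x0f };
	/* only 8 set bits -> invalid */
	const uint8_t bad[KSV_LEN]  = { 0xff, 0x00, 0x00, 0x00, 0x00 };

	printf("good: %d, bad: %d\n", ksv_valid(good), ksv_valid(bad));
	return 0;
}
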
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index d7234e03fdb0..07e803a604bd 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -115,6 +115,8 @@ static u32 hsw_infoframe_enable(unsigned int type)
switch (type) {
case DP_SDP_VSC:
return VIDEO_DIP_ENABLE_VSC_HSW;
+ case DP_SDP_PPS:
+ return VDIP_ENABLE_PPS;
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI_HSW;
case HDMI_INFOFRAME_TYPE_SPD:
@@ -136,6 +138,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
switch (type) {
case DP_SDP_VSC:
return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
+ case DP_SDP_PPS:
+ return ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_SPD:
@@ -148,14 +152,25 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
}
}
-static void g4x_write_infoframe(struct drm_encoder *encoder,
+static int hsw_dip_data_size(unsigned int type)
+{
+ switch (type) {
+ case DP_SDP_VSC:
+ return VIDEO_DIP_VSC_DATA_SIZE;
+ case DP_SDP_PPS:
+ return VIDEO_DIP_PPS_DATA_SIZE;
+ default:
+ return VIDEO_DIP_DATA_SIZE;
+ }
+}
+
+static void g4x_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len)
{
const u32 *data = frame;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val = I915_READ(VIDEO_DIP_CTL);
int i;
@@ -186,31 +201,29 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(VIDEO_DIP_CTL);
}
-static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
+static bool g4x_infoframe_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val = I915_READ(VIDEO_DIP_CTL);
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
}
-static void ibx_write_infoframe(struct drm_encoder *encoder,
+static void ibx_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len)
{
const u32 *data = frame;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -243,11 +256,10 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
-static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
+static bool ibx_infoframe_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
u32 val = I915_READ(reg);
@@ -255,7 +267,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
@@ -263,14 +275,13 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
-static void cpt_write_infoframe(struct drm_encoder *encoder,
+static void cpt_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len)
{
const u32 *data = frame;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -306,10 +317,10 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
-static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
+static bool cpt_infoframe_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
@@ -321,14 +332,13 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
-static void vlv_write_infoframe(struct drm_encoder *encoder,
+static void vlv_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len)
{
const u32 *data = frame;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -361,18 +371,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
-static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
+static bool vlv_infoframe_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
@@ -380,21 +389,21 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
-static void hsw_write_infoframe(struct drm_encoder *encoder,
+static void hsw_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len)
{
const u32 *data = frame;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
- int data_size = type == DP_SDP_VSC ?
- VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE;
+ int data_size;
int i;
u32 val = I915_READ(ctl_reg);
+ data_size = hsw_dip_data_size(type);
+
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
@@ -415,10 +424,10 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(ctl_reg);
}
-static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
+static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
@@ -443,11 +452,11 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
* trick them by giving an offset into the buffer and moving back the header
* bytes by one.
*/
-static void intel_write_infoframe(struct drm_encoder *encoder,
+static void intel_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
union hdmi_infoframe *frame)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
u8 buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
@@ -457,20 +466,20 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
return;
/* Insert the 'hole' (see big comment above) at position 3 */
- buffer[0] = buffer[1];
- buffer[1] = buffer[2];
- buffer[2] = buffer[3];
+ memmove(&buffer[0], &buffer[1], 3);
buffer[3] = 0;
len++;
- intel_dig_port->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len);
+ intel_dig_port->write_infoframe(encoder,
+ crtc_state,
+ frame->any.type, buffer, len);
}
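
A stand-alone illustration of the header shuffle above (byte values are made up; only the shape of the transform matters): the frame is packed starting at buffer[1], then the three header bytes are moved down by one and byte 3 becomes the zeroed 'hole' the DIP hardware expects.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* [0] spare, [1..3] infoframe header, [4..] checksum + payload (dummy values) */
	unsigned char buf[8] = { 0x00, 0x82, 0x02, 0x0d, 0xaa, 0xbb, 0xcc, 0xdd };

	/* Equivalent to buf[0] = buf[1]; buf[1] = buf[2]; buf[2] = buf[3]; */
	memmove(&buf[0], &buf[1], 3);
	buf[3] = 0;	/* the 'hole' at position 3 */

	for (int i = 0; i < 8; i++)
		printf("%02x ", buf[i]);
	printf("\n");	/* prints: 82 02 0d 00 aa bb cc dd */
	return 0;
}
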
-static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
struct drm_connector *connector = &intel_hdmi->attached_connector->base;
@@ -487,8 +496,10 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
return;
}
- if (crtc_state->ycbcr420)
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
+ else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
else
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
@@ -503,10 +514,11 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
conn_state);
/* TODO: handle pixel repetition for YCBCR420 outputs */
- intel_write_infoframe(encoder, crtc_state, &frame);
+ intel_write_infoframe(encoder, crtc_state,
+ &frame);
}
-static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder,
+static void intel_hdmi_set_spd_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
union hdmi_infoframe frame;
@@ -520,11 +532,12 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder,
frame.spd.sdi = HDMI_SPD_SDI_PC;
- intel_write_infoframe(encoder, crtc_state, &frame);
+ intel_write_infoframe(encoder, crtc_state,
+ &frame);
}
static void
-intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
+intel_hdmi_set_hdmi_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -537,20 +550,21 @@ intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
if (ret < 0)
return;
- intel_write_infoframe(encoder, crtc_state, &frame);
+ intel_write_infoframe(encoder, crtc_state,
+ &frame);
}
-static void g4x_set_infoframes(struct drm_encoder *encoder,
+static void g4x_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
+ u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -658,11 +672,11 @@ static bool gcp_default_phase_possible(int pipe_bpp,
mode->crtc_htotal/2 % pixels_per_group == 0);
}
-static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder,
+static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg;
u32 val = 0;
@@ -690,18 +704,18 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder,
return val != 0;
}
-static void ibx_set_infoframes(struct drm_encoder *encoder,
+static void ibx_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
+ u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -743,14 +757,14 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
-static void cpt_set_infoframes(struct drm_encoder *encoder,
+static void cpt_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -786,18 +800,17 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
-static void vlv_set_infoframes(struct drm_encoder *encoder,
+static void vlv_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
+ u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -839,12 +852,12 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
-static void hsw_set_infoframes(struct drm_encoder *encoder,
+static void hsw_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
u32 val = I915_READ(reg);
@@ -966,13 +979,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
DRM_HDCP_AN_LEN);
if (ret) {
- DRM_ERROR("Write An over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Write An over DDC failed (%d)\n", ret);
return ret;
}
ret = intel_gmbus_output_aksv(adapter);
if (ret < 0) {
- DRM_ERROR("Failed to output aksv (%d)\n", ret);
+ DRM_DEBUG_KMS("Failed to output aksv (%d)\n", ret);
return ret;
}
return 0;
@@ -985,7 +998,7 @@ static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret)
- DRM_ERROR("Read Bksv over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read Bksv over DDC failed (%d)\n", ret);
return ret;
}
@@ -997,7 +1010,7 @@ int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret)
- DRM_ERROR("Read bstatus over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read bstatus over DDC failed (%d)\n", ret);
return ret;
}
@@ -1010,7 +1023,7 @@ int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
return ret;
}
*repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT;
@@ -1025,7 +1038,7 @@ int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret)
- DRM_ERROR("Read Ri' over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read Ri' over DDC failed (%d)\n", ret);
return ret;
}
@@ -1038,7 +1051,7 @@ int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
return ret;
}
*ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY;
@@ -1053,7 +1066,7 @@ int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
if (ret) {
- DRM_ERROR("Read ksv fifo over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read ksv fifo over DDC failed (%d)\n", ret);
return ret;
}
return 0;
@@ -1071,7 +1084,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
part, DRM_HDCP_V_PRIME_PART_LEN);
if (ret)
- DRM_ERROR("Read V'[%d] over DDC failed (%d)\n", i, ret);
+ DRM_DEBUG_KMS("Read V'[%d] over DDC failed (%d)\n", i, ret);
return ret;
}
@@ -1218,7 +1231,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
- if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
+ if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
pipe_config->has_infoframe = true;
if (tmp & SDVO_AUDIO_ENABLE)
@@ -1439,7 +1452,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
- intel_dig_port->set_infoframes(&encoder->base, false,
+ intel_dig_port->set_infoframes(encoder,
+ false,
old_crtc_state, old_conn_state);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
@@ -1598,6 +1612,8 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
struct drm_atomic_state *state = crtc_state->base.state;
struct drm_connector_state *connector_state;
struct drm_connector *connector;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
int i;
if (HAS_GMCH_DISPLAY(dev_priv))
@@ -1625,7 +1641,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
if (connector_state->crtc != crtc_state->base.crtc)
continue;
- if (crtc_state->ycbcr420) {
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
const struct drm_hdmi_info *hdmi = &info->hdmi;
if (bpc == 12 && !(hdmi->y420_dc_modes &
@@ -1646,7 +1662,14 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
/* Display WA #1139: glk */
if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
- crtc_state->base.adjusted_mode.htotal > 5460)
+ adjusted_mode->htotal > 5460)
+ return false;
+
+ /* Display Wa_1405510057:icl */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
+ bpc == 10 && IS_ICELAKE(dev_priv) &&
+ (adjusted_mode->crtc_hblank_end -
+ adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
return true;
@@ -1670,7 +1693,7 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
*clock_12bpc /= 2;
*clock_10bpc /= 2;
*clock_8bpc /= 2;
- config->ycbcr420 = true;
+ config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
/* YCBCR 420 output conversion needs a scaler */
if (skl_update_scaler_crtc(config)) {
@@ -1704,6 +1727,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
if (pipe_config->has_hdmi_sink)
@@ -1974,7 +1998,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
intel_hdmi_prepare(encoder, pipe_config);
- intel_dig_port->set_infoframes(&encoder->base,
+ intel_dig_port->set_infoframes(encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
}
@@ -1992,7 +2016,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
0x2b247878);
- dport->set_infoframes(&encoder->base,
+ dport->set_infoframes(encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
@@ -2063,7 +2087,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
/* Use 800mV-0dB */
chv_set_phy_signal_level(encoder, 128, 102, false);
- dport->set_infoframes(&encoder->base,
+ dport->set_infoframes(encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
@@ -2075,13 +2099,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
chv_phy_release_cl2_override(encoder);
}
+static int
+intel_hdmi_connector_register(struct drm_connector *connector)
+{
+ int ret;
+
+ ret = intel_connector_register(connector);
+ if (ret)
+ return ret;
+
+ i915_debugfs_connector_add(connector);
+
+ return ret;
+}
+
static void intel_hdmi_destroy(struct drm_connector *connector)
{
if (intel_attached_hdmi(connector)->cec_notifier)
cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier);
- kfree(to_intel_connector(connector)->detect_edid);
- drm_connector_cleanup(connector);
- kfree(connector);
+
+ intel_connector_destroy(connector);
}
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
@@ -2090,7 +2127,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
- .late_register = intel_connector_register,
+ .late_register = intel_hdmi_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_hdmi_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -2110,11 +2147,16 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_attach_aspect_ratio_property(connector);
drm_connector_attach_content_type_property(connector);
connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+
+ if (!HAS_GMCH_DISPLAY(dev_priv))
+ drm_connector_attach_max_bpc_property(connector, 8, 12);
}
/*
@@ -2325,9 +2367,18 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
intel_dig_port->set_infoframes = g4x_set_infoframes;
intel_dig_port->infoframe_enabled = g4x_infoframe_enabled;
} else if (HAS_DDI(dev_priv)) {
- intel_dig_port->write_infoframe = hsw_write_infoframe;
- intel_dig_port->set_infoframes = hsw_set_infoframes;
- intel_dig_port->infoframe_enabled = hsw_infoframe_enabled;
+ if (intel_dig_port->lspcon.active) {
+ intel_dig_port->write_infoframe =
+ lspcon_write_infoframe;
+ intel_dig_port->set_infoframes = lspcon_set_infoframes;
+ intel_dig_port->infoframe_enabled =
+ lspcon_infoframe_enabled;
+ } else {
+ intel_dig_port->set_infoframes = hsw_set_infoframes;
+ intel_dig_port->infoframe_enabled =
+ hsw_infoframe_enabled;
+ intel_dig_port->write_infoframe = hsw_write_infoframe;
+ }
} else if (HAS_PCH_IBX(dev_priv)) {
intel_dig_port->write_infoframe = ibx_write_infoframe;
intel_dig_port->set_infoframes = ibx_set_infoframes;
@@ -2486,5 +2537,6 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
intel_infoframe_init(intel_dig_port);
+ intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
intel_hdmi_init_connector(intel_dig_port, intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 9a8018130237..e24174d08fed 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -114,51 +114,68 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
/**
- * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin
+ * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
* @dev_priv: private driver data pointer
* @pin: the pin to gather stats on
+ * @long_hpd: whether the HPD IRQ was long or short
*
- * Gather stats about HPD irqs from the specified @pin, and detect irq
+ * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
* storms. Only the pin specific stats and state are changed, the caller is
* responsible for further action.
*
- * The number of irqs that are allowed within @HPD_STORM_DETECT_PERIOD is
+ * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
* stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
- * @HPD_STORM_DEFAULT_THRESHOLD. If this threshold is exceeded, it's
- * considered an irq storm and the irq state is set to @HPD_MARK_DISABLED.
+ * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
+ * short IRQs count as +1. If this threshold is exceeded, it's considered an
+ * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
+ *
+ * By default, most systems will only count long IRQs towards
+ * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
+ * suffer from short IRQ storms and must also track these. Because short IRQ
+ * storms are naturally caused by sideband interactions with DP MST devices,
+ * short IRQ detection is only enabled for systems without DP MST support.
+ * Systems which are new enough to support DP MST are far less likely to
+ * suffer from IRQ storms at all, so this is fine.
*
* The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
* and should only be adjusted for automated hotplug testing.
*
- * Return true if an irq storm was detected on @pin.
+ * Return true if an IRQ storm was detected on @pin.
*/
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
- enum hpd_pin pin)
+ enum hpd_pin pin, bool long_hpd)
{
- unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
+ struct i915_hotplug *hpd = &dev_priv->hotplug;
+ unsigned long start = hpd->stats[pin].last_jiffies;
unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
- const int threshold = dev_priv->hotplug.hpd_storm_threshold;
+ const int increment = long_hpd ? 10 : 1;
+ const int threshold = hpd->hpd_storm_threshold;
bool storm = false;
+ if (!threshold ||
+ (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
+ return false;
+
if (!time_in_range(jiffies, start, end)) {
- dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
- dev_priv->hotplug.stats[pin].count = 0;
- DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
- } else if (dev_priv->hotplug.stats[pin].count > threshold &&
- threshold) {
- dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
+ hpd->stats[pin].last_jiffies = jiffies;
+ hpd->stats[pin].count = 0;
+ }
+
+ hpd->stats[pin].count += increment;
+ if (hpd->stats[pin].count > threshold) {
+ hpd->stats[pin].state = HPD_MARK_DISABLED;
DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
storm = true;
} else {
- dev_priv->hotplug.stats[pin].count++;
DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
- dev_priv->hotplug.stats[pin].count);
+ hpd->stats[pin].count);
}
return storm;
}
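
For illustration, a minimal stand-alone sketch of the weighted accounting described in the kernel-doc above, assuming a 1000 ms detection window and an illustrative threshold of 5 (the names, the values and the omitted hpd_short_storm_enabled gating are simplifications, not the driver's actual definitions):

#include <stdbool.h>
#include <stdio.h>

#define STORM_PERIOD_MS  1000	/* stand-in for HPD_STORM_DETECT_PERIOD */
#define STORM_THRESHOLD  5	/* illustrative threshold only */

struct pin_stats {
	unsigned long window_start_ms;
	int count;
};

/* Long HPD IRQs add 10 to the per-pin count, short ones add 1; the count
 * restarts whenever an IRQ arrives outside the current detection window. */
static bool storm_detect(struct pin_stats *s, unsigned long now_ms, bool long_hpd)
{
	if (now_ms - s->window_start_ms > STORM_PERIOD_MS) {
		s->window_start_ms = now_ms;
		s->count = 0;
	}

	s->count += long_hpd ? 10 : 1;
	return s->count > STORM_THRESHOLD;
}

int main(void)
{
	struct pin_stats s = { 0, 0 };

	/* A single long IRQ (+10) already exceeds a threshold of 5 ... */
	printf("long IRQ -> storm=%d\n", storm_detect(&s, 10, true));

	/* ... while short IRQs need more than five within the same window. */
	s = (struct pin_stats){ 100, 0 };
	for (int i = 0; i < 6; i++)
		printf("short IRQ %d -> storm=%d\n", i, storm_detect(&s, 110 + i, false));
	return 0;
}
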
-static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
+static void
+intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
struct intel_connector *intel_connector;
@@ -348,8 +365,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
hpd_event_bits = dev_priv->hotplug.event_bits;
dev_priv->hotplug.event_bits = 0;
- /* Disable hotplug on connectors that hit an irq storm. */
- intel_hpd_irq_storm_disable(dev_priv);
+ /* Enable polling for connectors which had HPD IRQ storms */
+ intel_hpd_irq_storm_switch_to_polling(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -474,15 +491,17 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
queue_hp = true;
}
- if (!long_hpd)
- continue;
-
- if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
+ if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
dev_priv->hotplug.event_bits &= ~BIT(pin);
storm_detected = true;
+ queue_hp = true;
}
}
+ /*
+ * Disable any IRQs that storms were detected on. Polling enablement
+ * happens later in our hotplug work.
+ */
if (storm_detected && dev_priv->display_irqs_enabled)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock(&dev_priv->irq_lock);
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 37ef540dd280..bc27b691d824 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -108,13 +108,14 @@ fail:
* This function reads status register to verify if HuC
* firmware was successfully loaded.
*
- * Returns positive value if HuC firmware is loaded and verified
- * and -ENODEV if HuC is not present.
+ * Returns: 1 if HuC firmware is loaded and verified,
+ * 0 if HuC firmware is not loaded, and -ENODEV if HuC
+ * is not present on this platform.
*/
int intel_huc_check_status(struct intel_huc *huc)
{
struct drm_i915_private *dev_priv = huc_to_i915(huc);
- u32 status;
+ bool status;
if (!HAS_HUC(dev_priv))
return -ENODEV;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 33d87ab93fdd..802d0394ccc4 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -817,7 +817,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
unsigned int pin;
int ret;
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return 0;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 37c94a54efcb..d7fa301b5ec7 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -259,63 +259,6 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
ce->lrc_desc = desc;
}
-static struct i915_priolist *
-lookup_priolist(struct intel_engine_cs *engine, int prio)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_priolist *p;
- struct rb_node **parent, *rb;
- bool first = true;
-
- if (unlikely(execlists->no_priolist))
- prio = I915_PRIORITY_NORMAL;
-
-find_priolist:
- /* most positive priority is scheduled first, equal priorities fifo */
- rb = NULL;
- parent = &execlists->queue.rb_root.rb_node;
- while (*parent) {
- rb = *parent;
- p = to_priolist(rb);
- if (prio > p->priority) {
- parent = &rb->rb_left;
- } else if (prio < p->priority) {
- parent = &rb->rb_right;
- first = false;
- } else {
- return p;
- }
- }
-
- if (prio == I915_PRIORITY_NORMAL) {
- p = &execlists->default_priolist;
- } else {
- p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
- /* Convert an allocation failure to a priority bump */
- if (unlikely(!p)) {
- prio = I915_PRIORITY_NORMAL; /* recurses just once */
-
- /* To maintain ordering with all rendering, after an
- * allocation failure we have to disable all scheduling.
- * Requests will then be executed in fifo, and schedule
- * will ensure that dependencies are emitted in fifo.
- * There will be still some reordering with existing
- * requests, so if userspace lied about their
- * dependencies that reordering may be visible.
- */
- execlists->no_priolist = true;
- goto find_priolist;
- }
- }
-
- p->priority = prio;
- INIT_LIST_HEAD(&p->requests);
- rb_link_node(&p->node, rb, parent);
- rb_insert_color_cached(&p->node, &execlists->queue, first);
-
- return p;
-}
-
static void unwind_wa_tail(struct i915_request *rq)
{
rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
@@ -324,9 +267,9 @@ static void unwind_wa_tail(struct i915_request *rq)
static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
{
- struct i915_request *rq, *rn;
- struct i915_priolist *uninitialized_var(p);
- int last_prio = I915_PRIORITY_INVALID;
+ struct i915_request *rq, *rn, *active = NULL;
+ struct list_head *uninitialized_var(pl);
+ int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT;
lockdep_assert_held(&engine->timeline.lock);
@@ -334,19 +277,34 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
&engine->timeline.requests,
link) {
if (i915_request_completed(rq))
- return;
+ break;
__i915_request_unsubmit(rq);
unwind_wa_tail(rq);
+ GEM_BUG_ON(rq->hw_context->active);
+
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
- if (rq_prio(rq) != last_prio) {
- last_prio = rq_prio(rq);
- p = lookup_priolist(engine, last_prio);
+ if (rq_prio(rq) != prio) {
+ prio = rq_prio(rq);
+ pl = i915_sched_lookup_priolist(engine, prio);
}
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+
+ list_add(&rq->sched.link, pl);
- GEM_BUG_ON(p->priority != rq_prio(rq));
- list_add(&rq->sched.link, &p->requests);
+ active = rq;
+ }
+
+ /*
+ * The active request is now effectively the start of a new client
+ * stream, so give it the equivalent small priority bump to prevent
+ * it being gazumped a second time by another peer.
+ */
+ if (!(prio & I915_PRIORITY_NEWCLIENT)) {
+ prio |= I915_PRIORITY_NEWCLIENT;
+ list_move_tail(&active->sched.link,
+ i915_sched_lookup_priolist(engine, prio));
}
}
@@ -355,13 +313,8 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
{
struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
- unsigned long flags;
-
- spin_lock_irqsave(&engine->timeline.lock, flags);
__unwind_incomplete_requests(engine);
-
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static inline void
@@ -394,13 +347,17 @@ execlists_user_end(struct intel_engine_execlists *execlists)
static inline void
execlists_context_schedule_in(struct i915_request *rq)
{
+ GEM_BUG_ON(rq->hw_context->active);
+
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(rq->engine);
+ rq->hw_context->active = rq->engine;
}
static inline void
execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
{
+ rq->hw_context->active = NULL;
intel_engine_context_out(rq->engine);
execlists_context_status_change(rq, status);
trace_i915_request_out(rq);
@@ -417,9 +374,8 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
static u64 execlists_update_context(struct i915_request *rq)
{
+ struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
struct intel_context *ce = rq->hw_context;
- struct i915_hw_ppgtt *ppgtt =
- rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state;
reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
@@ -430,7 +386,7 @@ static u64 execlists_update_context(struct i915_request *rq)
* PML4 is allocated during ppgtt init, so this is not needed
* in 48-bit mode.
*/
- if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
+ if (!i915_vm_is_48bit(&ppgtt->vm))
execlists_update_context_pdps(ppgtt, reg_state);
/*
@@ -681,8 +637,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
+ int i;
- list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+ priolist_for_each_request_consume(rq, rn, p, i) {
/*
* Can we combine this request with the current port?
* It has to be the same context/ringbuffer and not
@@ -701,11 +658,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* combine this request with the last, then we
* are done.
*/
- if (port == last_port) {
- __list_del_many(&p->requests,
- &rq->sched.link);
+ if (port == last_port)
goto done;
- }
/*
* If GVT overrides us we only ever submit
@@ -715,11 +669,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* request) to the second port.
*/
if (ctx_single_port_submission(last->hw_context) ||
- ctx_single_port_submission(rq->hw_context)) {
- __list_del_many(&p->requests,
- &rq->sched.link);
+ ctx_single_port_submission(rq->hw_context))
goto done;
- }
GEM_BUG_ON(last->hw_context == rq->hw_context);
@@ -730,15 +681,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(port_isset(port));
}
- INIT_LIST_HEAD(&rq->sched.link);
+ list_del_init(&rq->sched.link);
+
__i915_request_submit(rq);
trace_i915_request_in(rq, port_index(port, execlists));
+
last = rq;
submit = true;
}
rb_erase_cached(&p->node, &execlists->queue);
- INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
@@ -815,6 +767,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
static void reset_csb_pointers(struct intel_engine_execlists *execlists)
{
+ const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;
+
/*
* After a reset, the HW starts writing into CSB entry [0]. We
* therefore have to set our HEAD pointer back one entry so that
@@ -824,8 +778,8 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
* inline comparison of our cached head position against the last HW
* write works even before the first interrupt.
*/
- execlists->csb_head = execlists->csb_write_reset;
- WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset);
+ execlists->csb_head = reset_value;
+ WRITE_ONCE(*execlists->csb_write, reset_value);
}
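
As a rough sketch of the comment above (assuming GEN8_CSB_ENTRIES is 6; this models the idea only, not driver code): parking the cached head on the last entry means the hardware's first write, which lands in entry 0, immediately differs from it, so the head-vs-write comparison notices the event even before the first interrupt.

#include <stdio.h>

#define CSB_ENTRIES 6	/* assumed value of GEN8_CSB_ENTRIES */

int main(void)
{
	unsigned int head = CSB_ENTRIES - 1;	/* the reset_value above */
	unsigned int write = CSB_ENTRIES - 1;	/* mirrored into the HW write pointer */

	/* First context-switch event after reset: HW writes entry 0. */
	write = 0;

	while (head != write) {
		head = (head + 1) % CSB_ENTRIES;
		printf("process CSB entry %u\n", head);	/* entry 0 is consumed */
	}
	return 0;
}
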
static void nop_submission_tasklet(unsigned long data)
@@ -866,27 +820,34 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline.requests, link) {
GEM_BUG_ON(!rq->global_seqno);
- if (!i915_request_completed(rq))
- dma_fence_set_error(&rq->fence, -EIO);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ continue;
+
+ dma_fence_set_error(&rq->fence, -EIO);
}
/* Flush the queued requests to the timeline list (for retiring). */
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
+ int i;
- list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
- INIT_LIST_HEAD(&rq->sched.link);
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ list_del_init(&rq->sched.link);
dma_fence_set_error(&rq->fence, -EIO);
__i915_request_submit(rq);
}
rb_erase_cached(&p->node, &execlists->queue);
- INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
+ intel_write_status_page(engine,
+ I915_GEM_HWS_INDEX,
+ intel_engine_last_submit(engine));
+
/* Remaining _unready_ requests will be nop'ed when submitted */
execlists->queue_priority = INT_MIN;
@@ -1088,13 +1049,7 @@ static void queue_request(struct intel_engine_cs *engine,
struct i915_sched_node *node,
int prio)
{
- list_add_tail(&node->link,
- &lookup_priolist(engine, prio)->requests);
-}
-
-static void __update_queue(struct intel_engine_cs *engine, int prio)
-{
- engine->execlists.queue_priority = prio;
+ list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
}
static void __submit_queue_imm(struct intel_engine_cs *engine)
@@ -1113,7 +1068,7 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
static void submit_queue(struct intel_engine_cs *engine, int prio)
{
if (prio > engine->execlists.queue_priority) {
- __update_queue(engine, prio);
+ engine->execlists.queue_priority = prio;
__submit_queue_imm(engine);
}
}
@@ -1136,139 +1091,6 @@ static void execlists_submit_request(struct i915_request *request)
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
-static struct i915_request *sched_to_request(struct i915_sched_node *node)
-{
- return container_of(node, struct i915_request, sched);
-}
-
-static struct intel_engine_cs *
-sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
-{
- struct intel_engine_cs *engine = sched_to_request(node)->engine;
-
- GEM_BUG_ON(!locked);
-
- if (engine != locked) {
- spin_unlock(&locked->timeline.lock);
- spin_lock(&engine->timeline.lock);
- }
-
- return engine;
-}
-
-static void execlists_schedule(struct i915_request *request,
- const struct i915_sched_attr *attr)
-{
- struct i915_priolist *uninitialized_var(pl);
- struct intel_engine_cs *engine, *last;
- struct i915_dependency *dep, *p;
- struct i915_dependency stack;
- const int prio = attr->priority;
- LIST_HEAD(dfs);
-
- GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
-
- if (i915_request_completed(request))
- return;
-
- if (prio <= READ_ONCE(request->sched.attr.priority))
- return;
-
- /* Need BKL in order to use the temporary link inside i915_dependency */
- lockdep_assert_held(&request->i915->drm.struct_mutex);
-
- stack.signaler = &request->sched;
- list_add(&stack.dfs_link, &dfs);
-
- /*
- * Recursively bump all dependent priorities to match the new request.
- *
- * A naive approach would be to use recursion:
- * static void update_priorities(struct i915_sched_node *node, prio) {
- * list_for_each_entry(dep, &node->signalers_list, signal_link)
- * update_priorities(dep->signal, prio)
- * queue_request(node);
- * }
- * but that may have unlimited recursion depth and so runs a very
- * real risk of overunning the kernel stack. Instead, we build
- * a flat list of all dependencies starting with the current request.
- * As we walk the list of dependencies, we add all of its dependencies
- * to the end of the list (this may include an already visited
- * request) and continue to walk onwards onto the new dependencies. The
- * end result is a topological list of requests in reverse order, the
- * last element in the list is the request we must execute first.
- */
- list_for_each_entry(dep, &dfs, dfs_link) {
- struct i915_sched_node *node = dep->signaler;
-
- /*
- * Within an engine, there can be no cycle, but we may
- * refer to the same dependency chain multiple times
- * (redundant dependencies are not eliminated) and across
- * engines.
- */
- list_for_each_entry(p, &node->signalers_list, signal_link) {
- GEM_BUG_ON(p == dep); /* no cycles! */
-
- if (i915_sched_node_signaled(p->signaler))
- continue;
-
- GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
- if (prio > READ_ONCE(p->signaler->attr.priority))
- list_move_tail(&p->dfs_link, &dfs);
- }
- }
-
- /*
- * If we didn't need to bump any existing priorities, and we haven't
- * yet submitted this request (i.e. there is no potential race with
- * execlists_submit_request()), we can set our own priority and skip
- * acquiring the engine locks.
- */
- if (request->sched.attr.priority == I915_PRIORITY_INVALID) {
- GEM_BUG_ON(!list_empty(&request->sched.link));
- request->sched.attr = *attr;
- if (stack.dfs_link.next == stack.dfs_link.prev)
- return;
- __list_del_entry(&stack.dfs_link);
- }
-
- last = NULL;
- engine = request->engine;
- spin_lock_irq(&engine->timeline.lock);
-
- /* Fifo and depth-first replacement ensure our deps execute before us */
- list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
- struct i915_sched_node *node = dep->signaler;
-
- INIT_LIST_HEAD(&dep->dfs_link);
-
- engine = sched_lock_engine(node, engine);
-
- if (prio <= node->attr.priority)
- continue;
-
- node->attr.priority = prio;
- if (!list_empty(&node->link)) {
- if (last != engine) {
- pl = lookup_priolist(engine, prio);
- last = engine;
- }
- GEM_BUG_ON(pl->priority != prio);
- list_move_tail(&node->link, &pl->requests);
- }
-
- if (prio > engine->execlists.queue_priority &&
- i915_sw_fence_done(&sched_to_request(node)->submit)) {
- /* defer submission until after all of our updates */
- __update_queue(engine, prio);
- tasklet_hi_schedule(&engine->execlists.tasklet);
- }
- }
-
- spin_unlock_irq(&engine->timeline.lock);
-}
-
static void execlists_context_destroy(struct intel_context *ce)
{
GEM_BUG_ON(ce->pin_count);
@@ -1284,6 +1106,28 @@ static void execlists_context_destroy(struct intel_context *ce)
static void execlists_context_unpin(struct intel_context *ce)
{
+ struct intel_engine_cs *engine;
+
+ /*
+ * The tasklet may still be using a pointer to our state, via an
+ * old request. However, since we know we only unpin the context
+ * on retirement of the following request, we know that the last
+ * request referencing us will have had a completion CS interrupt.
+ * If we see that it is still active, it means that the tasklet hasn't
+ * had the chance to run yet; let it run before we teardown the
+ * reference it may use.
+ */
+ engine = READ_ONCE(ce->active);
+ if (unlikely(engine)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ process_csb(engine);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+ GEM_BUG_ON(READ_ONCE(ce->active));
+ }
+
i915_gem_context_unpin_hw_id(ce->gem_context);
intel_ring_unpin(ce->ring);
@@ -1387,6 +1231,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
struct intel_context *ce = to_intel_context(ctx, engine);
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+ GEM_BUG_ON(!ctx->ppgtt);
if (likely(ce->pin_count++))
return ce;
@@ -1443,9 +1288,10 @@ static int execlists_request_alloc(struct i915_request *request)
static u32 *
gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
{
+ /* NB no one else is allowed to scribble over scratch + 256! */
*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+ *batch++ = i915_scratch_offset(engine->i915) + 256;
*batch++ = 0;
*batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1459,7 +1305,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+ *batch++ = i915_scratch_offset(engine->i915) + 256;
*batch++ = 0;
return batch;
@@ -1496,7 +1342,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
- i915_ggtt_offset(engine->scratch) +
+ i915_scratch_offset(engine->i915) +
2 * CACHELINE_BYTES);
*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1565,18 +1411,6 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
- /* WaClearSlmSpaceAtContextSwitch:kbl */
- /* Actual scratch location is at 128 bytes offset */
- if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
- batch = gen8_emit_pipe_control(batch,
- PIPE_CONTROL_FLUSH_L3 |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE,
- i915_ggtt_offset(engine->scratch)
- + 2 * CACHELINE_BYTES);
- }
-
/* WaMediaPoolStateCmdInWABB:bxt,glk */
if (HAS_POOLED_EU(engine->i915)) {
/*
@@ -1691,7 +1525,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
unsigned int i;
int ret;
- if (GEM_WARN_ON(engine->id != RCS))
+ if (GEM_DEBUG_WARN_ON(engine->id != RCS))
return -EINVAL;
switch (INTEL_GEN(engine->i915)) {
@@ -1730,8 +1564,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
*/
for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
wa_bb[i]->offset = batch_ptr - batch;
- if (GEM_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
- CACHELINE_BYTES))) {
+ if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
+ CACHELINE_BYTES))) {
ret = -EINVAL;
break;
}
@@ -1793,6 +1627,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
+ intel_engine_apply_workarounds(engine);
+
intel_mocs_init_engine(engine);
intel_engine_reset_breadcrumbs(engine);
@@ -1817,7 +1653,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- intel_whitelist_workarounds_apply(engine);
+ intel_engine_apply_whitelist(engine);
/* We need to disable the AsyncFlip performance optimisations in order
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
@@ -1840,7 +1676,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- intel_whitelist_workarounds_apply(engine);
+ intel_engine_apply_whitelist(engine);
return 0;
}
@@ -1914,7 +1750,7 @@ static void execlists_reset(struct intel_engine_cs *engine,
unsigned long flags;
u32 *regs;
- GEM_TRACE("%s request global=%x, current=%d\n",
+ GEM_TRACE("%s request global=%d, current=%d\n",
engine->name, request ? request->global_seqno : 0,
intel_engine_get_seqno(engine));
@@ -2041,8 +1877,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
* it is unsafe in case of lite-restore (because the ctx is
* not idle). PML4 is allocated during ppgtt init so this is
* not needed in 48-bit.*/
- if (rq->gem_context->ppgtt &&
- (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
+ if ((intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
!i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
!intel_vgpu_active(rq->i915)) {
ret = intel_logical_ring_emit_pdps(rq);
@@ -2121,7 +1956,7 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode)
if (mode & EMIT_INVALIDATE) {
cmd |= MI_INVALIDATE_TLB;
- if (request->engine->id == VCS)
+ if (request->engine->class == VIDEO_DECODE_CLASS)
cmd |= MI_INVALIDATE_BSD;
}
@@ -2139,7 +1974,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
{
struct intel_engine_cs *engine = request->engine;
u32 scratch_addr =
- i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
+ i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
bool vf_flush_wa = false, dc_flush_wa = false;
u32 *cs, flags = 0;
int len;
@@ -2253,7 +2088,7 @@ static int gen8_init_rcs_context(struct i915_request *rq)
{
int ret;
- ret = intel_ctx_workarounds_emit(rq);
+ ret = intel_engine_emit_ctx_wa(rq);
if (ret)
return ret;
@@ -2306,7 +2141,7 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
engine->cancel_requests = execlists_cancel_requests;
- engine->schedule = execlists_schedule;
+ engine->schedule = i915_schedule;
engine->execlists.tasklet.func = execlists_submission_tasklet;
engine->reset.prepare = execlists_reset_prepare;
@@ -2394,12 +2229,6 @@ logical_ring_setup(struct intel_engine_cs *engine)
logical_ring_default_irqs(engine);
}
-static bool csb_force_mmio(struct drm_i915_private *i915)
-{
- /* Older GVT emulation depends upon intercepting CSB mmio */
- return intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915);
-}
-
static int logical_ring_init(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -2429,24 +2258,12 @@ static int logical_ring_init(struct intel_engine_cs *engine)
upper_32_bits(ce->lrc_desc);
}
- execlists->csb_read =
- i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
- if (csb_force_mmio(i915)) {
- execlists->csb_status = (u32 __force *)
- (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
+ execlists->csb_status =
+ &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
- execlists->csb_write = (u32 __force *)execlists->csb_read;
- execlists->csb_write_reset =
- _MASKED_FIELD(GEN8_CSB_WRITE_PTR_MASK,
- GEN8_CSB_ENTRIES - 1);
- } else {
- execlists->csb_status =
- &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+ execlists->csb_write =
+ &engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
- execlists->csb_write =
- &engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
- execlists->csb_write_reset = GEN8_CSB_ENTRIES - 1;
- }
reset_csb_pointers(execlists);
return 0;
@@ -2476,10 +2293,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
if (ret)
return ret;
- ret = intel_engine_create_scratch(engine, PAGE_SIZE);
- if (ret)
- goto err_cleanup_common;
-
ret = intel_init_workaround_bb(engine);
if (ret) {
/*
@@ -2491,11 +2304,10 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
ret);
}
- return 0;
+ intel_engine_init_whitelist(engine);
+ intel_engine_init_workarounds(engine);
-err_cleanup_common:
- intel_engine_cleanup_common(engine);
- return ret;
+ return 0;
}
int logical_xcs_ring_init(struct intel_engine_cs *engine)
@@ -2644,7 +2456,6 @@ static void execlists_init_reg_state(u32 *regs,
struct intel_ring *ring)
{
struct drm_i915_private *dev_priv = engine->i915;
- struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
u32 base = engine->mmio_base;
bool rcs = engine->class == RENDER_CLASS;
@@ -2716,12 +2527,12 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
- if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) {
+ if (i915_vm_is_48bit(&ctx->ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
*/
- ASSIGN_CTX_PML4(ppgtt, regs);
+ ASSIGN_CTX_PML4(ctx->ppgtt, regs);
}
if (rcs) {
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 3e085c5f2b81..96a8d9524b0c 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -27,6 +27,22 @@
#include <drm/drm_dp_dual_mode_helper.h>
#include "intel_drv.h"
+/* LSPCON OUI Vendor IDs (signatures) */
+#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
+#define LSPCON_VENDOR_MCA_OUI 0x0060AD
+
+/* AUX addresses to write MCA AVI IF */
+#define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0
+#define LSPCON_MCA_AVI_IF_CTRL 0x5DF
+#define LSPCON_MCA_AVI_IF_KICKOFF (1 << 0)
+#define LSPCON_MCA_AVI_IF_HANDLED (1 << 1)
+
+/* AUX addresses to write Parade AVI IF */
+#define LSPCON_PARADE_AVI_IF_WRITE_OFFSET 0x516
+#define LSPCON_PARADE_AVI_IF_CTRL 0x51E
+#define LSPCON_PARADE_AVI_IF_KICKOFF (1 << 7)
+#define LSPCON_PARADE_AVI_IF_DATA_SIZE 32
+
static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
{
struct intel_digital_port *dig_port =
@@ -50,6 +66,40 @@ static const char *lspcon_mode_name(enum drm_lspcon_mode mode)
}
}
+static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
+{
+ struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
+ struct drm_dp_dpcd_ident *ident;
+ u32 vendor_oui;
+
+ if (drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd))) {
+ DRM_ERROR("Can't read description\n");
+ return false;
+ }
+
+ ident = &dp->desc.ident;
+ vendor_oui = (ident->oui[0] << 16) | (ident->oui[1] << 8) |
+ ident->oui[2];
+
+ switch (vendor_oui) {
+ case LSPCON_VENDOR_MCA_OUI:
+ lspcon->vendor = LSPCON_VENDOR_MCA;
+ DRM_DEBUG_KMS("Vendor: Mega Chips\n");
+ break;
+
+ case LSPCON_VENDOR_PARADE_OUI:
+ lspcon->vendor = LSPCON_VENDOR_PARADE;
+ DRM_DEBUG_KMS("Vendor: Parade Tech\n");
+ break;
+
+ default:
+ DRM_ERROR("Invalid/Unknown vendor OUI\n");
+ return false;
+ }
+
+ return true;
+}
+
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
enum drm_lspcon_mode current_mode;
@@ -130,6 +180,21 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
return true;
}
+void lspcon_ycbcr420_config(struct drm_connector *connector,
+ struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_info *info = &connector->display_info;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+
+ if (drm_mode_is_420_only(info, adjusted_mode) &&
+ connector->ycbcr_420_allowed) {
+ crtc_state->port_clock /= 2;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
+ crtc_state->lspcon_downsampling = true;
+ }
+}
+
static bool lspcon_probe(struct intel_lspcon *lspcon)
{
int retry;
@@ -159,7 +224,18 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
/* Yay ... got a LSPCON device */
DRM_DEBUG_KMS("LSPCON detected\n");
lspcon->mode = lspcon_wait_mode(lspcon, expected_mode);
- lspcon->active = true;
+
+ /*
+	 * In the SW state machine, let's put LSPCON into PCON mode only.
+	 * That way it will work with both HDMI 1.4 and HDMI 2.0 sinks.
+ */
+ if (lspcon->mode != DRM_LSPCON_MODE_PCON) {
+ if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
+ DRM_ERROR("LSPCON mode change to PCON failed\n");
+ return false;
+ }
+ }
return true;
}
@@ -185,6 +261,255 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n");
}
+static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux)
+{
+ u8 avi_if_ctrl;
+ u8 retry;
+ ssize_t ret;
+
+ /* Check if LSPCON FW is ready for data */
+ for (retry = 0; retry < 5; retry++) {
+ if (retry)
+ usleep_range(200, 300);
+
+ ret = drm_dp_dpcd_read(aux, LSPCON_PARADE_AVI_IF_CTRL,
+ &avi_if_ctrl, 1);
+ if (ret < 0) {
+ DRM_ERROR("Failed to read AVI IF control\n");
+ return false;
+ }
+
+ if ((avi_if_ctrl & LSPCON_PARADE_AVI_IF_KICKOFF) == 0)
+ return true;
+ }
+
+ DRM_ERROR("Parade FW not ready to accept AVI IF\n");
+ return false;
+}
+
+static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux,
+ uint8_t *avi_buf)
+{
+ u8 avi_if_ctrl;
+ u8 block_count = 0;
+ u8 *data;
+ uint16_t reg;
+ ssize_t ret;
+
+ while (block_count < 4) {
+ if (!lspcon_parade_fw_ready(aux)) {
+ DRM_DEBUG_KMS("LSPCON FW not ready, block %d\n",
+ block_count);
+ return false;
+ }
+
+ reg = LSPCON_PARADE_AVI_IF_WRITE_OFFSET;
+ data = avi_buf + block_count * 8;
+ ret = drm_dp_dpcd_write(aux, reg, data, 8);
+ if (ret < 0) {
+ DRM_ERROR("Failed to write AVI IF block %d\n",
+ block_count);
+ return false;
+ }
+
+ /*
+ * Once a block of data is written, we have to inform the FW
+ * about this by writing into avi infoframe control register:
+ * - set the kickoff bit[7] to 1
+ * - write the block no. to bits[1:0]
+ */
+ reg = LSPCON_PARADE_AVI_IF_CTRL;
+ avi_if_ctrl = LSPCON_PARADE_AVI_IF_KICKOFF | block_count;
+ ret = drm_dp_dpcd_write(aux, reg, &avi_if_ctrl, 1);
+ if (ret < 0) {
+ DRM_ERROR("Failed to update (0x%x), block %d\n",
+ reg, block_count);
+ return false;
+ }
+
+ block_count++;
+ }
+
+ DRM_DEBUG_KMS("Wrote AVI IF blocks successfully\n");
+ return true;
+}
+
+static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux,
+ const uint8_t *frame,
+ ssize_t len)
+{
+ uint8_t avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, };
+
+ /*
+	 * Parade's frame buffer contains 32 bytes of data, divided
+	 * into 4 blocks of 8 bytes each:
+	 * Token byte (first byte of first block, must be non-zero)
+	 * HB0 to HB2 from AVI IF (3 bytes header)
+	 * PB0 to PB27 from AVI IF (28 bytes data)
+	 * So it should look like this:
+	 * first block: | <token> <HB0-HB2> <DB0-DB3> |
+	 * next 3 blocks: |<DB4-DB11>|<DB12-DB19>|<DB20-DB27>|
+ */
+
+ if (len > LSPCON_PARADE_AVI_IF_DATA_SIZE - 1) {
+ DRM_ERROR("Invalid length of infoframes\n");
+ return false;
+ }
+
+ memcpy(&avi_if[1], frame, len);
+
+ if (!_lspcon_parade_write_infoframe_blocks(aux, avi_if)) {
+ DRM_DEBUG_KMS("Failed to write infoframe blocks\n");
+ return false;
+ }
+
+ return true;
+}
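
A stand-alone sketch of the buffer layout described in the comment above, assuming a 17-byte packed AVI infoframe with dummy contents; it only mirrors how _lspcon_write_avi_infoframe_parade() places the frame behind the token byte and how the 32-byte buffer is then carved into four 8-byte writes:

#include <stdio.h>
#include <string.h>

#define PARADE_BUF_SIZE 32	/* stand-in for LSPCON_PARADE_AVI_IF_DATA_SIZE */

int main(void)
{
	unsigned char frame[17];	/* assumed packed AVI IF: header + checksum + 13 data bytes */
	unsigned char avi_if[PARADE_BUF_SIZE] = { 1, };	/* token byte must be non-zero */

	memset(frame, 0x5a, sizeof(frame));	/* dummy frame contents */
	memcpy(&avi_if[1], frame, sizeof(frame));

	/* Four 8-byte blocks: block 0 carries the token and the header bytes. */
	for (int block = 0; block < 4; block++) {
		const unsigned char *data = avi_if + block * 8;

		printf("block %d:", block);
		for (int i = 0; i < 8; i++)
			printf(" %02x", data[i]);
		printf("\n");
	}
	return 0;
}
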
+
+static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux,
+ const uint8_t *buffer, ssize_t len)
+{
+ int ret;
+ uint32_t val = 0;
+ uint32_t retry;
+ uint16_t reg;
+ const uint8_t *data = buffer;
+
+ reg = LSPCON_MCA_AVI_IF_WRITE_OFFSET;
+ while (val < len) {
+ /* DPCD write for AVI IF can fail on a slow FW day, so retry */
+ for (retry = 0; retry < 5; retry++) {
+ ret = drm_dp_dpcd_write(aux, reg, (void *)data, 1);
+ if (ret == 1) {
+ break;
+ } else if (retry < 4) {
+ mdelay(50);
+ continue;
+ } else {
+ DRM_ERROR("DPCD write failed at:0x%x\n", reg);
+ return false;
+ }
+ }
+ val++; reg++; data++;
+ }
+
+ val = 0;
+ reg = LSPCON_MCA_AVI_IF_CTRL;
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+	/* Tell the LSPCON chip about the infoframe: clear bit 1 and set bit 0 */
+ val &= ~LSPCON_MCA_AVI_IF_HANDLED;
+ val |= LSPCON_MCA_AVI_IF_KICKOFF;
+
+ ret = drm_dp_dpcd_write(aux, reg, &val, 1);
+ if (ret < 0) {
+		DRM_ERROR("DPCD write failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ val = 0;
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ if (val == LSPCON_MCA_AVI_IF_HANDLED)
+ DRM_DEBUG_KMS("AVI IF handled by FW\n");
+
+ return true;
+}
+
+void lspcon_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len)
+{
+ bool ret;
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
+
+ /* LSPCON only needs AVI IF */
+ if (type != HDMI_INFOFRAME_TYPE_AVI)
+ return;
+
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
+ frame, len);
+ else
+ ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
+ frame, len);
+
+ if (!ret) {
+ DRM_ERROR("Failed to write AVI infoframes\n");
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
+}
+
+void lspcon_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ ssize_t ret;
+ union hdmi_infoframe frame;
+ uint8_t buf[VIDEO_DIP_DATA_SIZE];
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_lspcon *lspcon = &dig_port->lspcon;
+ struct intel_dp *intel_dp = &dig_port->dp;
+ struct drm_connector *connector = &intel_dp->attached_connector->base;
+ const struct drm_display_mode *mode = &crtc_state->base.adjusted_mode;
+ bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
+
+ if (!lspcon->active) {
+		DRM_ERROR("Writing infoframes while LSPCON disabled?\n");
+ return;
+ }
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ mode, is_hdmi2_sink);
+ if (ret < 0) {
+ DRM_ERROR("couldn't fill AVI infoframe\n");
+ return;
+ }
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
+ if (crtc_state->lspcon_downsampling)
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
+ else
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+ } else {
+ frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+ }
+
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode,
+ crtc_state->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL,
+ false, is_hdmi2_sink);
+
+ ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
+ if (ret < 0) {
+ DRM_ERROR("Failed to pack AVI IF\n");
+ return;
+ }
+
+ dig_port->write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI,
+ buf, ret);
+}
+
+bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ return enc_to_intel_lspcon(&encoder->base)->active;
+}
+
void lspcon_resume(struct intel_lspcon *lspcon)
{
enum drm_lspcon_mode expected_mode;
@@ -216,6 +541,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_connector *connector = &dp->attached_connector->base;
if (!HAS_LSPCON(dev_priv)) {
DRM_ERROR("LSPCON is not supported on this platform\n");
@@ -230,25 +556,18 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
return false;
}
- /*
- * In the SW state machine, lets Put LSPCON in PCON mode only.
- * In this way, it will work with both HDMI 1.4 sinks as well as HDMI
- * 2.0 sinks.
- */
- if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) {
- if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
- DRM_ERROR("LSPCON mode change to PCON failed\n");
- return false;
- }
- }
-
if (!intel_dp_read_dpcd(dp)) {
DRM_ERROR("LSPCON DPCD read failed\n");
return false;
}
- drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd));
+ if (!lspcon_detect_vendor(lspcon)) {
+ DRM_ERROR("LSPCON vendor detection failed\n");
+ return false;
+ }
+ connector->ycbcr_420_allowed = true;
+ lspcon->active = true;
DRM_DEBUG_KMS("Success: LSPCON init\n");
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f9f3b0885ba5..e6c5d985ea0a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -42,10 +42,6 @@
#include <linux/acpi.h>
/* Private structure for the integrated LVDS support */
-struct intel_lvds_connector {
- struct intel_connector base;
-};
-
struct intel_lvds_pps {
/* 100us units */
int t1_t2;
@@ -70,7 +66,7 @@ struct intel_lvds_encoder {
struct intel_lvds_pps init_pps;
u32 init_lvds_val;
- struct intel_lvds_connector *attached_connector;
+ struct intel_connector *attached_connector;
};
static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
@@ -78,11 +74,6 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
return container_of(encoder, struct intel_lvds_encoder, base.base);
}
-static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
-{
- return container_of(connector, struct intel_lvds_connector, base.base);
-}
-
bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t lvds_reg, enum pipe *pipe)
{
@@ -396,7 +387,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_lvds_encoder *lvds_encoder =
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
- &lvds_encoder->attached_connector->base;
+ lvds_encoder->attached_connector;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
unsigned int lvds_bpp;
@@ -418,6 +409,8 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
pipe_config->pipe_bpp = lvds_bpp;
}
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
/*
* We have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
@@ -461,15 +454,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
*/
static int intel_lvds_get_modes(struct drm_connector *connector)
{
- struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
/* use cached edid if we have one */
- if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
- return drm_add_edid_modes(connector, lvds_connector->base.edid);
+ if (!IS_ERR_OR_NULL(intel_connector->edid))
+ return drm_add_edid_modes(connector, intel_connector->edid);
- mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
+ mode = drm_mode_duplicate(dev, intel_connector->panel.fixed_mode);
if (mode == NULL)
return 0;
@@ -477,27 +470,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
return 1;
}
-/**
- * intel_lvds_destroy - unregister and free LVDS structures
- * @connector: connector to free
- *
- * Unregister the DDC bus for this connector then free the driver private
- * structure.
- */
-static void intel_lvds_destroy(struct drm_connector *connector)
-{
- struct intel_lvds_connector *lvds_connector =
- to_lvds_connector(connector);
-
- if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
- kfree(lvds_connector->base.edid);
-
- intel_panel_fini(&lvds_connector->base.panel);
-
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
@@ -511,7 +483,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_lvds_destroy,
+ .destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
@@ -802,8 +774,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
return i915_modparams.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
- if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
- > 112999)
+ if (lvds_encoder->attached_connector->panel.fixed_mode->clock > 112999)
return true;
if (dmi_check_system(intel_dual_link_lvds))
@@ -858,7 +829,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
struct drm_device *dev = &dev_priv->drm;
struct intel_lvds_encoder *lvds_encoder;
struct intel_encoder *intel_encoder;
- struct intel_lvds_connector *lvds_connector;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
@@ -911,23 +881,16 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
if (!lvds_encoder)
return;
- lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
- if (!lvds_connector) {
- kfree(lvds_encoder);
- return;
- }
-
- if (intel_connector_init(&lvds_connector->base) < 0) {
- kfree(lvds_connector);
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector) {
kfree(lvds_encoder);
return;
}
- lvds_encoder->attached_connector = lvds_connector;
+ lvds_encoder->attached_connector = intel_connector;
intel_encoder = &lvds_encoder->base;
encoder = &intel_encoder->base;
- intel_connector = &lvds_connector->base;
connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@@ -1008,7 +971,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
} else {
edid = ERR_PTR(-ENOENT);
}
- lvds_connector->base.edid = edid;
+ intel_connector->edid = edid;
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
@@ -1072,6 +1035,6 @@ failed:
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(lvds_encoder);
- kfree(lvds_connector);
+ intel_connector_free(intel_connector);
return;
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index e034b4166d32..b8f106d9ecf8 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -773,70 +773,6 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
opregion->acpi->cadl[i] = 0;
}
-void intel_opregion_register(struct drm_i915_private *dev_priv)
-{
- struct intel_opregion *opregion = &dev_priv->opregion;
-
- if (!opregion->header)
- return;
-
- if (opregion->acpi) {
- intel_didl_outputs(dev_priv);
- intel_setup_cadls(dev_priv);
-
- /* Notify BIOS we are ready to handle ACPI video ext notifs.
- * Right now, all the events are handled by the ACPI video module.
- * We don't actually need to do anything with them. */
- opregion->acpi->csts = 0;
- opregion->acpi->drdy = 1;
-
- opregion->acpi_notifier.notifier_call = intel_opregion_video_event;
- register_acpi_notifier(&opregion->acpi_notifier);
- }
-
- if (opregion->asle) {
- opregion->asle->tche = ASLE_TCHE_BLC_EN;
- opregion->asle->ardy = ASLE_ARDY_READY;
- }
-}
-
-void intel_opregion_unregister(struct drm_i915_private *dev_priv)
-{
- struct intel_opregion *opregion = &dev_priv->opregion;
-
- if (!opregion->header)
- return;
-
- if (opregion->asle)
- opregion->asle->ardy = ASLE_ARDY_NOT_READY;
-
- cancel_work_sync(&dev_priv->opregion.asle_work);
-
- if (opregion->acpi) {
- opregion->acpi->drdy = 0;
-
- unregister_acpi_notifier(&opregion->acpi_notifier);
- opregion->acpi_notifier.notifier_call = NULL;
- }
-
- /* just clear all opregion memory pointers now */
- memunmap(opregion->header);
- if (opregion->rvda) {
- memunmap(opregion->rvda);
- opregion->rvda = NULL;
- }
- if (opregion->vbt_firmware) {
- kfree(opregion->vbt_firmware);
- opregion->vbt_firmware = NULL;
- }
- opregion->header = NULL;
- opregion->acpi = NULL;
- opregion->swsci = NULL;
- opregion->asle = NULL;
- opregion->vbt = NULL;
- opregion->lid_state = NULL;
-}
-
static void swsci_setup(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
@@ -1115,3 +1051,97 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
return ret - 1;
}
+
+void intel_opregion_register(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ opregion->acpi_notifier.notifier_call =
+ intel_opregion_video_event;
+ register_acpi_notifier(&opregion->acpi_notifier);
+ }
+
+ intel_opregion_resume(i915);
+}
+
+void intel_opregion_resume(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ intel_didl_outputs(i915);
+ intel_setup_cadls(i915);
+
+ /*
+ * Notify BIOS we are ready to handle ACPI video ext notifs.
+ * Right now, all the events are handled by the ACPI video
+ * module. We don't actually need to do anything with them.
+ */
+ opregion->acpi->csts = 0;
+ opregion->acpi->drdy = 1;
+ }
+
+ if (opregion->asle) {
+ opregion->asle->tche = ASLE_TCHE_BLC_EN;
+ opregion->asle->ardy = ASLE_ARDY_READY;
+ }
+
+ intel_opregion_notify_adapter(i915, PCI_D0);
+}
+
+void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
+{
+ struct intel_opregion *opregion = &i915->opregion;
+
+ if (!opregion->header)
+ return;
+
+ intel_opregion_notify_adapter(i915, state);
+
+ if (opregion->asle)
+ opregion->asle->ardy = ASLE_ARDY_NOT_READY;
+
+ cancel_work_sync(&i915->opregion.asle_work);
+
+ if (opregion->acpi)
+ opregion->acpi->drdy = 0;
+}
+
+void intel_opregion_unregister(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->opregion;
+
+ intel_opregion_suspend(i915, PCI_D1);
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi_notifier.notifier_call) {
+ unregister_acpi_notifier(&opregion->acpi_notifier);
+ opregion->acpi_notifier.notifier_call = NULL;
+ }
+
+ /* just clear all opregion memory pointers now */
+ memunmap(opregion->header);
+ if (opregion->rvda) {
+ memunmap(opregion->rvda);
+ opregion->rvda = NULL;
+ }
+ if (opregion->vbt_firmware) {
+ kfree(opregion->vbt_firmware);
+ opregion->vbt_firmware = NULL;
+ }
+ opregion->header = NULL;
+ opregion->acpi = NULL;
+ opregion->swsci = NULL;
+ opregion->asle = NULL;
+ opregion->vbt = NULL;
+ opregion->lid_state = NULL;
+}
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
index e8498a8cda3d..4aa68ffbd30e 100644
--- a/drivers/gpu/drm/i915/intel_opregion.h
+++ b/drivers/gpu/drm/i915/intel_opregion.h
@@ -57,8 +57,14 @@ struct intel_opregion {
#ifdef CONFIG_ACPI
int intel_opregion_setup(struct drm_i915_private *dev_priv);
+
void intel_opregion_register(struct drm_i915_private *dev_priv);
void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+
+void intel_opregion_resume(struct drm_i915_private *dev_priv);
+void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+ pci_power_t state);
+
void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable);
@@ -81,6 +87,15 @@ static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
{
}
+static inline void intel_opregion_resume(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+ pci_power_t state)
+{
+}
+
static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 72eb7e48e8bc..20ea7c99d13a 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1338,7 +1338,7 @@ err_put_bo:
return err;
}
-void intel_setup_overlay(struct drm_i915_private *dev_priv)
+void intel_overlay_setup(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay;
int ret;
@@ -1387,7 +1387,7 @@ out_free:
kfree(overlay);
}
-void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
+void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4a9f139e7b73..e6cd7b55c018 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -111,7 +111,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
/* Native modes don't need fitting */
if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h &&
- !pipe_config->ycbcr420)
+ pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
goto done;
switch (fitting_mode) {
@@ -505,7 +505,7 @@ static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
static u32 vlv_get_backlight(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum pipe pipe = intel_get_pipe_from_connector(connector);
+ enum pipe pipe = intel_connector_get_pipe(connector);
return _vlv_get_backlight(dev_priv, pipe);
}
@@ -763,7 +763,7 @@ static void pwm_disable_backlight(const struct drm_connector_state *old_conn_sta
struct intel_panel *panel = &connector->panel;
/* Disable the backlight */
- pwm_config(panel->backlight.pwm, 0, CRC_PMIC_PWM_PERIOD_NS);
+ intel_panel_actually_set_backlight(old_conn_state, 0);
usleep_range(2000, 3000);
pwm_disable(panel->backlight.pwm);
}
@@ -1814,11 +1814,8 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
return 0;
}
-void intel_panel_destroy_backlight(struct drm_connector *connector)
+static void intel_panel_destroy_backlight(struct intel_panel *panel)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_panel *panel = &intel_connector->panel;
-
/* dispose of the pwm */
if (panel->backlight.pwm)
pwm_put(panel->backlight.pwm);
@@ -1923,6 +1920,8 @@ void intel_panel_fini(struct intel_panel *panel)
struct intel_connector *intel_connector =
container_of(panel, struct intel_connector, panel);
+ intel_panel_destroy_backlight(panel);
+
if (panel->fixed_mode)
drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 245f0022bcfd..a26b4eddda25 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2493,6 +2493,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
uint32_t method1, method2;
int cpp;
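+ /* Treat a zero latency as "level disabled" by returning a watermark that can never fit */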
+ if (mem_value == 0)
+ return U32_MAX;
+
if (!intel_wm_plane_visible(cstate, pstate))
return 0;
@@ -2522,6 +2525,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
uint32_t method1, method2;
int cpp;
+ if (mem_value == 0)
+ return U32_MAX;
+
if (!intel_wm_plane_visible(cstate, pstate))
return 0;
@@ -2545,6 +2551,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
{
int cpp;
+ if (mem_value == 0)
+ return U32_MAX;
+
if (!intel_wm_plane_visible(cstate, pstate))
return 0;
@@ -3008,6 +3017,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}
+static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
+{
+ /*
+ * On some SNB machines (Thinkpad X220 Tablet at least)
+ * LP3 usage can cause vblank interrupts to be lost.
+ * The DEIIR bit will go high but it looks like the CPU
+ * never gets interrupted.
+ *
+ * It's not clear whether other interrupt sources could
+ * be affected or if this is somehow limited to vblank
+ * interrupts only. To play it safe we disable LP3
+ * watermarks entirely.
+ */
+ if (dev_priv->wm.pri_latency[3] == 0 &&
+ dev_priv->wm.spr_latency[3] == 0 &&
+ dev_priv->wm.cur_latency[3] == 0)
+ return;
+
+ dev_priv->wm.pri_latency[3] = 0;
+ dev_priv->wm.spr_latency[3] = 0;
+ dev_priv->wm.cur_latency[3] = 0;
+
+ DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+}
+
static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
@@ -3024,8 +3061,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
- if (IS_GEN6(dev_priv))
+ if (IS_GEN6(dev_priv)) {
snb_wm_latency_quirk(dev_priv);
+ snb_wm_lp3_irq_quirk(dev_priv);
+ }
}
static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
@@ -3159,7 +3198,8 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
* and after the vblank.
*/
*a = newstate->wm.ilk.optimal;
- if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base))
+ if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) ||
+ intel_state->skip_intermediate_wm)
return 0;
a->pipe_enabled |= b->pipe_enabled;
@@ -3611,15 +3651,8 @@ static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
- IS_CANNONLAKE(dev_priv))
- return true;
-
- if (IS_SKYLAKE(dev_priv) &&
- dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
- return true;
-
- return false;
+ return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
+ dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
}
/*
@@ -3783,7 +3816,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *cstate,
- const unsigned int total_data_rate,
+ const u64 total_data_rate,
const int num_active,
struct skl_ddb_allocation *ddb)
{
@@ -3797,12 +3830,12 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
return ddb_size - 4; /* 4 blocks for bypass path allocation */
adjusted_mode = &cstate->base.adjusted_mode;
- total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode);
+ total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
/*
* 12GB/s is maximum BW supported by single DBuf slice.
*/
- if (total_data_bw >= GBps(12) || num_active > 1) {
+ if (num_active > 1 || total_data_bw >= GBps(12)) {
ddb->enabled_slices = 2;
} else {
ddb->enabled_slices = 1;
@@ -3813,16 +3846,15 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
}
static void
-skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
+skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *cstate,
- const unsigned int total_data_rate,
+ const u64 total_data_rate,
struct skl_ddb_allocation *ddb,
struct skl_ddb_entry *alloc, /* out */
int *num_active /* out */)
{
struct drm_atomic_state *state = cstate->base.state;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *for_crtc = cstate->base.crtc;
const struct drm_crtc_state *crtc_state;
const struct drm_crtc *crtc;
@@ -3919,73 +3951,68 @@ static void
skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
const enum pipe pipe,
const enum plane_id plane_id,
- struct skl_ddb_allocation *ddb /* out */)
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv)
{
- u32 val, val2 = 0;
- int fourcc, pixel_format;
+ u32 val, val2;
+ u32 fourcc = 0;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
val = I915_READ(CUR_BUF_CFG(pipe));
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val);
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
return;
}
val = I915_READ(PLANE_CTL(pipe, plane_id));
/* No DDB allocated for disabled planes */
- if (!(val & PLANE_CTL_ENABLE))
- return;
+ if (val & PLANE_CTL_ENABLE)
+ fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
+ val & PLANE_CTL_ORDER_RGBX,
+ val & PLANE_CTL_ALPHA_MASK);
- pixel_format = val & PLANE_CTL_FORMAT_MASK;
- fourcc = skl_format_to_fourcc(pixel_format,
- val & PLANE_CTL_ORDER_RGBX,
- val & PLANE_CTL_ALPHA_MASK);
-
- val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
- /*
- * FIXME: add proper NV12 support for ICL. Avoid reading unclaimed
- * registers for now.
- */
- if (INTEL_GEN(dev_priv) < 11)
+ if (INTEL_GEN(dev_priv) >= 11) {
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+ } else {
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
- if (fourcc == DRM_FORMAT_NV12) {
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val2);
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->uv_plane[pipe][plane_id], val);
- } else {
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val);
+ if (fourcc == DRM_FORMAT_NV12)
+ swap(val, val2);
+
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
}
}
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb /* out */)
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv)
{
- struct intel_crtc *crtc;
-
- memset(ddb, 0, sizeof(*ddb));
-
- ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- enum intel_display_power_domain power_domain;
- enum plane_id plane_id;
- enum pipe pipe = crtc->pipe;
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return;
- power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
- continue;
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ skl_ddb_get_hw_plane_state(dev_priv, pipe,
+ plane_id,
+ &ddb_y[plane_id],
+ &ddb_uv[plane_id]);
- for_each_plane_id_on_crtc(crtc, plane_id)
- skl_ddb_get_hw_plane_state(dev_priv, pipe,
- plane_id, ddb);
+ intel_display_power_put(dev_priv, power_domain);
+}
- intel_display_power_put(dev_priv, power_domain);
- }
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb /* out */)
+{
+ ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
}
/*
@@ -4138,23 +4165,24 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
return 0;
}
-static unsigned int
+static u64
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
- const struct drm_plane_state *pstate,
+ const struct intel_plane_state *intel_pstate,
const int plane)
{
- struct intel_plane *intel_plane = to_intel_plane(pstate->plane);
- struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
+ struct intel_plane *intel_plane =
+ to_intel_plane(intel_pstate->base.plane);
uint32_t data_rate;
uint32_t width = 0, height = 0;
struct drm_framebuffer *fb;
u32 format;
uint_fixed_16_16_t down_scale_amount;
+ u64 rate;
if (!intel_pstate->base.visible)
return 0;
- fb = pstate->fb;
+ fb = intel_pstate->base.fb;
format = fb->format->format;
if (intel_plane->id == PLANE_CURSOR)
@@ -4176,28 +4204,26 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
height /= 2;
}
- data_rate = width * height * fb->format->cpp[plane];
+ data_rate = width * height;
down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
- return mul_round_up_u32_fixed16(data_rate, down_scale_amount);
+ rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
+
+ rate *= fb->format->cpp[plane];
+ return rate;
}
-/*
- * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
- * a 8192x4096@32bpp framebuffer:
- * 3 * 4096 * 8192 * 4 < 2^32
- */
-static unsigned int
+static u64
skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
- unsigned int *plane_data_rate,
- unsigned int *uv_plane_data_rate)
+ u64 *plane_data_rate,
+ u64 *uv_plane_data_rate)
{
struct drm_crtc_state *cstate = &intel_cstate->base;
struct drm_atomic_state *state = cstate->state;
struct drm_plane *plane;
const struct drm_plane_state *pstate;
- unsigned int total_data_rate = 0;
+ u64 total_data_rate = 0;
if (WARN_ON(!state))
return 0;
@@ -4205,26 +4231,81 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
/* Calculate and cache data rate for each plane */
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
enum plane_id plane_id = to_intel_plane(plane)->id;
- unsigned int rate;
+ u64 rate;
+ const struct intel_plane_state *intel_pstate =
+ to_intel_plane_state(pstate);
/* packed/y */
rate = skl_plane_relative_data_rate(intel_cstate,
- pstate, 0);
+ intel_pstate, 0);
plane_data_rate[plane_id] = rate;
-
total_data_rate += rate;
/* uv-plane */
rate = skl_plane_relative_data_rate(intel_cstate,
- pstate, 1);
+ intel_pstate, 1);
uv_plane_data_rate[plane_id] = rate;
-
total_data_rate += rate;
}
return total_data_rate;
}
+static u64
+icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+ u64 *plane_data_rate)
+{
+ struct drm_crtc_state *cstate = &intel_cstate->base;
+ struct drm_atomic_state *state = cstate->state;
+ struct drm_plane *plane;
+ const struct drm_plane_state *pstate;
+ u64 total_data_rate = 0;
+
+ if (WARN_ON(!state))
+ return 0;
+
+ /* Calculate and cache data rate for each plane */
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
+ const struct intel_plane_state *intel_pstate =
+ to_intel_plane_state(pstate);
+ enum plane_id plane_id = to_intel_plane(plane)->id;
+ u64 rate;
+
+ if (!intel_pstate->linked_plane) {
+ rate = skl_plane_relative_data_rate(intel_cstate,
+ intel_pstate, 0);
+ plane_data_rate[plane_id] = rate;
+ total_data_rate += rate;
+ } else {
+ enum plane_id y_plane_id;
+
+ /*
+ * The slave plane might not iterate in
+ * drm_atomic_crtc_state_for_each_plane_state(),
+ * and needs the master plane state, which may be
+ * NULL if we try to fetch it via get_new_plane_state(),
+ * so we always calculate from the master.
+ */
+ if (intel_pstate->slave)
+ continue;
+
+ /* Y plane rate is calculated on the slave */
+ rate = skl_plane_relative_data_rate(intel_cstate,
+ intel_pstate, 0);
+ y_plane_id = intel_pstate->linked_plane->id;
+ plane_data_rate[y_plane_id] = rate;
+ total_data_rate += rate;
+
+ rate = skl_plane_relative_data_rate(intel_cstate,
+ intel_pstate, 1);
+ plane_data_rate[plane_id] = rate;
+ total_data_rate += rate;
+ }
+ }
+
+ return total_data_rate;
+}
+
static uint16_t
skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane)
{
@@ -4297,15 +4378,25 @@ skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
enum plane_id plane_id = to_intel_plane(plane)->id;
+ struct intel_plane_state *plane_state = to_intel_plane_state(pstate);
if (plane_id == PLANE_CURSOR)
continue;
- if (!pstate->visible)
+ /* slave plane must be invisible and calculated from master */
+ if (!pstate->visible || WARN_ON(plane_state->slave))
continue;
- minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
- uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+ if (!plane_state->linked_plane) {
+ minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
+ uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+ } else {
+ enum plane_id y_plane_id =
+ plane_state->linked_plane->id;
+
+ minimum[y_plane_id] = skl_ddb_min_alloc(pstate, 0);
+ minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+ }
}
minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
@@ -4317,23 +4408,22 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
{
struct drm_atomic_state *state = cstate->base.state;
struct drm_crtc *crtc = cstate->base.crtc;
- struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
uint16_t alloc_size, start;
uint16_t minimum[I915_MAX_PLANES] = {};
uint16_t uv_minimum[I915_MAX_PLANES] = {};
- unsigned int total_data_rate;
+ u64 total_data_rate;
enum plane_id plane_id;
int num_active;
- unsigned int plane_data_rate[I915_MAX_PLANES] = {};
- unsigned int uv_plane_data_rate[I915_MAX_PLANES] = {};
+ u64 plane_data_rate[I915_MAX_PLANES] = {};
+ u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
uint16_t total_min_blocks = 0;
/* Clear the partitioning for disabled planes. */
- memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
- memset(ddb->uv_plane[pipe], 0, sizeof(ddb->uv_plane[pipe]));
+ memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
+ memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv));
if (WARN_ON(!state))
return 0;
@@ -4343,11 +4433,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
}
- total_data_rate = skl_get_total_relative_data_rate(cstate,
- plane_data_rate,
- uv_plane_data_rate);
- skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb,
- alloc, &num_active);
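+ /* On gen11+ the UV rate is tracked on its own plane, so there is no uv_plane_data_rate */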
+ if (INTEL_GEN(dev_priv) < 11)
+ total_data_rate =
+ skl_get_total_relative_data_rate(cstate,
+ plane_data_rate,
+ uv_plane_data_rate);
+ else
+ total_data_rate =
+ icl_get_total_relative_data_rate(cstate,
+ plane_data_rate);
+
+ skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
+ ddb, alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
@@ -4373,8 +4470,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
}
alloc_size -= total_min_blocks;
- ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
- ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
/*
* 2. Distribute the remaining space in proportion to the amount of
@@ -4387,7 +4484,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
start = alloc->start;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- unsigned int data_rate, uv_data_rate;
+ u64 data_rate, uv_data_rate;
uint16_t plane_blocks, uv_plane_blocks;
if (plane_id == PLANE_CURSOR)
@@ -4401,13 +4498,12 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* result is < available as data_rate / total_data_rate < 1
*/
plane_blocks = minimum[plane_id];
- plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
- total_data_rate);
+ plane_blocks += div64_u64(alloc_size * data_rate, total_data_rate);
/* Leave disabled planes at (0,0) */
if (data_rate) {
- ddb->plane[pipe][plane_id].start = start;
- ddb->plane[pipe][plane_id].end = start + plane_blocks;
+ cstate->wm.skl.plane_ddb_y[plane_id].start = start;
+ cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks;
}
start += plane_blocks;
@@ -4416,12 +4512,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
uv_data_rate = uv_plane_data_rate[plane_id];
uv_plane_blocks = uv_minimum[plane_id];
- uv_plane_blocks += div_u64((uint64_t)alloc_size * uv_data_rate,
- total_data_rate);
+ uv_plane_blocks += div64_u64(alloc_size * uv_data_rate, total_data_rate);
+
+ /* Gen11+ uses a separate plane for UV watermarks */
+ WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
if (uv_data_rate) {
- ddb->uv_plane[pipe][plane_id].start = start;
- ddb->uv_plane[pipe][plane_id].end =
+ cstate->wm.skl.plane_ddb_uv[plane_id].start = start;
+ cstate->wm.skl.plane_ddb_uv[plane_id].end =
start + uv_plane_blocks;
}
@@ -4475,7 +4573,7 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
}
static uint_fixed_16_16_t
-intel_get_linetime_us(struct intel_crtc_state *cstate)
+intel_get_linetime_us(const struct intel_crtc_state *cstate)
{
uint32_t pixel_rate;
uint32_t crtc_htotal;
@@ -4518,12 +4616,12 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
}
static int
-skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
- struct intel_crtc_state *cstate,
+skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
- struct skl_wm_params *wp, int plane_id)
+ struct skl_wm_params *wp, int color_plane)
{
struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_plane_state *pstate = &intel_pstate->base;
const struct drm_framebuffer *fb = pstate->fb;
uint32_t interm_pbpl;
@@ -4531,11 +4629,8 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
to_intel_atomic_state(cstate->base.state);
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
- if (!intel_wm_plane_visible(cstate, intel_pstate))
- return 0;
-
/* only NV12 format has two planes */
- if (plane_id == 1 && fb->format->format != DRM_FORMAT_NV12) {
+ if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) {
DRM_DEBUG_KMS("Non NV12 format have single plane\n");
return -EINVAL;
}
@@ -4560,10 +4655,10 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
}
- if (plane_id == 1 && wp->is_planar)
+ if (color_plane == 1 && wp->is_planar)
wp->width /= 2;
- wp->cpp = fb->format->cpp[plane_id];
+ wp->cpp = fb->format->cpp[color_plane];
wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
intel_pstate);
@@ -4625,8 +4720,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
return 0;
}
-static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
- struct intel_crtc_state *cstate,
+static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
uint16_t ddb_allocation,
int level,
@@ -4634,6 +4728,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_pstate->base.plane->dev);
const struct drm_plane_state *pstate = &intel_pstate->base;
uint32_t latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
@@ -4644,11 +4740,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
uint32_t min_disp_buf_needed;
- if (latency == 0 ||
- !intel_wm_plane_visible(cstate, intel_pstate)) {
- result->plane_en = false;
- return 0;
- }
+ if (latency == 0)
+ return level == 0 ? -EINVAL : 0;
/* Display WA #1141: kbl,cfl */
if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
@@ -4671,15 +4764,24 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
} else {
if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
wp->dbuf_block_size < 1) &&
- (wp->plane_bytes_per_line / wp->dbuf_block_size < 1))
+ (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
- else if (ddb_allocation >=
- fixed16_to_u32_round_up(wp->plane_blocks_per_line))
- selected_result = min_fixed16(method1, method2);
- else if (latency >= wp->linetime_us)
- selected_result = min_fixed16(method1, method2);
- else
+ } else if (ddb_allocation >=
+ fixed16_to_u32_round_up(wp->plane_blocks_per_line)) {
+ if (IS_GEN9(dev_priv) &&
+ !IS_GEMINILAKE(dev_priv))
+ selected_result = min_fixed16(method1, method2);
+ else
+ selected_result = method2;
+ } else if (latency >= wp->linetime_us) {
+ if (IS_GEN9(dev_priv) &&
+ !IS_GEMINILAKE(dev_priv))
+ selected_result = min_fixed16(method1, method2);
+ else
+ selected_result = method2;
+ } else {
selected_result = method1;
+ }
}
res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
@@ -4736,8 +4838,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if ((level > 0 && res_lines > 31) ||
res_blocks >= ddb_allocation ||
min_disp_buf_needed >= ddb_allocation) {
- result->plane_en = false;
-
/*
* If there are no valid level 0 watermarks, then we can't
* support this display configuration.
@@ -4755,17 +4855,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
}
}
- /*
- * Display WA #826 (SKL:ALL, BXT:ALL) & #1059 (CNL:A)
- * disable wm level 1-7 on NV12 planes
- */
- if (wp->is_planar && level >= 1 &&
- (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
- IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))) {
- result->plane_en = false;
- return 0;
- }
-
/* The number of lines are ignored for the level 0 watermark. */
result->plane_res_b = res_blocks;
result->plane_res_l = res_lines;
@@ -4775,43 +4864,22 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
}
static int
-skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb,
- struct intel_crtc_state *cstate,
+skl_compute_wm_levels(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
+ uint16_t ddb_blocks,
const struct skl_wm_params *wm_params,
- struct skl_plane_wm *wm,
- int plane_id)
+ struct skl_wm_level *levels)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- struct drm_plane *plane = intel_pstate->base.plane;
- struct intel_plane *intel_plane = to_intel_plane(plane);
- uint16_t ddb_blocks;
- enum pipe pipe = intel_crtc->pipe;
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_pstate->base.plane->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
- enum plane_id intel_plane_id = intel_plane->id;
+ struct skl_wm_level *result_prev = &levels[0];
int ret;
- if (WARN_ON(!intel_pstate->base.fb))
- return -EINVAL;
-
- ddb_blocks = plane_id ?
- skl_ddb_entry_size(&ddb->uv_plane[pipe][intel_plane_id]) :
- skl_ddb_entry_size(&ddb->plane[pipe][intel_plane_id]);
-
for (level = 0; level <= max_level; level++) {
- struct skl_wm_level *result = plane_id ? &wm->uv_wm[level] :
- &wm->wm[level];
- struct skl_wm_level *result_prev;
+ struct skl_wm_level *result = &levels[level];
- if (level)
- result_prev = plane_id ? &wm->uv_wm[level - 1] :
- &wm->wm[level - 1];
- else
- result_prev = plane_id ? &wm->uv_wm[0] : &wm->wm[0];
-
- ret = skl_compute_plane_wm(dev_priv,
- cstate,
+ ret = skl_compute_plane_wm(cstate,
intel_pstate,
ddb_blocks,
level,
@@ -4820,16 +4888,15 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
result);
if (ret)
return ret;
- }
- if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
- wm->is_planar = true;
+ result_prev = result;
+ }
return 0;
}
static uint32_t
-skl_compute_linetime_wm(struct intel_crtc_state *cstate)
+skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
struct drm_atomic_state *state = cstate->base.state;
struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -4851,42 +4918,50 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
return linetime_wm;
}
-static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
- struct skl_wm_params *wp,
- struct skl_wm_level *wm_l0,
- uint16_t ddb_allocation,
- struct skl_wm_level *trans_wm /* out */)
+static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
+ const struct skl_wm_params *wp,
+ struct skl_plane_wm *wm,
+ uint16_t ddb_allocation)
{
struct drm_device *dev = cstate->base.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
uint16_t trans_min, trans_y_tile_min;
const uint16_t trans_amount = 10; /* This is configurable amount */
- uint16_t trans_offset_b, res_blocks;
-
- if (!cstate->base.active)
- goto exit;
+ uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
/* Transition WM are not recommended by HW team for GEN9 */
if (INTEL_GEN(dev_priv) <= 9)
- goto exit;
+ return;
/* Transition WM don't make any sense if ipc is disabled */
if (!dev_priv->ipc_enabled)
- goto exit;
+ return;
- trans_min = 0;
- if (INTEL_GEN(dev_priv) >= 10)
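+ /* gen9 returned above; use 14 transition blocks on gen10 and 4 on gen11+ */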
+ trans_min = 14;
+ if (INTEL_GEN(dev_priv) >= 11)
trans_min = 4;
trans_offset_b = trans_min + trans_amount;
+ /*
+ * The spec asks for Selected Result Blocks for wm0 (the real value),
+ * not Result Blocks (the integer value). Pay attention to the capital
+ * letters. The value wm->wm[0].plane_res_b is actually Result Blocks, but
+ * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
+ * and since we later will have to get the ceiling of the sum in the
+ * transition watermarks calculation, we can just pretend Selected
+ * Result Blocks is Result Blocks minus 1 and it should work for the
+ * current platforms.
+ */
+ wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
+
if (wp->y_tiled) {
trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
wp->y_tile_minimum);
- res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) +
+ res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
trans_offset_b;
} else {
- res_blocks = wm_l0->plane_res_b + trans_offset_b;
+ res_blocks = wm0_sel_res_b + trans_offset_b;
/* WA BUG:1938466 add one block for non y-tile planes */
if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
@@ -4897,25 +4972,132 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
res_blocks += 1;
if (res_blocks < ddb_allocation) {
- trans_wm->plane_res_b = res_blocks;
- trans_wm->plane_en = true;
- return;
+ wm->trans_wm.plane_res_b = res_blocks;
+ wm->trans_wm.plane_en = true;
+ }
+}
+
+static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ enum plane_id plane_id, int color_plane)
+{
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+ u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]);
+ struct skl_wm_params wm_params;
+ int ret;
+
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+ &wm_params, color_plane);
+ if (ret)
+ return ret;
+
+ ret = skl_compute_wm_levels(crtc_state, plane_state,
+ ddb_blocks, &wm_params, wm->wm);
+ if (ret)
+ return ret;
+
+ skl_compute_transition_wm(crtc_state, &wm_params, wm, ddb_blocks);
+
+ return 0;
+}
+
+static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ enum plane_id plane_id)
+{
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+ u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]);
+ struct skl_wm_params wm_params;
+ int ret;
+
+ wm->is_planar = true;
+
+ /* uv plane watermarks must also be validated for NV12/Planar */
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+ &wm_params, 1);
+ if (ret)
+ return ret;
+
+ ret = skl_compute_wm_levels(crtc_state, plane_state,
+ ddb_blocks, &wm_params, wm->uv_wm);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum plane_id plane_id = plane->id;
+ int ret;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_id, 0);
+ if (ret)
+ return ret;
+
+ if (fb->format->is_yuv && fb->format->num_planes > 1) {
+ ret = skl_build_plane_wm_uv(crtc_state, plane_state,
+ plane_id);
+ if (ret)
+ return ret;
}
-exit:
- trans_wm->plane_en = false;
+ return 0;
+}
+
+static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
+ int ret;
+
+ /* Watermarks calculated in master */
+ if (plane_state->slave)
+ return 0;
+
+ if (plane_state->linked_plane) {
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum plane_id y_plane_id = plane_state->linked_plane->id;
+
+ WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
+ WARN_ON(!fb->format->is_yuv ||
+ fb->format->num_planes == 1);
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ y_plane_id, 0);
+ if (ret)
+ return ret;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_id, 1);
+ if (ret)
+ return ret;
+ } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_id, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
- struct skl_ddb_allocation *ddb,
struct skl_pipe_wm *pipe_wm)
{
- struct drm_device *dev = cstate->base.crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct drm_crtc_state *crtc_state = &cstate->base;
- const struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_plane *plane;
const struct drm_plane_state *pstate;
- struct skl_plane_wm *wm;
int ret;
/*
@@ -4927,44 +5109,15 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
const struct intel_plane_state *intel_pstate =
to_intel_plane_state(pstate);
- enum plane_id plane_id = to_intel_plane(plane)->id;
- struct skl_wm_params wm_params;
- enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe;
- uint16_t ddb_blocks;
-
- wm = &pipe_wm->planes[plane_id];
- ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
- ret = skl_compute_plane_wm_params(dev_priv, cstate,
- intel_pstate, &wm_params, 0);
- if (ret)
- return ret;
-
- ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
- intel_pstate, &wm_params, wm, 0);
+ if (INTEL_GEN(dev_priv) >= 11)
+ ret = icl_build_plane_wm(pipe_wm,
+ cstate, intel_pstate);
+ else
+ ret = skl_build_plane_wm(pipe_wm,
+ cstate, intel_pstate);
if (ret)
return ret;
-
- skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
- ddb_blocks, &wm->trans_wm);
-
- /* uv plane watermarks must also be validated for NV12/Planar */
- if (wm_params.is_planar) {
- memset(&wm_params, 0, sizeof(struct skl_wm_params));
- wm->is_planar = true;
-
- ret = skl_compute_plane_wm_params(dev_priv, cstate,
- intel_pstate,
- &wm_params, 1);
- if (ret)
- return ret;
-
- ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
- intel_pstate, &wm_params,
- wm, 1);
- if (ret)
- return ret;
- }
}
pipe_wm->linetime = skl_compute_linetime_wm(cstate);
@@ -4977,9 +5130,9 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry)
{
if (entry->end)
- I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
+ I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
else
- I915_WRITE(reg, 0);
+ I915_WRITE_FW(reg, 0);
}
static void skl_write_wm_level(struct drm_i915_private *dev_priv,
@@ -4994,19 +5147,22 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
}
- I915_WRITE(reg, val);
+ I915_WRITE_FW(reg, val);
}
-static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
- const struct skl_plane_wm *wm,
- const struct skl_ddb_allocation *ddb,
- enum plane_id plane_id)
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int level, max_level = ilk_wm_max_level(dev_priv);
- enum pipe pipe = intel_crtc->pipe;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const struct skl_ddb_entry *ddb_uv =
+ &crtc_state->wm.skl.plane_ddb_uv[plane_id];
for (level = 0; level <= max_level; level++) {
skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
@@ -5015,35 +5171,32 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
&wm->trans_wm);
- skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- /* FIXME: add proper NV12 support for ICL. */
- if (INTEL_GEN(dev_priv) >= 11)
- return skl_ddb_entry_write(dev_priv,
- PLANE_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- if (wm->is_planar) {
- skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
- &ddb->uv_plane[pipe][plane_id]);
+ if (INTEL_GEN(dev_priv) >= 11) {
skl_ddb_entry_write(dev_priv,
- PLANE_NV12_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- } else {
- skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
+ PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+ return;
}
+
+ if (wm->is_planar)
+ swap(ddb_y, ddb_uv);
+
+ skl_ddb_entry_write(dev_priv,
+ PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+ skl_ddb_entry_write(dev_priv,
+ PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
}
-static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
- const struct skl_plane_wm *wm,
- const struct skl_ddb_allocation *ddb)
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int level, max_level = ilk_wm_max_level(dev_priv);
- enum pipe pipe = intel_crtc->pipe;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
for (level = 0; level <= max_level; level++) {
skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
@@ -5051,22 +5204,30 @@ static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
}
skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
- skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
- &ddb->plane[pipe][PLANE_CURSOR]);
+ skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
}
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2)
{
- if (l1->plane_en != l2->plane_en)
- return false;
+ return l1->plane_en == l2->plane_en &&
+ l1->plane_res_l == l2->plane_res_l &&
+ l1->plane_res_b == l2->plane_res_b;
+}
- /* If both planes aren't enabled, the rest shouldn't matter */
- if (!l1->plane_en)
- return true;
+static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
+ const struct skl_plane_wm *wm1,
+ const struct skl_plane_wm *wm2)
+{
+ int level, max_level = ilk_wm_max_level(dev_priv);
+
+ for (level = 0; level <= max_level; level++) {
+ if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
+ !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
+ return false;
+ }
- return (l1->plane_res_l == l2->plane_res_l &&
- l1->plane_res_b == l2->plane_res_b);
+ return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
}
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
@@ -5075,16 +5236,15 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
return a->start < b->end && b->start < a->end;
}
-bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
- const struct skl_ddb_entry **entries,
- const struct skl_ddb_entry *ddb,
- int ignore)
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+ const struct skl_ddb_entry entries[],
+ int num_entries, int ignore_idx)
{
- enum pipe pipe;
+ int i;
- for_each_pipe(dev_priv, pipe) {
- if (pipe != ignore && entries[pipe] &&
- skl_ddb_entries_overlap(ddb, entries[pipe]))
+ for (i = 0; i < num_entries; i++) {
+ if (i != ignore_idx &&
+ skl_ddb_entries_overlap(ddb, &entries[i]))
return true;
}
@@ -5094,13 +5254,12 @@ bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
const struct skl_pipe_wm *old_pipe_wm,
struct skl_pipe_wm *pipe_wm, /* out */
- struct skl_ddb_allocation *ddb, /* out */
bool *changed /* out */)
{
struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
int ret;
- ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
+ ret = skl_build_pipe_wm(intel_cstate, pipe_wm);
if (ret)
return ret;
@@ -5126,32 +5285,29 @@ pipes_modified(struct drm_atomic_state *state)
}
static int
-skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
+skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_atomic_state *state = cstate->base.state;
- struct drm_device *dev = state->dev;
- struct drm_crtc *crtc = cstate->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
- struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
- struct drm_plane_state *plane_state;
- struct drm_plane *plane;
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
- drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
- enum plane_id plane_id = to_intel_plane(plane)->id;
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
- if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
- &new_ddb->plane[pipe][plane_id]) &&
- skl_ddb_entry_equal(&cur_ddb->uv_plane[pipe][plane_id],
- &new_ddb->uv_plane[pipe][plane_id]))
+ if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
+ skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
continue;
- plane_state = drm_atomic_get_plane_state(state, plane);
+ plane_state = intel_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane_id);
}
return 0;
@@ -5163,18 +5319,21 @@ skl_compute_ddb(struct drm_atomic_state *state)
const struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+ struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
- struct intel_crtc_state *cstate;
int ret, i;
memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
- for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) {
- ret = skl_allocate_pipe_ddb(cstate, ddb);
+ for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
if (ret)
return ret;
- ret = skl_ddb_add_affected_planes(cstate);
+ ret = skl_ddb_add_affected_planes(old_crtc_state,
+ new_crtc_state);
if (ret)
return ret;
}
@@ -5183,38 +5342,31 @@ skl_compute_ddb(struct drm_atomic_state *state)
}
static void
-skl_print_wm_changes(const struct drm_atomic_state *state)
+skl_print_wm_changes(struct intel_atomic_state *state)
{
- const struct drm_device *dev = state->dev;
- const struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_atomic_state *intel_state =
- to_intel_atomic_state(state);
- const struct drm_crtc *crtc;
- const struct drm_crtc_state *cstate;
- const struct intel_plane *intel_plane;
- const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
- const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
int i;
- for_each_new_crtc_in_state(state, crtc, cstate, i) {
- const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
-
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- enum plane_id plane_id = intel_plane->id;
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
- old = &old_ddb->plane[pipe][plane_id];
- new = &new_ddb->plane[pipe][plane_id];
+ old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
+ new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
if (skl_ddb_entry_equal(old, new))
continue;
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
- intel_plane->base.base.id,
- intel_plane->base.name,
- old->start, old->end,
- new->start, new->end);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
+ plane->base.base.id, plane->base.name,
+ old->start, old->end,
+ new->start, new->end);
}
}
}
@@ -5309,6 +5461,66 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
return 0;
}
+/*
+ * To make sure the cursor watermark registers are always consistent
+ * with our computed state, the following scenario needs special
+ * treatment:
+ *
+ * 1. enable cursor
+ * 2. move cursor entirely offscreen
+ * 3. disable cursor
+ *
+ * Step 2. does call .disable_plane() but does not zero the watermarks
+ * (since we consider an offscreen cursor still active for the purposes
+ * of watermarks). Step 3. would not normally call .disable_plane()
+ * because the actual plane visibility isn't changing, and we don't
+ * deallocate the cursor ddb until the pipe gets disabled. So we must
+ * force step 3. to call .disable_plane() to update the watermark
+ * registers properly.
+ *
+ * Other planes do not suffer from these issues as their watermarks are
+ * calculated based on the actual plane visibility. The only time this
+ * can trigger for the other planes is during the initial readout, as the
+ * default value of the watermark registers is not zero.
+ */
+static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
+
+ /*
+ * Force a full wm update for every plane on modeset.
+ * Required because the reset value of the wm registers
+ * is non-zero, whereas we want all disabled planes to
+ * have zero watermarks. So if we turn off the relevant
+ * power well the hardware state will go out of sync
+ * with the software state.
+ */
+ if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) &&
+ skl_plane_wm_equals(dev_priv,
+ &old_crtc_state->wm.skl.optimal.planes[plane_id],
+ &new_crtc_state->wm.skl.optimal.planes[plane_id]))
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane_id);
+ }
+
+ return 0;
+}
+
static int
skl_compute_wm(struct drm_atomic_state *state)
{
@@ -5348,8 +5560,12 @@ skl_compute_wm(struct drm_atomic_state *state)
&to_intel_crtc_state(crtc->state)->wm.skl.optimal;
pipe_wm = &intel_cstate->wm.skl.optimal;
- ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
- &results->ddb, &changed);
+ ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
+ if (ret)
+ return ret;
+
+ ret = skl_wm_add_affected_planes(intel_state,
+ to_intel_crtc(crtc));
if (ret)
return ret;
@@ -5363,7 +5579,7 @@ skl_compute_wm(struct drm_atomic_state *state)
intel_cstate->update_wm_pre = true;
}
- skl_print_wm_changes(state);
+ skl_print_wm_changes(intel_state);
return 0;
}
@@ -5374,23 +5590,12 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
- const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
return;
I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- if (plane_id != PLANE_CURSOR)
- skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
- ddb, plane_id);
- else
- skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
- ddb);
- }
}
static void skl_initial_wm(struct intel_atomic_state *state,
@@ -5400,8 +5605,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_values *results = &state->wm_results;
- struct skl_ddb_values *hw_vals = &dev_priv->wm.skl_hw;
- enum pipe pipe = intel_crtc->pipe;
if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
return;
@@ -5411,11 +5614,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
if (cstate->base.active_changed)
skl_atomic_update_crtc_wm(state, cstate);
- memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe],
- sizeof(hw_vals->ddb.uv_plane[pipe]));
- memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe],
- sizeof(hw_vals->ddb.plane[pipe]));
-
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -5566,13 +5764,6 @@ void skl_wm_get_hw_state(struct drm_device *dev)
if (dev_priv->active_crtcs) {
/* Fully recompute DDB on first atomic commit */
dev_priv->wm.distrust_bios_wm = true;
- } else {
- /*
- * Easy/common case; just sanitize DDB now if everything off
- * Keep dbuf slice info intact
- */
- memset(ddb->plane, 0, sizeof(ddb->plane));
- memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane));
}
}
@@ -6116,14 +6307,8 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
{
u32 val;
- /* Display WA #0477 WaDisableIPC: skl */
- if (IS_SKYLAKE(dev_priv))
- dev_priv->ipc_enabled = false;
-
- /* Display WA #1141: SKL:all KBL:all CFL */
- if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
- !dev_priv->dram_info.symmetric_memory)
- dev_priv->ipc_enabled = false;
+ if (!HAS_IPC(dev_priv))
+ return;
val = I915_READ(DISP_ARB_CTL2);
@@ -6137,11 +6322,15 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
void intel_init_ipc(struct drm_i915_private *dev_priv)
{
- dev_priv->ipc_enabled = false;
if (!HAS_IPC(dev_priv))
return;
- dev_priv->ipc_enabled = true;
+ /* Display WA #1141: SKL:all KBL:all CFL */
+ if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ dev_priv->ipc_enabled = dev_priv->dram_info.symmetric_memory;
+ else
+ dev_priv->ipc_enabled = true;
+
intel_enable_ipc(dev_priv);
}
@@ -8735,6 +8924,10 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
 /* This is not a Wa. Enable to reduce Sampler power */
I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
+
+ /* WaEnable32PlaneMode:icl */
+ I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
+ _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
}
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9312,8 +9505,6 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
- intel_fbc_init(dev_priv);
-
/* For cxsr */
if (IS_PINEVIEW(dev_priv))
i915_pineview_get_mem_freq(dev_priv);
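As an aside on the reworked skl_ddb_allocation_overlaps() above, here is a minimal standalone sketch of the same idea: a candidate DDB span is checked against a plain array of per-pipe entries while one index is skipped. The struct and helper names below are illustrative stand-ins, not the driver's real definitions.

#include <stdbool.h>
#include <stdio.h>

struct ddb_entry { int start, end; };	/* stand-in for skl_ddb_entry */

static bool entries_overlap(const struct ddb_entry *a, const struct ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}

static bool allocation_overlaps(const struct ddb_entry *ddb,
				const struct ddb_entry entries[],
				int num_entries, int ignore_idx)
{
	int i;

	/* skip the entry being reallocated (ignore_idx), test all the others */
	for (i = 0; i < num_entries; i++) {
		if (i != ignore_idx && entries_overlap(ddb, &entries[i]))
			return true;
	}

	return false;
}

int main(void)
{
	const struct ddb_entry entries[] = { { 0, 100 }, { 100, 200 }, { 200, 300 } };
	const struct ddb_entry ddb = { 90, 150 };

	/* overlaps entries[0] even though entries[1] (its own slot) is ignored */
	printf("%d\n", allocation_overlaps(&ddb, entries, 3, 1));
	return 0;
}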
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index b6838b525502..419e56342523 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -71,6 +71,14 @@ static bool psr_global_enabled(u32 debug)
static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
+ /* Disable PSR2 by default for all platforms */
+ if (i915_modparams.enable_psr == -1)
+ return false;
+
+ /* Cannot enable DSC and PSR2 simultaneously */
+ WARN_ON(crtc_state->dsc_params.compression_enable &&
+ crtc_state->has_psr2);
+
switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
@@ -79,25 +87,42 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
}
}
+static int edp_psr_shift(enum transcoder cpu_transcoder)
+{
+ switch (cpu_transcoder) {
+ case TRANSCODER_A:
+ return EDP_PSR_TRANSCODER_A_SHIFT;
+ case TRANSCODER_B:
+ return EDP_PSR_TRANSCODER_B_SHIFT;
+ case TRANSCODER_C:
+ return EDP_PSR_TRANSCODER_C_SHIFT;
+ default:
+ MISSING_CASE(cpu_transcoder);
+ /* fallthrough */
+ case TRANSCODER_EDP:
+ return EDP_PSR_TRANSCODER_EDP_SHIFT;
+ }
+}
+
void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
{
u32 debug_mask, mask;
+ enum transcoder cpu_transcoder;
+ u32 transcoders = BIT(TRANSCODER_EDP);
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ transcoders |= BIT(TRANSCODER_A) |
+ BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C);
+
+ debug_mask = 0;
+ mask = 0;
+ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ int shift = edp_psr_shift(cpu_transcoder);
- mask = EDP_PSR_ERROR(TRANSCODER_EDP);
- debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
- EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);
-
- if (INTEL_GEN(dev_priv) >= 8) {
- mask |= EDP_PSR_ERROR(TRANSCODER_A) |
- EDP_PSR_ERROR(TRANSCODER_B) |
- EDP_PSR_ERROR(TRANSCODER_C);
-
- debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) |
- EDP_PSR_PRE_ENTRY(TRANSCODER_A) |
- EDP_PSR_POST_EXIT(TRANSCODER_B) |
- EDP_PSR_PRE_ENTRY(TRANSCODER_B) |
- EDP_PSR_POST_EXIT(TRANSCODER_C) |
- EDP_PSR_PRE_ENTRY(TRANSCODER_C);
+ mask |= EDP_PSR_ERROR(shift);
+ debug_mask |= EDP_PSR_POST_EXIT(shift) |
+ EDP_PSR_PRE_ENTRY(shift);
}
if (debug & I915_PSR_DEBUG_IRQ)
@@ -148,6 +173,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
u32 transcoders = BIT(TRANSCODER_EDP);
enum transcoder cpu_transcoder;
ktime_t time_ns = ktime_get();
+ u32 mask = 0;
if (INTEL_GEN(dev_priv) >= 8)
transcoders |= BIT(TRANSCODER_A) |
@@ -155,18 +181,32 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
BIT(TRANSCODER_C);
for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
- /* FIXME: Exit PSR and link train manually when this happens. */
- if (psr_iir & EDP_PSR_ERROR(cpu_transcoder))
- DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
- transcoder_name(cpu_transcoder));
+ int shift = edp_psr_shift(cpu_transcoder);
+
+ if (psr_iir & EDP_PSR_ERROR(shift)) {
+ DRM_WARN("[transcoder %s] PSR aux error\n",
+ transcoder_name(cpu_transcoder));
+
+ dev_priv->psr.irq_aux_error = true;
- if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) {
+ /*
+ * If this interrupt is not masked it will keep firing so
+ * fast that it prevents the scheduled work from running.
+ * Also, after a PSR error we don't want to arm PSR again,
+ * so we don't care about unmasking the interrupt or
+ * clearing irq_aux_error.
+ */
+ mask |= EDP_PSR_ERROR(shift);
+ }
+
+ if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
dev_priv->psr.last_entry_attempt = time_ns;
DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
transcoder_name(cpu_transcoder));
}
- if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) {
+ if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
dev_priv->psr.last_exit = time_ns;
DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
transcoder_name(cpu_transcoder));
@@ -180,6 +220,13 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
}
}
}
+
+ if (mask) {
+ mask |= I915_READ(EDP_PSR_IMR);
+ I915_WRITE(EDP_PSR_IMR, mask);
+
+ schedule_work(&dev_priv->psr.work);
+ }
}
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
@@ -294,7 +341,8 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
psr_vsc.sdp_header.HB3 = 0x8;
}
- intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
+ intel_dig_port->write_infoframe(&intel_dig_port->base,
+ crtc_state,
DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}
@@ -458,6 +506,16 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (!dev_priv->psr.sink_psr2_support)
return false;
+ /*
+ * DSC and PSR2 cannot be enabled simultaneously. If a requested
+ * resolution requires DSC to be enabled, priority is given to DSC
+ * over PSR2.
+ */
+ if (crtc_state->dsc_params.compression_enable) {
+ DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
+ return false;
+ }
+
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
psr_max_h = 4096;
psr_max_v = 2304;
@@ -503,10 +561,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- if (IS_HASWELL(dev_priv) &&
- I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
- S3D_ENABLE) {
- DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+ if (dev_priv->psr.sink_not_reliable) {
+ DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
return;
}
@@ -553,11 +609,31 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
dev_priv->psr.active = true;
}
+static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ static const i915_reg_t regs[] = {
+ [TRANSCODER_A] = CHICKEN_TRANS_A,
+ [TRANSCODER_B] = CHICKEN_TRANS_B,
+ [TRANSCODER_C] = CHICKEN_TRANS_C,
+ [TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
+ };
+
+ WARN_ON(INTEL_GEN(dev_priv) < 9);
+
+ if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
+ !regs[cpu_transcoder].reg))
+ cpu_transcoder = TRANSCODER_A;
+
+ return regs[cpu_transcoder];
+}
+
static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 mask;
 /* Only HSW and BDW have PSR AUX registers that need to be set up. SKL+
 * use hardcoded values for PSR AUX transactions
@@ -566,37 +642,34 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
hsw_psr_setup_aux(intel_dp);
if (dev_priv->psr.psr2_enabled) {
- u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));
+ i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
+ cpu_transcoder);
+ u32 chicken = I915_READ(reg);
- if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
+ if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
| PSR2_ADD_VERTICAL_LINE_COUNT);
else
chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
- I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
-
- I915_WRITE(EDP_PSR_DEBUG,
- EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD |
- EDP_PSR_DEBUG_MASK_LPSP |
- EDP_PSR_DEBUG_MASK_MAX_SLEEP |
- EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
- } else {
- /*
- * Per Spec: Avoid continuous PSR exit by masking MEMUP
- * and HPD. also mask LPSP to avoid dependency on other
- * drivers that might block runtime_pm besides
- * preventing other hw tracking issues now we can rely
- * on frontbuffer tracking.
- */
- I915_WRITE(EDP_PSR_DEBUG,
- EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD |
- EDP_PSR_DEBUG_MASK_LPSP |
- EDP_PSR_DEBUG_MASK_DISP_REG_WRITE |
- EDP_PSR_DEBUG_MASK_MAX_SLEEP);
+ I915_WRITE(reg, chicken);
}
+
+ /*
+ * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
+ * mask LPSP to avoid a dependency on other drivers that might block
+ * runtime_pm, besides preventing other hw tracking issues now that we
+ * can rely on frontbuffer tracking.
+ */
+ mask = EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD |
+ EDP_PSR_DEBUG_MASK_LPSP |
+ EDP_PSR_DEBUG_MASK_MAX_SLEEP;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
+
+ I915_WRITE(EDP_PSR_DEBUG, mask);
}
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
@@ -646,6 +719,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
dev_priv->psr.prepared = true;
+ dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
if (psr_global_enabled(dev_priv->psr.debug))
intel_psr_enable_locked(dev_priv, crtc_state);
@@ -656,49 +730,34 @@ unlock:
mutex_unlock(&dev_priv->psr.lock);
}
-static void
-intel_psr_disable_source(struct intel_dp *intel_dp)
+static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (dev_priv->psr.active) {
- i915_reg_t psr_status;
- u32 psr_status_mask;
-
- if (dev_priv->psr.psr2_enabled) {
- psr_status = EDP_PSR2_STATUS;
- psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
-
- I915_WRITE(EDP_PSR2_CTL,
- I915_READ(EDP_PSR2_CTL) &
- ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
-
- } else {
- psr_status = EDP_PSR_STATUS;
- psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
-
- I915_WRITE(EDP_PSR_CTL,
- I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
- }
+ u32 val;
- /* Wait till PSR is idle */
- if (intel_wait_for_register(dev_priv,
- psr_status, psr_status_mask, 0,
- 2000))
- DRM_ERROR("Timed out waiting for PSR Idle State\n");
+ if (!dev_priv->psr.active) {
+ if (INTEL_GEN(dev_priv) >= 9)
+ WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
+ WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ return;
+ }
- dev_priv->psr.active = false;
+ if (dev_priv->psr.psr2_enabled) {
+ val = I915_READ(EDP_PSR2_CTL);
+ WARN_ON(!(val & EDP_PSR2_ENABLE));
+ I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
} else {
- if (dev_priv->psr.psr2_enabled)
- WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
- else
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ val = I915_READ(EDP_PSR_CTL);
+ WARN_ON(!(val & EDP_PSR_ENABLE));
+ I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
}
+ dev_priv->psr.active = false;
}
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ i915_reg_t psr_status;
+ u32 psr_status_mask;
lockdep_assert_held(&dev_priv->psr.lock);
@@ -707,7 +766,21 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Disabling PSR%s\n",
dev_priv->psr.psr2_enabled ? "2" : "1");
- intel_psr_disable_source(intel_dp);
+
+ intel_psr_exit(dev_priv);
+
+ if (dev_priv->psr.psr2_enabled) {
+ psr_status = EDP_PSR2_STATUS;
+ psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
+ } else {
+ psr_status = EDP_PSR_STATUS;
+ psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
+ }
+
+ /* Wait till PSR is idle */
+ if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0,
+ 2000))
+ DRM_ERROR("Timed out waiting PSR idle state\n");
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
@@ -893,6 +966,16 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
return ret;
}
+static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
+{
+ struct i915_psr *psr = &dev_priv->psr;
+
+ intel_psr_disable_locked(psr->dp);
+ psr->sink_not_reliable = true;
+ /* let's make sure that the sink is awake */
+ drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+}
+
static void intel_psr_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
@@ -903,6 +986,9 @@ static void intel_psr_work(struct work_struct *work)
if (!dev_priv->psr.enabled)
goto unlock;
+ if (READ_ONCE(dev_priv->psr.irq_aux_error))
+ intel_psr_handle_irq(dev_priv);
+
/*
* We have to make sure PSR is ready for re-enable
* otherwise it keeps disabled until next full enable/disable cycle.
@@ -925,25 +1011,6 @@ unlock:
mutex_unlock(&dev_priv->psr.lock);
}
-static void intel_psr_exit(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- if (!dev_priv->psr.active)
- return;
-
- if (dev_priv->psr.psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL);
- WARN_ON(!(val & EDP_PSR2_ENABLE));
- I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
- } else {
- val = I915_READ(EDP_PSR_CTL);
- WARN_ON(!(val & EDP_PSR_ENABLE));
- I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
- }
- dev_priv->psr.active = false;
-}
-
/**
 * intel_psr_invalidate - Invalidate PSR
* @dev_priv: i915 device
@@ -960,9 +1027,6 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- struct drm_crtc *crtc;
- enum pipe pipe;
-
if (!CAN_PSR(dev_priv))
return;
@@ -975,10 +1039,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
return;
}
- crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
if (frontbuffer_bits)
@@ -1003,9 +1064,6 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- struct drm_crtc *crtc;
- enum pipe pipe;
-
if (!CAN_PSR(dev_priv))
return;
@@ -1018,28 +1076,21 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
return;
}
- crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
/* By definition flush = invalidate + flush */
if (frontbuffer_bits) {
- if (dev_priv->psr.psr2_enabled) {
- intel_psr_exit(dev_priv);
- } else {
- /*
- * Display WA #0884: all
- * This documented WA for bxt can be safely applied
- * broadly so we can force HW tracking to exit PSR
- * instead of disabling and re-enabling.
- * Workaround tells us to write 0 to CUR_SURFLIVE_A,
- * but it makes more sense write to the current active
- * pipe.
- */
- I915_WRITE(CURSURFLIVE(pipe), 0);
- }
+ /*
+ * Display WA #0884: all
+ * This documented WA for bxt can be safely applied
+ * broadly so we can force HW tracking to exit PSR
+ * instead of disabling and re-enabling.
+ * The workaround tells us to write 0 to CUR_SURFLIVE_A,
+ * but it makes more sense to write to the currently
+ * active pipe.
+ */
+ I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
}
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
@@ -1056,6 +1107,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
*/
void intel_psr_init(struct drm_i915_private *dev_priv)
{
+ u32 val;
+
if (!HAS_PSR(dev_priv))
return;
@@ -1065,11 +1118,24 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
if (!dev_priv->psr.sink_support)
return;
- if (i915_modparams.enable_psr == -1) {
- i915_modparams.enable_psr = dev_priv->vbt.psr.enable;
+ if (i915_modparams.enable_psr == -1)
+ if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
+ i915_modparams.enable_psr = 0;
- /* Per platform default: all disabled. */
- i915_modparams.enable_psr = 0;
+ /*
+ * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
+ * will still keep the error set even after the reset done in the
+ * irq_preinstall and irq_uninstall hooks.
+ * Enabling PSR in this situation causes the screen to freeze the
+ * first time the PSR HW tries to activate, so let's keep PSR
+ * disabled to avoid any rendering problems.
+ */
+ val = I915_READ(EDP_PSR_IIR);
+ val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
+ if (val) {
+ DRM_DEBUG_KMS("PSR interruption error set\n");
+ dev_priv->psr.sink_not_reliable = true;
+ return;
}
/* Set link_standby x link_off defaults */
@@ -1109,6 +1175,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
}
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
@@ -1126,12 +1193,27 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if (val & ~errors)
DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
val & ~errors);
- if (val & errors)
+ if (val & errors) {
intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
+ }
/* clear status register */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
-
- /* TODO: handle PSR2 errors */
exit:
mutex_unlock(&psr->lock);
}
+
+bool intel_psr_enabled(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ bool ret;
+
+ if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+ return false;
+
+ mutex_lock(&dev_priv->psr.lock);
+ ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
+ mutex_unlock(&dev_priv->psr.lock);
+
+ return ret;
+}
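For illustration, a minimal sketch of the bit-composition pattern used by the new intel_psr_irq_control() above: a per-transcoder shift feeds a handful of field macros, and the results are OR-ed into the interrupt masks. The register layout macros and shift table below are made up; only the structure mirrors the patch.

#include <stdio.h>

/* hypothetical bit layout, NOT the real EDP_PSR_* definitions */
#define PSR_ERROR(shift)	(0x4u << (shift))
#define PSR_POST_EXIT(shift)	(0x2u << (shift))
#define PSR_PRE_ENTRY(shift)	(0x1u << (shift))

static int psr_shift(int transcoder)
{
	static const int shifts[] = { 0, 4, 8, 12 };	/* A, B, C, EDP (made up) */

	return shifts[transcoder];
}

int main(void)
{
	unsigned int mask = 0, debug_mask = 0;
	int transcoder;

	for (transcoder = 0; transcoder < 4; transcoder++) {
		int shift = psr_shift(transcoder);

		mask |= PSR_ERROR(shift);
		debug_mask |= PSR_POST_EXIT(shift) | PSR_PRE_ENTRY(shift);
	}

	printf("mask=0x%x debug_mask=0x%x\n", mask, debug_mask);
	return 0;
}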
diff --git a/drivers/gpu/drm/i915/intel_quirks.c b/drivers/gpu/drm/i915/intel_quirks.c
new file mode 100644
index 000000000000..ec2b0fc92b8b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_quirks.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/dmi.h>
+
+#include "intel_drv.h"
+
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_i915_private *i915)
+{
+ i915->quirks |= QUIRK_LVDS_SSC_DISABLE;
+ DRM_INFO("applying lvds SSC disable quirk\n");
+}
+
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+static void quirk_invert_brightness(struct drm_i915_private *i915)
+{
+ i915->quirks |= QUIRK_INVERT_BRIGHTNESS;
+ DRM_INFO("applying inverted panel brightness quirk\n");
+}
+
+/* Some VBT's incorrectly indicate no backlight is present */
+static void quirk_backlight_present(struct drm_i915_private *i915)
+{
+ i915->quirks |= QUIRK_BACKLIGHT_PRESENT;
+ DRM_INFO("applying backlight present quirk\n");
+}
+
+/* Toshiba Satellite P50-C-18C requires the T12 delay to be at least 800 ms,
+ * which is 300 ms greater than the eDP spec T12 minimum.
+ */
+static void quirk_increase_t12_delay(struct drm_i915_private *i915)
+{
+ i915->quirks |= QUIRK_INCREASE_T12_DELAY;
+ DRM_INFO("Applying T12 delay quirk\n");
+}
+
+/*
+ * GeminiLake NUC HDMI outputs require additional off time;
+ * this allows the onboard retimer to correctly sync to the signal.
+ */
+static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
+{
+ i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+ DRM_INFO("Applying Increase DDI Disabled quirk\n");
+}
+
+struct intel_quirk {
+ int device;
+ int subsystem_vendor;
+ int subsystem_device;
+ void (*hook)(struct drm_i915_private *i915);
+};
+
+/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+struct intel_dmi_quirk {
+ void (*hook)(struct drm_i915_private *i915);
+ const struct dmi_system_id (*dmi_id_list)[];
+};
+
+static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+{
+ DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
+ return 1;
+}
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+ {
+ .dmi_id_list = &(const struct dmi_system_id[]) {
+ {
+ .callback = intel_dmi_reverse_brightness,
+ .ident = "NCR Corporation",
+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
+ },
+ },
+ { } /* terminating entry */
+ },
+ .hook = quirk_invert_brightness,
+ },
+};
+
+static struct intel_quirk intel_quirks[] = {
+ /* Lenovo U160 cannot use SSC on LVDS */
+ { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
+
+ /* Sony Vaio Y cannot use SSC on LVDS */
+ { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+ /* Acer Aspire 5734Z must invert backlight brightness */
+ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+ /* Acer/eMachines G725 */
+ { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+ /* Acer/eMachines e725 */
+ { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+ /* Acer Aspire 4736Z */
+ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+ /* Acer Aspire 5336 */
+ { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
+
+ /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
+ { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
+
+ /* Acer C720 Chromebook (Core i3 4005U) */
+ { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
+
+ /* Apple Macbook 2,1 (Core 2 T7400) */
+ { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
+
+ /* Apple Macbook 4,1 */
+ { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
+
+ /* Toshiba CB35 Chromebook (Celeron 2955U) */
+ { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+ /* HP Chromebook 14 (Celeron 2955U) */
+ { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
+
+ /* Dell Chromebook 11 */
+ { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
+
+ /* Dell Chromebook 11 (2015 version) */
+ { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
+
+ /* Toshiba Satellite P50-C-18C */
+ { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+
+ /* GeminiLake NUC */
+ { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+ /* ASRock ITX */
+ { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+};
+
+void intel_init_quirks(struct drm_i915_private *i915)
+{
+ struct pci_dev *d = i915->drm.pdev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+ struct intel_quirk *q = &intel_quirks[i];
+
+ if (d->device == q->device &&
+ (d->subsystem_vendor == q->subsystem_vendor ||
+ q->subsystem_vendor == PCI_ANY_ID) &&
+ (d->subsystem_device == q->subsystem_device ||
+ q->subsystem_device == PCI_ANY_ID))
+ q->hook(i915);
+ }
+ for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
+ if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
+ intel_dmi_quirks[i].hook(i915);
+ }
+}
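A small sketch of the PCI match rule used by intel_init_quirks() above: a quirk applies when the device ID matches and each subsystem field either matches or is the PCI_ANY_ID wildcard. The IDs and names below are arbitrary examples; only the matching logic follows the new file.

#include <stdbool.h>
#include <stdio.h>

#define ANY_ID (~0)	/* stand-in for PCI_ANY_ID */

struct quirk { int device, subsystem_vendor, subsystem_device; };

static bool quirk_matches(const struct quirk *q,
			  int device, int sub_vendor, int sub_device)
{
	return device == q->device &&
	       (sub_vendor == q->subsystem_vendor ||
		q->subsystem_vendor == ANY_ID) &&
	       (sub_device == q->subsystem_device ||
		q->subsystem_device == ANY_ID);
}

int main(void)
{
	/* wildcard entry: applies to every 0x3185 device regardless of subsystem */
	const struct quirk q = { 0x3185, ANY_ID, ANY_ID };

	printf("%d\n", quirk_matches(&q, 0x3185, 0x1849, 0x2212));	/* prints 1 */
	return 0;
}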
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 187bb0ceb4ac..c5eb26a7ee79 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -150,8 +150,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*/
if (mode & EMIT_INVALIDATE) {
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_ggtt_offset(rq->engine->scratch) |
- PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
@@ -159,8 +158,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*cs++ = MI_FLUSH;
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_ggtt_offset(rq->engine->scratch) |
- PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
}
@@ -212,8 +210,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
static int
intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs;
cs = intel_ring_begin(rq, 6);
@@ -246,8 +243,7 @@ intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
int ret;
@@ -316,8 +312,7 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
/*
@@ -529,6 +524,13 @@ static int init_ring_common(struct intel_engine_cs *engine)
intel_engine_reset_breadcrumbs(engine);
+ if (HAS_LEGACY_SEMAPHORES(engine->i915)) {
+ I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
+ I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
+ if (HAS_VEBOX(dev_priv))
+ I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
+ }
+
/* Enforce ordering by reading HEAD register back */
I915_READ_HEAD(engine);
@@ -546,10 +548,11 @@ static int init_ring_common(struct intel_engine_cs *engine)
/* Check that the ring offsets point within the ring! */
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
-
intel_ring_update_space(ring);
+
+ /* First wake the ring up to an empty/idle ring */
I915_WRITE_HEAD(engine, ring->head);
- I915_WRITE_TAIL(engine, ring->tail);
+ I915_WRITE_TAIL(engine, ring->head);
(void)I915_READ_TAIL(engine);
I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
@@ -574,6 +577,12 @@ static int init_ring_common(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) > 2)
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
+ /* Now awake, let it get started */
+ if (ring->tail != ring->head) {
+ I915_WRITE_TAIL(engine, ring->tail);
+ (void)I915_READ_TAIL(engine);
+ }
+
/* Papering over lost _interrupts_ immediately following the restart */
intel_engine_wakeup(engine);
out:
@@ -608,7 +617,9 @@ static void skip_request(struct i915_request *rq)
static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
{
- GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0);
+ GEM_TRACE("%s request global=%d, current=%d\n",
+ engine->name, rq ? rq->global_seqno : 0,
+ intel_engine_get_seqno(engine));
/*
* Try to restore the logical GPU state to match the continuation
@@ -640,7 +651,7 @@ static int intel_rcs_ctx_init(struct i915_request *rq)
{
int ret;
- ret = intel_ctx_workarounds_emit(rq);
+ ret = intel_engine_emit_ctx_wa(rq);
if (ret != 0)
return ret;
@@ -658,8 +669,6 @@ static int init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- intel_whitelist_workarounds_apply(engine);
-
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
if (IS_GEN(dev_priv, 4, 6))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
@@ -741,9 +750,18 @@ static void cancel_requests(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->timeline.requests, link) {
GEM_BUG_ON(!request->global_seqno);
- if (!i915_request_completed(request))
- dma_fence_set_error(&request->fence, -EIO);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &request->fence.flags))
+ continue;
+
+ dma_fence_set_error(&request->fence, -EIO);
}
+
+ intel_write_status_page(engine,
+ I915_GEM_HWS_INDEX,
+ intel_engine_last_submit(engine));
+
/* Remaining _unready_ requests will be nop'ed when submitted */
spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -971,7 +989,7 @@ i965_emit_bb_start(struct i915_request *rq,
}
 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
-#define I830_BATCH_LIMIT (256*1024)
+#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
@@ -979,7 +997,9 @@ i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
+ u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
+
+ GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1055,8 +1075,7 @@ i915_emit_bb_start(struct i915_request *rq,
int intel_ring_pin(struct intel_ring *ring)
{
struct i915_vma *vma = ring->vma;
- enum i915_map_type map =
- HAS_LLC(vma->vm->i915) ? I915_MAP_WB : I915_MAP_WC;
+ enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
unsigned int flags;
void *addr;
int ret;
@@ -1437,7 +1456,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
struct i915_timeline *timeline;
struct intel_ring *ring;
- unsigned int size;
int err;
intel_engine_setup_common(engine);
@@ -1462,21 +1480,12 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->buffer);
engine->buffer = ring;
- size = PAGE_SIZE;
- if (HAS_BROKEN_CS_TLB(engine->i915))
- size = I830_WA_SIZE;
- err = intel_engine_create_scratch(engine, size);
- if (err)
- goto err_unpin;
-
err = intel_engine_init_common(engine);
if (err)
- goto err_scratch;
+ goto err_unpin;
return 0;
-err_scratch:
- intel_engine_cleanup_scratch(engine);
err_unpin:
intel_ring_unpin(ring);
err_ring:
@@ -1550,7 +1559,7 @@ static int flush_pd_dir(struct i915_request *rq)
/* Stall until the page table load is complete */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
- *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
@@ -1659,7 +1668,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* Insert a delay before the next switch! */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2dfa585712c2..72edaa7ff411 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
@@ -15,6 +15,7 @@
#include "i915_selftest.h"
#include "i915_timeline.h"
#include "intel_gpu_commands.h"
+#include "intel_workarounds.h"
struct drm_printer;
struct i915_sched_attr;
@@ -93,11 +94,11 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
#define I915_MAX_SUBSLICES 8
#define instdone_slice_mask(dev_priv__) \
- (INTEL_GEN(dev_priv__) == 7 ? \
+ (IS_GEN7(dev_priv__) ? \
1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
#define instdone_subslice_mask(dev_priv__) \
- (INTEL_GEN(dev_priv__) == 7 ? \
+ (IS_GEN7(dev_priv__) ? \
1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])
#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
@@ -190,11 +191,22 @@ enum intel_engine_id {
};
struct i915_priolist {
+ struct list_head requests[I915_PRIORITY_COUNT];
struct rb_node node;
- struct list_head requests;
+ unsigned long used;
int priority;
};
+#define priolist_for_each_request(it, plist, idx) \
+ for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+ list_for_each_entry(it, &(plist)->requests[idx], sched.link)
+
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+ for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
+ list_for_each_entry_safe(it, n, \
+ &(plist)->requests[idx - 1], \
+ sched.link)
+
struct st_preempt_hang {
struct completion completion;
bool inject_hang;
@@ -302,13 +314,6 @@ struct intel_engine_execlists {
struct rb_root_cached queue;
/**
- * @csb_read: control register for Context Switch buffer
- *
- * Note this register is always in mmio.
- */
- u32 __iomem *csb_read;
-
- /**
* @csb_write: control register for Context Switch buffer
*
* Note this register may be either mmio or HWSP shadow.
@@ -328,15 +333,6 @@ struct intel_engine_execlists {
u32 preempt_complete_status;
/**
- * @csb_write_reset: reset value for CSB write pointer
- *
- * As the CSB write pointer maybe either in HWSP or as a field
- * inside an mmio register, we want to reprogram it slightly
- * differently to avoid later confusion.
- */
- u32 csb_write_reset;
-
- /**
* @csb_head: context status buffer head
*/
u8 csb_head;
@@ -440,7 +436,9 @@ struct intel_engine_cs {
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
- struct i915_vma *scratch;
+ struct i915_wa_list ctx_wa_list;
+ struct i915_wa_list wa_list;
+ struct i915_wa_list whitelist;
u32 irq_keep_mask; /* always keep these interrupts */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
@@ -487,11 +485,10 @@ struct intel_engine_cs {
*/
void (*submit_request)(struct i915_request *rq);
- /* Call when the priority on a request has changed and it and its
+ /*
+ * Call when the priority on a request has changed and it and its
* dependencies may need rescheduling. Note the request itself may
* not be ready to run!
- *
- * Called under the struct_mutex.
*/
void (*schedule)(struct i915_request *request,
const struct i915_sched_attr *attr);
@@ -898,10 +895,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
- unsigned int size);
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
-
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
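A sketch of the priolist bucket idea introduced above: each priority level gets its own request list, a set bit in "used" marks a non-empty bucket, and consumption walks the bitmask with ffs(). The bucket count and variable names below are stand-ins, not the driver's real structures.

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define NUM_BUCKETS 4	/* stand-in for I915_PRIORITY_COUNT */

int main(void)
{
	int counts[NUM_BUCKETS] = { 0, 3, 0, 1 };
	unsigned int used = 0;
	int idx;

	/* mark non-empty buckets, as done when requests are queued */
	for (idx = 0; idx < NUM_BUCKETS; idx++)
		if (counts[idx])
			used |= 1u << idx;

	/* consume in ffs() order, mirroring priolist_for_each_request_consume() */
	while ((idx = ffs(used))) {
		used &= ~(1u << (idx - 1));
		printf("draining bucket %d (%d requests)\n", idx - 1, counts[idx - 1]);
	}

	return 0;
}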
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 44e4491a4918..4350a5270423 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -76,6 +76,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "TRANSCODER_C";
case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP";
+ case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
+ return "TRANSCODER_EDP_VDSC";
case POWER_DOMAIN_TRANSCODER_DSI_A:
return "TRANSCODER_DSI_A";
case POWER_DOMAIN_TRANSCODER_DSI_C:
@@ -208,7 +210,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
is_enabled = true;
- for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
+ for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
if (power_well->desc->always_on)
continue;
@@ -436,6 +438,15 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
hsw_wait_for_power_well_enable(dev_priv, power_well);
+
+ /* Display WA #1178: icl */
+ if (IS_ICELAKE(dev_priv) &&
+ pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
+ !intel_bios_is_port_edp(dev_priv, port)) {
+ val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
+ val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
+ I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
+ }
}
static void
@@ -456,6 +467,25 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
+#define ICL_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+
+static void
+icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
+ u32 val;
+
+ val = I915_READ(DP_AUX_CH_CTL(aux_ch));
+ val &= ~DP_AUX_CH_CTL_TBT_IO;
+ if (power_well->desc->hsw.is_tc_tbt)
+ val |= DP_AUX_CH_CTL_TBT_IO;
+ I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
+
+ hsw_power_well_enable(dev_priv, power_well);
+}
+
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -465,11 +495,25 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ enum i915_power_well_id id = power_well->desc->id;
int pw_idx = power_well->desc->hsw.idx;
u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
HSW_PWR_WELL_CTL_STATE(pw_idx);
+ u32 val;
+
+ val = I915_READ(regs->driver);
- return (I915_READ(regs->driver) & mask) == mask;
+ /*
+ * On GEN9 big core, due to a DMC bug, the driver's request bits for PW1
+ * and the MISC_IO PW will not be restored, so check instead for the
+ * BIOS's own request bits, which are forced-on for these power wells
+ * when exiting DC5/6.
+ */
+ if (IS_GEN9(dev_priv) && !IS_GEN9_LP(dev_priv) &&
+ (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
+ val |= I915_READ(regs->bios);
+
+ return (val & mask) == mask;
}
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
@@ -551,7 +595,9 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
u32 mask;
mask = DC_STATE_EN_UPTO_DC5;
- if (IS_GEN9_LP(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
+ mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
+ else if (IS_GEN9_LP(dev_priv))
mask |= DC_STATE_EN_DC9;
else
mask |= DC_STATE_EN_UPTO_DC6;
@@ -624,8 +670,13 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
assert_can_enable_dc9(dev_priv);
DRM_DEBUG_KMS("Enabling DC9\n");
-
- intel_power_sequencer_reset(dev_priv);
+ /*
+ * Power sequencer reset is not needed on
+ * platforms with South Display Engine on PCH,
+ * because PPS registers are always on.
+ */
+ if (!HAS_PCH_SPLIT(dev_priv))
+ intel_power_sequencer_reset(dev_priv);
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
@@ -707,7 +758,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
assert_csr_loaded(dev_priv);
}
-static void skl_enable_dc6(struct drm_i915_private *dev_priv)
+void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc6(dev_priv);
@@ -808,6 +859,14 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
if (IS_GEN9_LP(dev_priv))
bxt_verify_ddi_phy_power_wells(dev_priv);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ /*
+ * DMC retains HW context only for port A; the other combo
+ * PHY's HW context for port B is lost after DC transitions,
+ * so we need to restore it manually.
+ */
+ icl_combo_phys_init(dev_priv);
}
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1608,7 +1667,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
intel_display_power_domain_str(domain));
power_domains->domain_use_count[domain]--;
- for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
+ for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
intel_power_well_put(dev_priv, power_well);
mutex_unlock(&power_domains->lock);
@@ -1971,9 +2030,9 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
*/
#define ICL_PW_2_POWER_DOMAINS ( \
ICL_PW_3_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \
BIT_ULL(POWER_DOMAIN_INIT))
/*
- * - eDP/DSI VDSC
* - KVMR (HW control)
*/
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
@@ -2041,7 +2100,7 @@ static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2058,7 +2117,7 @@ static const struct i915_power_well_ops i830_pipes_power_well_ops = {
static const struct i915_power_well_desc i830_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2102,7 +2161,7 @@ static const struct i915_power_well_regs hsw_power_well_regs = {
static const struct i915_power_well_desc hsw_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2123,7 +2182,7 @@ static const struct i915_power_well_desc hsw_power_wells[] = {
static const struct i915_power_well_desc bdw_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2166,7 +2225,7 @@ static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
static const struct i915_power_well_desc vlv_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2242,7 +2301,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = {
static const struct i915_power_well_desc chv_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2293,7 +2352,7 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
static const struct i915_power_well_desc skl_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2301,6 +2360,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
{
.name = "power well 1",
/* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
@@ -2313,6 +2373,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
{
.name = "MISC IO power well",
/* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_MISC_IO,
@@ -2385,13 +2446,15 @@ static const struct i915_power_well_desc skl_power_wells[] = {
static const struct i915_power_well_desc bxt_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
},
{
.name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
@@ -2443,7 +2506,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = {
static const struct i915_power_well_desc glk_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2451,6 +2514,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
{
.name = "power well 1",
/* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
@@ -2571,7 +2635,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
static const struct i915_power_well_desc cnl_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2579,6 +2643,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
{
.name = "power well 1",
/* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
@@ -2716,6 +2781,13 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
.is_enabled = hsw_power_well_enabled,
};
+static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = icl_tc_phy_aux_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
static const struct i915_power_well_regs icl_aux_power_well_regs = {
.bios = ICL_PWR_WELL_CTL_AUX1,
.driver = ICL_PWR_WELL_CTL_AUX2,
@@ -2731,7 +2803,7 @@ static const struct i915_power_well_regs icl_ddi_power_well_regs = {
static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2739,6 +2811,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "power well 1",
/* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
@@ -2861,81 +2934,89 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX C",
.domains = ICL_AUX_C_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+ .hsw.is_tc_tbt = false,
},
},
{
.name = "AUX D",
.domains = ICL_AUX_D_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
+ .hsw.is_tc_tbt = false,
},
},
{
.name = "AUX E",
.domains = ICL_AUX_E_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
+ .hsw.is_tc_tbt = false,
},
},
{
.name = "AUX F",
.domains = ICL_AUX_F_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
+ .hsw.is_tc_tbt = false,
},
},
{
.name = "AUX TBT1",
.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
+ .hsw.is_tc_tbt = true,
},
},
{
.name = "AUX TBT2",
.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
+ .hsw.is_tc_tbt = true,
},
},
{
.name = "AUX TBT3",
.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
+ .hsw.is_tc_tbt = true,
},
},
{
.name = "AUX TBT4",
.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
+ .hsw.is_tc_tbt = true,
},
},
{
@@ -2969,17 +3050,20 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int requested_dc;
int max_dc;
- if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) {
+ if (INTEL_GEN(dev_priv) >= 11) {
max_dc = 2;
- mask = 0;
- } else if (IS_GEN9_LP(dev_priv)) {
- max_dc = 1;
/*
* DC9 has a separate HW flow from the rest of the DC states,
* not depending on the DMC firmware. It's needed by system
* suspend/resume, so allow it unconditionally.
*/
mask = DC_STATE_EN_DC9;
+ } else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) {
+ max_dc = 2;
+ mask = 0;
+ } else if (IS_GEN9_LP(dev_priv)) {
+ max_dc = 1;
+ mask = DC_STATE_EN_DC9;
} else {
max_dc = 0;
mask = 0;
@@ -3075,12 +3159,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
if (IS_ICELAKE(dev_priv)) {
err = set_power_wells(power_domains, icl_power_wells);
- } else if (IS_HASWELL(dev_priv)) {
- err = set_power_wells(power_domains, hsw_power_wells);
- } else if (IS_BROADWELL(dev_priv)) {
- err = set_power_wells(power_domains, bdw_power_wells);
- } else if (IS_GEN9_BC(dev_priv)) {
- err = set_power_wells(power_domains, skl_power_wells);
} else if (IS_CANNONLAKE(dev_priv)) {
err = set_power_wells(power_domains, cnl_power_wells);
@@ -3092,13 +3170,18 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
if (!IS_CNL_WITH_PORT_F(dev_priv))
power_domains->power_well_count -= 2;
-
- } else if (IS_BROXTON(dev_priv)) {
- err = set_power_wells(power_domains, bxt_power_wells);
} else if (IS_GEMINILAKE(dev_priv)) {
err = set_power_wells(power_domains, glk_power_wells);
+ } else if (IS_BROXTON(dev_priv)) {
+ err = set_power_wells(power_domains, bxt_power_wells);
+ } else if (IS_GEN9_BC(dev_priv)) {
+ err = set_power_wells(power_domains, skl_power_wells);
} else if (IS_CHERRYVIEW(dev_priv)) {
err = set_power_wells(power_domains, chv_power_wells);
+ } else if (IS_BROADWELL(dev_priv)) {
+ err = set_power_wells(power_domains, bdw_power_wells);
+ } else if (IS_HASWELL(dev_priv)) {
+ err = set_power_wells(power_domains, hsw_power_wells);
} else if (IS_VALLEYVIEW(dev_priv)) {
err = set_power_wells(power_domains, vlv_power_wells);
} else if (IS_I830(dev_priv)) {
@@ -3238,18 +3321,40 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
I915_WRITE(MBUS_ABOX_CTL, val);
}
+static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ i915_reg_t reg;
+ u32 reset_bits, val;
+
+ if (IS_IVYBRIDGE(dev_priv)) {
+ reg = GEN7_MSG_CTL;
+ reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
+ } else {
+ reg = HSW_NDE_RSTWRN_OPT;
+ reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
+ }
+
+ val = I915_READ(reg);
+
+ if (enable)
+ val |= reset_bits;
+ else
+ val &= ~reset_bits;
+
+ I915_WRITE(reg, val);
+}
+
static void skl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- uint32_t val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/* enable PCH reset handshake */
- val = I915_READ(HSW_NDE_RSTWRN_OPT);
- I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
/* enable PG1 and Misc I/O */
mutex_lock(&power_domains->lock);
@@ -3305,7 +3410,6 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- uint32_t val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -3315,9 +3419,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
* Move the handshake programming to initialization sequence.
* Previously was left up to BIOS.
*/
- val = I915_READ(HSW_NDE_RSTWRN_OPT);
- val &= ~RESET_PCH_HANDSHAKE_ENABLE;
- I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+ intel_pch_reset_handshake(dev_priv, false);
/* Enable PG1 */
mutex_lock(&power_domains->lock);
@@ -3363,101 +3465,18 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
usleep_range(10, 30); /* 10 us delay per Bspec */
}
-enum {
- PROCMON_0_85V_DOT_0,
- PROCMON_0_95V_DOT_0,
- PROCMON_0_95V_DOT_1,
- PROCMON_1_05V_DOT_0,
- PROCMON_1_05V_DOT_1,
-};
-
-static const struct cnl_procmon {
- u32 dw1, dw9, dw10;
-} cnl_procmon_values[] = {
- [PROCMON_0_85V_DOT_0] =
- { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
- [PROCMON_0_95V_DOT_0] =
- { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
- [PROCMON_0_95V_DOT_1] =
- { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
- [PROCMON_1_05V_DOT_0] =
- { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
- [PROCMON_1_05V_DOT_1] =
- { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
-};
-
-/*
- * CNL has just one set of registers, while ICL has two sets: one for port A and
- * the other for port B. The CNL registers are equivalent to the ICL port A
- * registers, that's why we call the ICL macros even though the function has CNL
- * on its name.
- */
-static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
- enum port port)
-{
- const struct cnl_procmon *procmon;
- u32 val;
-
- val = I915_READ(ICL_PORT_COMP_DW3(port));
- switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
- default:
- MISSING_CASE(val);
- /* fall through */
- case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
- procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
- break;
- case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
- procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
- break;
- case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
- procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
- break;
- case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
- procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
- break;
- case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
- procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
- break;
- }
-
- val = I915_READ(ICL_PORT_COMP_DW1(port));
- val &= ~((0xff << 16) | 0xff);
- val |= procmon->dw1;
- I915_WRITE(ICL_PORT_COMP_DW1(port), val);
-
- I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
- I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
-}
-
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/* 1. Enable PCH Reset Handshake */
- val = I915_READ(HSW_NDE_RSTWRN_OPT);
- val |= RESET_PCH_HANDSHAKE_ENABLE;
- I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
-
- /* 2. Enable Comp */
- val = I915_READ(CHICKEN_MISC_2);
- val &= ~CNL_COMP_PWR_DOWN;
- I915_WRITE(CHICKEN_MISC_2, val);
-
- /* Dummy PORT_A to get the correct CNL register from the ICL macro */
- cnl_set_procmon_ref_values(dev_priv, PORT_A);
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
- val = I915_READ(CNL_PORT_COMP_DW0);
- val |= COMP_INIT;
- I915_WRITE(CNL_PORT_COMP_DW0, val);
-
- /* 3. */
- val = I915_READ(CNL_PORT_CL1CM_DW5);
- val |= CL_POWER_DOWN_ENABLE;
- I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+ /* 2-3. */
+ cnl_combo_phys_init(dev_priv);
/*
* 4. Enable Power Well 1 (PG1).
@@ -3482,7 +3501,6 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -3506,44 +3524,23 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
usleep_range(10, 30); /* 10 us delay per Bspec */
- /* 5. Disable Comp */
- val = I915_READ(CHICKEN_MISC_2);
- val |= CNL_COMP_PWR_DOWN;
- I915_WRITE(CHICKEN_MISC_2, val);
+ /* 5. */
+ cnl_combo_phys_uninit(dev_priv);
}
-static void icl_display_core_init(struct drm_i915_private *dev_priv,
- bool resume)
+void icl_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- enum port port;
- u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/* 1. Enable PCH reset handshake. */
- val = I915_READ(HSW_NDE_RSTWRN_OPT);
- val |= RESET_PCH_HANDSHAKE_ENABLE;
- I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
-
- for (port = PORT_A; port <= PORT_B; port++) {
- /* 2. Enable DDI combo PHY comp. */
- val = I915_READ(ICL_PHY_MISC(port));
- val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(port), val);
-
- cnl_set_procmon_ref_values(dev_priv, port);
-
- val = I915_READ(ICL_PORT_COMP_DW0(port));
- val |= COMP_INIT;
- I915_WRITE(ICL_PORT_COMP_DW0(port), val);
-
- /* 3. Set power down enable. */
- val = I915_READ(ICL_PORT_CL_DW5(port));
- val |= CL_POWER_DOWN_ENABLE;
- I915_WRITE(ICL_PORT_CL_DW5(port), val);
- }
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+
+ /* 2-3. */
+ icl_combo_phys_init(dev_priv);
/*
* 4. Enable Power Well 1 (PG1).
@@ -3567,12 +3564,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_csr_load_program(dev_priv);
}
-static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- enum port port;
- u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -3594,12 +3589,8 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock);
- /* 5. Disable Comp */
- for (port = PORT_A; port <= PORT_B; port++) {
- val = I915_READ(ICL_PHY_MISC(port));
- val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(port), val);
- }
+ /* 5. */
+ icl_combo_phys_uninit(dev_priv);
}
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
@@ -3757,7 +3748,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(dev_priv);
mutex_unlock(&power_domains->lock);
- }
+ } else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7)
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
/*
* Keep all power wells enabled for any dependent HW access during
@@ -3951,14 +3943,6 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
int domains_count;
bool enabled;
- /*
- * Power wells not belonging to any domain (like the MISC_IO
- * and PW1 power wells) are under FW control, so ignore them,
- * since their state can change asynchronously.
- */
- if (!power_well->desc->domains)
- continue;
-
enabled = power_well->desc->ops->is_enabled(dev_priv,
power_well);
if ((power_well->count || power_well->desc->always_on) !=
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 701372e512a8..5805ec1aba12 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -105,11 +105,6 @@ struct intel_sdvo {
bool has_hdmi_audio;
bool rgb_quant_range_selectable;
- /**
- * This is sdvo fixed pannel mode pointer
- */
- struct drm_display_mode *sdvo_lvds_fixed_mode;
-
/* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
@@ -765,10 +760,14 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
args.height = height;
args.interlace = 0;
- if (IS_LVDS(intel_sdvo_connector) &&
- (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
- intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
- args.scaled = 1;
+ if (IS_LVDS(intel_sdvo_connector)) {
+ const struct drm_display_mode *fixed_mode =
+ intel_sdvo_connector->base.panel.fixed_mode;
+
+ if (fixed_mode->hdisplay != width ||
+ fixed_mode->vdisplay != height)
+ args.scaled = 1;
+ }
return intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
@@ -1123,6 +1122,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
pipe_config->pipe_bpp = 8*3;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
pipe_config->has_pch_encoder = true;
@@ -1144,7 +1144,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
pipe_config->sdvo_tv_clock = true;
} else if (IS_LVDS(intel_sdvo_connector)) {
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
- intel_sdvo->sdvo_lvds_fixed_mode))
+ intel_sdvo_connector->base.panel.fixed_mode))
return false;
(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
@@ -1301,7 +1301,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
/* lvds has a special fixed output timing. */
if (IS_LVDS(intel_sdvo_connector))
intel_sdvo_get_dtd_from_mode(&output_dtd,
- intel_sdvo->sdvo_lvds_fixed_mode);
+ intel_sdvo_connector->base.panel.fixed_mode);
else
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
@@ -1642,10 +1642,13 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
if (IS_LVDS(intel_sdvo_connector)) {
- if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+ const struct drm_display_mode *fixed_mode =
+ intel_sdvo_connector->base.panel.fixed_mode;
+
+ if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+ if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
}
@@ -2058,14 +2061,6 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
return !list_empty(&connector->probed_modes);
}
-static void intel_sdvo_destroy(struct drm_connector *connector)
-{
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-
- drm_connector_cleanup(connector);
- kfree(intel_sdvo_connector);
-}
-
static int
intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
@@ -2228,7 +2223,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.atomic_set_property = intel_sdvo_connector_atomic_set_property,
.late_register = intel_sdvo_connector_register,
.early_unregister = intel_sdvo_connector_unregister,
- .destroy = intel_sdvo_destroy,
+ .destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_sdvo_connector_duplicate_state,
};
@@ -2267,10 +2262,6 @@ static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
- if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
- drm_mode_destroy(encoder->dev,
- intel_sdvo->sdvo_lvds_fixed_mode);
-
i2c_del_adapter(&intel_sdvo->ddc);
intel_encoder_destroy(encoder);
}
@@ -2583,7 +2574,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
return true;
err:
- intel_sdvo_destroy(connector);
+ intel_connector_destroy(connector);
return false;
}
@@ -2663,19 +2654,22 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
list_for_each_entry(mode, &connector->probed_modes, head) {
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
- intel_sdvo->sdvo_lvds_fixed_mode =
+ struct drm_display_mode *fixed_mode =
drm_mode_duplicate(connector->dev, mode);
+
+ intel_panel_init(&intel_connector->panel,
+ fixed_mode, NULL);
break;
}
}
- if (!intel_sdvo->sdvo_lvds_fixed_mode)
+ if (!intel_connector->panel.fixed_mode)
goto err;
return true;
err:
- intel_sdvo_destroy(connector);
+ intel_connector_destroy(connector);
return false;
}
@@ -2745,7 +2739,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
&dev->mode_config.connector_list, head) {
if (intel_attached_encoder(connector) == &intel_sdvo->base) {
drm_connector_unregister(connector);
- intel_sdvo_destroy(connector);
+ intel_connector_destroy(connector);
}
}
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index d3090a7537bb..d2e003d8f3db 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -40,6 +40,7 @@
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include <drm/drm_color_mgmt.h>
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
@@ -275,17 +276,24 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
src->y2 = (src_y + src_h) << 16;
if (fb->format->is_yuv &&
- fb->format->format != DRM_FORMAT_NV12 &&
(src_x & 1 || src_w & 1)) {
DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
src_x, src_w);
return -EINVAL;
}
+ if (fb->format->is_yuv &&
+ fb->format->num_planes > 1 &&
+ (src_y & 1 || src_h & 1)) {
+ DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of 2 for planar YUV planes\n",
+ src_y, src_h);
+ return -EINVAL;
+ }
+
return 0;
}
-unsigned int
+static unsigned int
skl_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation)
@@ -328,7 +336,8 @@ skl_program_scaler(struct intel_plane *plane,
0, INT_MAX);
/* TODO: handle sub-pixel coordinates */
- if (plane_state->base.fb->format->format == DRM_FORMAT_NV12) {
+ if (plane_state->base.fb->format->format == DRM_FORMAT_NV12 &&
+ !icl_is_hdr_plane(plane)) {
y_hphase = skl_scaler_calc_phase(1, hscale, false);
y_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -346,7 +355,6 @@ skl_program_scaler(struct intel_plane *plane,
I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
- I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
@@ -355,70 +363,239 @@ skl_program_scaler(struct intel_plane *plane,
I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
}
-void
-skl_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+/* Preoffset values for YUV to RGB Conversion */
+#define PREOFF_YUV_TO_RGB_HI 0x1800
+#define PREOFF_YUV_TO_RGB_ME 0x1F00
+#define PREOFF_YUV_TO_RGB_LO 0x1800
+
+#define ROFF(x) (((x) & 0xffff) << 16)
+#define GOFF(x) (((x) & 0xffff) << 0)
+#define BOFF(x) (((x) & 0xffff) << 16)
+
+static void
+icl_program_input_csc(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
+
+ static const u16 input_csc_matrix[][9] = {
+ /*
+ * BT.601 full range YCbCr -> full range RGB
+ * The matrix required is:
+ * [1.000, 0.000, 1.371,
+ * 1.000, -0.336, -0.698,
+ * 1.000, 1.732, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x7AF8, 0x7800, 0x0,
+ 0x8B28, 0x7800, 0x9AC0,
+ 0x0, 0x7800, 0x7DD8,
+ },
+ /*
+ * BT.709 full range YCbCr -> full range RGB
+ * The matrix required is:
+ * [1.000, 0.000, 1.574,
+ * 1.000, -0.187, -0.468,
+ * 1.000, 1.855, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x7C98, 0x7800, 0x0,
+ 0x9EF8, 0x7800, 0xABF8,
+ 0x0, 0x7800, 0x7ED8,
+ },
+ };
+
+ /* Matrix for Limited Range to Full Range Conversion */
+ static const u16 input_csc_matrix_lr[][9] = {
+ /*
+ * BT.601 Limited range YCbCr -> full range RGB
+ * The matrix required is:
+ * [1.164384, 0.000, 1.596370,
+ * 1.138393, -0.382500, -0.794598,
+ * 1.138393, 1.971696, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x7CC8, 0x7950, 0x0,
+ 0x8CB8, 0x7918, 0x9C40,
+ 0x0, 0x7918, 0x7FC8,
+ },
+ /*
+ * BT.709 Limited range YCbCr -> full range RGB
+ * The matrix required is:
+ * [1.164, 0.000, 1.833671,
+ * 1.138393, -0.213249, -0.532909,
+ * 1.138393, 2.112402, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x7EA8, 0x7950, 0x0,
+ 0x8888, 0x7918, 0xADA8,
+ 0x0, 0x7918, 0x6870,
+ },
+ };
+ const u16 *csc;
+
+ if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ csc = input_csc_matrix[plane_state->base.color_encoding];
+ else
+ csc = input_csc_matrix_lr[plane_state->base.color_encoding];
+
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) |
+ GOFF(csc[1]));
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), BOFF(csc[2]));
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), ROFF(csc[3]) |
+ GOFF(csc[4]));
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), BOFF(csc[5]));
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), ROFF(csc[6]) |
+ GOFF(csc[7]));
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), BOFF(csc[8]));
+
+ I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
+ PREOFF_YUV_TO_RGB_HI);
+ I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
+ I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
+ PREOFF_YUV_TO_RGB_LO);
+ I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
+ I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
+ I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
+}
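[Editor's note, not part of the patch] The PLANE_INPUT_CSC_COEFF values programmed above are fixed-point encodings of the float matrices quoted in the comments. As a sanity reference only, and assuming the column order Y, Cb, Cr used in those comments, the BT.709 full-range case works out to the plain-float sketch below (the hardware fixed-point format and the PREOFF/POSTOFF programming are ignored):

/* Illustrative reference only: applies the BT.709 full-range coefficients
 * quoted in the comment above in plain float. Y, Cb and Cr are assumed
 * normalized to [0, 1]; chroma is re-centred around zero before the
 * matrix is applied.
 */
static void bt709_full_range_to_rgb(float y, float cb, float cr,
				    float *r, float *g, float *b)
{
	cb -= 0.5f;
	cr -= 0.5f;

	*r = y + 1.574f * cr;
	*g = y - 0.187f * cb - 0.468f * cr;
	*b = y + 1.855f * cb;
}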
+
+static void
+skl_program_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane, bool slave, u32 plane_ctl)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- u32 plane_ctl = plane_state->ctl;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 surf_addr = plane_state->color_plane[0].offset;
- u32 stride = skl_plane_stride(plane_state, 0);
+ u32 surf_addr = plane_state->color_plane[color_plane].offset;
+ u32 stride = skl_plane_stride(plane_state, color_plane);
u32 aux_stride = skl_plane_stride(plane_state, 1);
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
- uint32_t x = plane_state->color_plane[0].x;
- uint32_t y = plane_state->color_plane[0].y;
+ uint32_t x = plane_state->color_plane[color_plane].x;
+ uint32_t y = plane_state->color_plane[color_plane].y;
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ struct intel_plane *linked = plane_state->linked_plane;
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ u8 alpha = plane_state->base.alpha >> 8;
unsigned long irqflags;
+ u32 keymsk, keymax;
/* Sizes are 0 based */
src_w--;
src_h--;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
- plane_state->color_ctl);
+ keymsk = key->channel_mask & 0x3ffffff;
+ if (alpha < 0xff)
+ keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
- if (key->flags) {
- I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
- I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), key->max_value);
- I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), key->channel_mask);
+ /* The scaler will handle the output position */
+ if (plane_state->scaler_id >= 0) {
+ crtc_x = 0;
+ crtc_y = 0;
}
- I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
+ I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
(plane_state->color_plane[1].offset - surf_addr) | aux_stride);
- I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
- (plane_state->color_plane[1].y << 16) |
- plane_state->color_plane[1].x);
- if (plane_state->scaler_id >= 0) {
- skl_program_scaler(plane, crtc_state, plane_state);
+ if (icl_is_hdr_plane(plane)) {
+ u32 cus_ctl = 0;
+
+ if (linked) {
+ /* Enable and use MPEG-2 chroma siting */
+ cus_ctl = PLANE_CUS_ENABLE |
+ PLANE_CUS_HPHASE_0 |
+ PLANE_CUS_VPHASE_SIGN_NEGATIVE |
+ PLANE_CUS_VPHASE_0_25;
+
+ if (linked->id == PLANE_SPRITE5)
+ cus_ctl |= PLANE_CUS_PLANE_7;
+ else if (linked->id == PLANE_SPRITE4)
+ cus_ctl |= PLANE_CUS_PLANE_6;
+ else
+ MISSING_CASE(linked->id);
+ }
- I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
- } else {
- I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl);
}
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
+ plane_state->color_ctl);
+
+ if (fb->format->is_yuv && icl_is_hdr_plane(plane))
+ icl_program_input_csc(plane, crtc_state, plane_state);
+
+ skl_write_plane_wm(plane, crtc_state);
+
+ I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
+ I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
+ I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
+
+ I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+
+ if (INTEL_GEN(dev_priv) < 11)
+ I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
+ (plane_state->color_plane[1].y << 16) |
+ plane_state->color_plane[1].x);
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + surf_addr);
- POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
+
+ if (!slave && plane_state->scaler_id >= 0)
+ skl_program_scaler(plane, crtc_state, plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-void
-skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+static void
+skl_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ int color_plane = 0;
+
+ if (plane_state->linked_plane) {
+ /* Program the UV plane */
+ color_plane = 1;
+ }
+
+ skl_program_plane(plane, crtc_state, plane_state,
+ color_plane, false, plane_state->ctl);
+}
+
+static void
+icl_update_slave(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ skl_program_plane(plane, crtc_state, plane_state, 0, true,
+ plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE);
+}
+
+static void
+skl_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
@@ -427,15 +604,15 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
+ skl_write_plane_wm(plane, crtc_state);
+ I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
- POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-bool
+static bool
skl_plane_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
@@ -628,7 +805,6 @@ vlv_update_plane(struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
u32 sprctl = plane_state->ctl;
@@ -651,38 +827,41 @@ vlv_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- vlv_update_clrc(plane_state);
+ I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
+ plane_state->color_plane[0].stride);
+ I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+ I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
chv_update_csc(plane_state);
if (key->flags) {
I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
- I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
+ I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
}
- I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
- plane_state->color_plane[0].stride);
- I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
- if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
- else
- I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
+ I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
+ I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
- I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
-
- I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
I915_WRITE_FW(SPSURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
- POSTING_READ_FW(SPSURF(pipe, plane_id));
+
+ vlv_update_clrc(plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
-vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+vlv_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -692,9 +871,7 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
I915_WRITE_FW(SPCNTR(pipe, plane_id), 0);
-
I915_WRITE_FW(SPSURF(pipe, plane_id), 0);
- POSTING_READ_FW(SPSURF(pipe, plane_id));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -789,7 +966,6 @@ ivb_update_plane(struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
u32 sprctl = plane_state->ctl, sprscale = 0;
u32 sprsurf_offset = plane_state->color_plane[0].offset;
@@ -818,37 +994,42 @@ ivb_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
+ I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+ if (IS_IVYBRIDGE(dev_priv))
+ I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+
if (key->flags) {
I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value);
- I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
+ I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
}
- I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
- I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
-
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
- else if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
- else
+ } else {
I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
+ I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
+ }
- I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
- if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(SPRCTL(pipe), sprctl);
I915_WRITE_FW(SPRSURF(pipe),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
- POSTING_READ_FW(SPRSURF(pipe));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
-ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+ivb_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -857,12 +1038,10 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
I915_WRITE_FW(SPRCTL(pipe), 0);
- /* Can't leave the scaler enabled... */
+ /* Disable the scaler */
if (IS_IVYBRIDGE(dev_priv))
I915_WRITE_FW(SPRSCALE(pipe), 0);
-
I915_WRITE_FW(SPRSURF(pipe), 0);
- POSTING_READ_FW(SPRSURF(pipe));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -961,7 +1140,6 @@ g4x_update_plane(struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
u32 dvscntr = plane_state->ctl, dvsscale = 0;
u32 dvssurf_offset = plane_state->color_plane[0].offset;
@@ -990,32 +1168,35 @@ g4x_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
+ I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+
if (key->flags) {
I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value);
- I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
+ I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
}
- I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
- I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
-
- if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
- else
- I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
+ I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
+ I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
- I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
- I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
I915_WRITE_FW(DVSSURF(pipe),
intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
- POSTING_READ_FW(DVSSURF(pipe));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
-g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+g4x_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -1026,9 +1207,7 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
I915_WRITE_FW(DVSCNTR(pipe), 0);
/* Disable the scaler */
I915_WRITE_FW(DVSSCALE(pipe), 0);
-
I915_WRITE_FW(DVSSURF(pipe), 0);
- POSTING_READ_FW(DVSSURF(pipe));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1054,6 +1233,19 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
+static bool intel_fb_scalable(const struct drm_framebuffer *fb)
+{
+ if (!fb)
+ return false;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ return false;
+ default:
+ return true;
+ }
+}
+
static int
g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
@@ -1121,18 +1313,18 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- int max_scale, min_scale;
+ int min_scale = DRM_PLANE_HELPER_NO_SCALING;
+ int max_scale = DRM_PLANE_HELPER_NO_SCALING;
int ret;
- if (INTEL_GEN(dev_priv) < 7) {
- min_scale = 1;
- max_scale = 16 << 16;
- } else if (IS_IVYBRIDGE(dev_priv)) {
- min_scale = 1;
- max_scale = 2 << 16;
- } else {
- min_scale = DRM_PLANE_HELPER_NO_SCALING;
- max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ if (intel_fb_scalable(plane_state->base.fb)) {
+ if (INTEL_GEN(dev_priv) < 7) {
+ min_scale = 1;
+ max_scale = 16 << 16;
+ } else if (IS_IVYBRIDGE(dev_priv)) {
+ min_scale = 1;
+ max_scale = 2 << 16;
+ }
}
ret = drm_atomic_helper_check_plane_state(&plane_state->base,
@@ -1219,6 +1411,8 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
struct drm_format_name_buf format_name;
@@ -1247,13 +1441,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
}
/*
- * 90/270 is not allowed with RGB64 16:16:16:16,
- * RGB 16-bit 5:6:5, and Indexed 8-bit.
- * TBD: Add RGB64 case once its added in supported format list.
+ * 90/270 is not allowed with RGB64 16:16:16:16 and
+ * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed from gen11 onwards.
+ * TBD: Add RGB64 case once it's added to the supported format
+ * list.
*/
switch (fb->format->format) {
- case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
+ if (INTEL_GEN(dev_priv) >= 11)
+ break;
+ /* fall through */
+ case DRM_FORMAT_C8:
DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
drm_get_format_name(fb->format->format,
&format_name));
@@ -1307,12 +1505,31 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
return 0;
}
-int skl_plane_check(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
+static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int rotation = plane_state->base.rotation;
+ int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+
+ /* Display WA #1106 */
+ if (fb->format->format == DRM_FORMAT_NV12 && src_w & 3 &&
+ (rotation == DRM_MODE_ROTATE_270 ||
+ rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
+ DRM_DEBUG_KMS("src width must be multiple of 4 for rotated NV12\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int skl_plane_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- int max_scale, min_scale;
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int min_scale = DRM_PLANE_HELPER_NO_SCALING;
+ int max_scale = DRM_PLANE_HELPER_NO_SCALING;
int ret;
ret = skl_plane_check_fb(crtc_state, plane_state);
@@ -1320,15 +1537,9 @@ int skl_plane_check(struct intel_crtc_state *crtc_state,
return ret;
/* use scaler when colorkey is not required */
- if (!plane_state->ckey.flags) {
- const struct drm_framebuffer *fb = plane_state->base.fb;
-
+ if (!plane_state->ckey.flags && intel_fb_scalable(fb)) {
min_scale = 1;
- max_scale = skl_max_scale(crtc_state,
- fb ? fb->format->format : 0);
- } else {
- min_scale = DRM_PLANE_HELPER_NO_SCALING;
- max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ max_scale = skl_max_scale(crtc_state, fb->format->format);
}
ret = drm_atomic_helper_check_plane_state(&plane_state->base,
@@ -1349,10 +1560,18 @@ int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ ret = skl_plane_check_nv12_rotation(plane_state);
+ if (ret)
+ return ret;
+
ret = skl_check_plane_surface(plane_state);
if (ret)
return ret;
+ /* HW only has 8 bits of alpha precision, disable plane if invisible */
+ if (!(plane_state->base.alpha >> 8))
+ plane_state->base.visible = false;
+
plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -1517,24 +1736,30 @@ static const uint32_t vlv_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static uint32_t skl_plane_formats[] = {
+static const uint32_t skl_plane_formats[] = {
+ DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
};
-static uint32_t skl_planar_formats[] = {
+static const uint32_t skl_planar_formats[] = {
+ DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -1739,8 +1964,36 @@ static const struct drm_plane_funcs skl_plane_funcs = {
.format_mod_supported = skl_plane_format_mod_supported,
};
-bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
+static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ if (!HAS_FBC(dev_priv))
+ return false;
+
+ return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
+}
+
+static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ if (INTEL_GEN(dev_priv) >= 11)
+ return plane_id <= PLANE_SPRITE3;
+
+ /* Display WA #0870: skl, bxt */
+ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ return false;
+
+ if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
+ return false;
+
+ if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
+ return false;
+
+ return true;
+}
+
+static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
{
if (plane_id == PLANE_CURSOR)
return false;
@@ -1757,109 +2010,173 @@ bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
}
struct intel_plane *
-intel_sprite_plane_create(struct drm_i915_private *dev_priv,
- enum pipe pipe, int plane)
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
{
- struct intel_plane *intel_plane = NULL;
- struct intel_plane_state *state = NULL;
- const struct drm_plane_funcs *plane_funcs;
- unsigned long possible_crtcs;
- const uint32_t *plane_formats;
- const uint64_t *modifiers;
+ struct intel_plane *plane;
+ enum drm_plane_type plane_type;
unsigned int supported_rotations;
- int num_plane_formats;
+ unsigned int possible_crtcs;
+ const u64 *modifiers;
+ const u32 *formats;
+ int num_formats;
int ret;
- intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
- if (!intel_plane) {
- ret = -ENOMEM;
- goto fail;
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
+
+ plane->pipe = pipe;
+ plane->id = plane_id;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
+
+ plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id);
+ if (plane->has_fbc) {
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
}
- state = intel_create_plane_state(&intel_plane->base);
- if (!state) {
- ret = -ENOMEM;
- goto fail;
+ plane->max_stride = skl_plane_max_stride;
+ plane->update_plane = skl_update_plane;
+ plane->disable_plane = skl_disable_plane;
+ plane->get_hw_state = skl_plane_get_hw_state;
+ plane->check_plane = skl_plane_check;
+ if (icl_is_nv12_y_plane(plane_id))
+ plane->update_slave = icl_update_slave;
+
+ if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+ formats = skl_planar_formats;
+ num_formats = ARRAY_SIZE(skl_planar_formats);
+ } else {
+ formats = skl_plane_formats;
+ num_formats = ARRAY_SIZE(skl_plane_formats);
}
- intel_plane->base.state = &state->base;
- if (INTEL_GEN(dev_priv) >= 9) {
- state->scaler_id = -1;
+ plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
+ if (plane->has_ccs)
+ modifiers = skl_plane_format_modifiers_ccs;
+ else
+ modifiers = skl_plane_format_modifiers_noccs;
- intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
- PLANE_SPRITE0 + plane);
+ if (plane_id == PLANE_PRIMARY)
+ plane_type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ plane_type = DRM_PLANE_TYPE_OVERLAY;
- intel_plane->max_stride = skl_plane_max_stride;
- intel_plane->update_plane = skl_update_plane;
- intel_plane->disable_plane = skl_disable_plane;
- intel_plane->get_hw_state = skl_plane_get_hw_state;
- intel_plane->check_plane = skl_plane_check;
+ possible_crtcs = BIT(pipe);
- if (skl_plane_has_planar(dev_priv, pipe,
- PLANE_SPRITE0 + plane)) {
- plane_formats = skl_planar_formats;
- num_plane_formats = ARRAY_SIZE(skl_planar_formats);
- } else {
- plane_formats = skl_plane_formats;
- num_plane_formats = ARRAY_SIZE(skl_plane_formats);
- }
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ possible_crtcs, &skl_plane_funcs,
+ formats, num_formats, modifiers,
+ plane_type,
+ "plane %d%c", plane_id + 1,
+ pipe_name(pipe));
+ if (ret)
+ goto fail;
+
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ supported_rotations |= DRM_MODE_REFLECT_X;
+
+ drm_plane_create_rotation_property(&plane->base,
+ DRM_MODE_ROTATE_0,
+ supported_rotations);
+
+ drm_plane_create_color_properties(&plane->base,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT709,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+
+ drm_plane_create_alpha_property(&plane->base);
+ drm_plane_create_blend_mode_property(&plane->base,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+
+ return plane;
- if (intel_plane->has_ccs)
- modifiers = skl_plane_format_modifiers_ccs;
- else
- modifiers = skl_plane_format_modifiers_noccs;
-
- plane_funcs = &skl_plane_funcs;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- intel_plane->max_stride = i9xx_plane_max_stride;
- intel_plane->update_plane = vlv_update_plane;
- intel_plane->disable_plane = vlv_disable_plane;
- intel_plane->get_hw_state = vlv_plane_get_hw_state;
- intel_plane->check_plane = vlv_sprite_check;
-
- plane_formats = vlv_plane_formats;
- num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+fail:
+ intel_plane_free(plane);
+
+ return ERR_PTR(ret);
+}
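[Editor's note, not part of the patch] The drm_plane_create_alpha_property() and drm_plane_create_blend_mode_property() calls above expose new per-plane properties to userspace. A hedged sketch of how a compositor might exercise them through libdrm's atomic API follows; the property IDs passed in are assumptions and would have to be discovered at runtime with drmModeObjectGetProperties().

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Sketch only: set the plane "alpha" (16-bit, 0xffff == opaque) and
 * "pixel blend mode" properties in a TEST_ONLY atomic commit. The
 * property IDs are caller-supplied assumptions, not values from the patch.
 */
static int test_plane_alpha(int fd, uint32_t plane_id,
			    uint32_t alpha_prop, uint32_t blend_prop,
			    uint16_t alpha, uint64_t blend_enum)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	drmModeAtomicAddProperty(req, plane_id, alpha_prop, alpha);
	drmModeAtomicAddProperty(req, plane_id, blend_prop, blend_enum);

	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL);
	drmModeAtomicFree(req);

	return ret;
}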
+
+struct intel_plane *
+intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int sprite)
+{
+ struct intel_plane *plane;
+ const struct drm_plane_funcs *plane_funcs;
+ unsigned long possible_crtcs;
+ unsigned int supported_rotations;
+ const u64 *modifiers;
+ const u32 *formats;
+ int num_formats;
+ int ret;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ return skl_universal_plane_create(dev_priv, pipe,
+ PLANE_SPRITE0 + sprite);
+
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ plane->max_stride = i9xx_plane_max_stride;
+ plane->update_plane = vlv_update_plane;
+ plane->disable_plane = vlv_disable_plane;
+ plane->get_hw_state = vlv_plane_get_hw_state;
+ plane->check_plane = vlv_sprite_check;
+
+ formats = vlv_plane_formats;
+ num_formats = ARRAY_SIZE(vlv_plane_formats);
modifiers = i9xx_plane_format_modifiers;
plane_funcs = &vlv_sprite_funcs;
} else if (INTEL_GEN(dev_priv) >= 7) {
- intel_plane->max_stride = g4x_sprite_max_stride;
- intel_plane->update_plane = ivb_update_plane;
- intel_plane->disable_plane = ivb_disable_plane;
- intel_plane->get_hw_state = ivb_plane_get_hw_state;
- intel_plane->check_plane = g4x_sprite_check;
-
- plane_formats = snb_plane_formats;
- num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ plane->max_stride = g4x_sprite_max_stride;
+ plane->update_plane = ivb_update_plane;
+ plane->disable_plane = ivb_disable_plane;
+ plane->get_hw_state = ivb_plane_get_hw_state;
+ plane->check_plane = g4x_sprite_check;
+
+ formats = snb_plane_formats;
+ num_formats = ARRAY_SIZE(snb_plane_formats);
modifiers = i9xx_plane_format_modifiers;
plane_funcs = &snb_sprite_funcs;
} else {
- intel_plane->max_stride = g4x_sprite_max_stride;
- intel_plane->update_plane = g4x_update_plane;
- intel_plane->disable_plane = g4x_disable_plane;
- intel_plane->get_hw_state = g4x_plane_get_hw_state;
- intel_plane->check_plane = g4x_sprite_check;
+ plane->max_stride = g4x_sprite_max_stride;
+ plane->update_plane = g4x_update_plane;
+ plane->disable_plane = g4x_disable_plane;
+ plane->get_hw_state = g4x_plane_get_hw_state;
+ plane->check_plane = g4x_sprite_check;
modifiers = i9xx_plane_format_modifiers;
if (IS_GEN6(dev_priv)) {
- plane_formats = snb_plane_formats;
- num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ formats = snb_plane_formats;
+ num_formats = ARRAY_SIZE(snb_plane_formats);
plane_funcs = &snb_sprite_funcs;
} else {
- plane_formats = g4x_plane_formats;
- num_plane_formats = ARRAY_SIZE(g4x_plane_formats);
+ formats = g4x_plane_formats;
+ num_formats = ARRAY_SIZE(g4x_plane_formats);
plane_funcs = &g4x_sprite_funcs;
}
}
- if (INTEL_GEN(dev_priv) >= 9) {
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
- DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
- } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X;
@@ -1868,35 +2185,25 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
}
- intel_plane->pipe = pipe;
- intel_plane->i9xx_plane = plane;
- intel_plane->id = PLANE_SPRITE0 + plane;
- intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, intel_plane->id);
+ plane->pipe = pipe;
+ plane->id = PLANE_SPRITE0 + sprite;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
- possible_crtcs = (1 << pipe);
+ possible_crtcs = BIT(pipe);
- if (INTEL_GEN(dev_priv) >= 9)
- ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
- possible_crtcs, plane_funcs,
- plane_formats, num_plane_formats,
- modifiers,
- DRM_PLANE_TYPE_OVERLAY,
- "plane %d%c", plane + 2, pipe_name(pipe));
- else
- ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
- possible_crtcs, plane_funcs,
- plane_formats, num_plane_formats,
- modifiers,
- DRM_PLANE_TYPE_OVERLAY,
- "sprite %c", sprite_name(pipe, plane));
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ possible_crtcs, plane_funcs,
+ formats, num_formats, modifiers,
+ DRM_PLANE_TYPE_OVERLAY,
+ "sprite %c", sprite_name(pipe, sprite));
if (ret)
goto fail;
- drm_plane_create_rotation_property(&intel_plane->base,
+ drm_plane_create_rotation_property(&plane->base,
DRM_MODE_ROTATE_0,
supported_rotations);
- drm_plane_create_color_properties(&intel_plane->base,
+ drm_plane_create_color_properties(&plane->base,
BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709),
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
@@ -1904,13 +2211,12 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_LIMITED_RANGE);
- drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
- return intel_plane;
+ return plane;
fail:
- kfree(state);
- kfree(intel_plane);
+ intel_plane_free(plane);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index b5b04cb892e9..860f306a23ba 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -885,6 +885,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
adjusted_mode->crtc_clock = tv_mode->clock;
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
@@ -1377,17 +1378,10 @@ intel_tv_get_modes(struct drm_connector *connector)
return count;
}
-static void
-intel_tv_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
static const struct drm_connector_funcs intel_tv_connector_funcs = {
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_tv_destroy,
+ .destroy = intel_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index b1b3e81b6e24..b34c318b238d 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -376,7 +376,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
intel_guc_init_params(guc);
ret = intel_guc_fw_upload(guc);
- if (ret == 0 || ret != -EAGAIN)
+ if (ret == 0 || ret != -ETIMEDOUT)
break;
DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index 87910aa83267..0e3bd580e267 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -115,9 +115,14 @@ static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw)
return uc_fw->path != NULL;
}
+static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
+{
+ return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS;
+}
+
static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
{
- if (uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS)
+ if (intel_uc_fw_is_loaded(uc_fw))
uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 3ad302c66254..9289515108c3 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1437,7 +1437,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
FORCEWAKE_MEDIA_VEBOX_GEN11(i),
FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
}
- } else if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) {
+ } else if (IS_GEN10(dev_priv) || IS_GEN9(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_fallback;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index bba98cf83cbd..bf3662ad5fed 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -326,6 +326,13 @@ enum vbt_gmbus_ddi {
ICL_DDC_BUS_PORT_4,
};
+#define DP_AUX_A 0x40
+#define DP_AUX_B 0x10
+#define DP_AUX_C 0x20
+#define DP_AUX_D 0x30
+#define DP_AUX_E 0x50
+#define DP_AUX_F 0x60
+
#define VBT_DP_MAX_LINK_RATE_HBR3 0
#define VBT_DP_MAX_LINK_RATE_HBR2 1
#define VBT_DP_MAX_LINK_RATE_HBR 2
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c
new file mode 100644
index 000000000000..c56ba0e04044
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_vdsc.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Author: Gaurav K Singh <gaurav.k.singh@intel.com>
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+enum ROW_INDEX_BPP {
+ ROW_INDEX_6BPP = 0,
+ ROW_INDEX_8BPP,
+ ROW_INDEX_10BPP,
+ ROW_INDEX_12BPP,
+ ROW_INDEX_15BPP,
+ MAX_ROW_INDEX
+};
+
+enum COLUMN_INDEX_BPC {
+ COLUMN_INDEX_8BPC = 0,
+ COLUMN_INDEX_10BPC,
+ COLUMN_INDEX_12BPC,
+ COLUMN_INDEX_14BPC,
+ COLUMN_INDEX_16BPC,
+ MAX_COLUMN_INDEX
+};
+
+#define DSC_SUPPORTED_VERSION_MIN 1
+
+/* From the DSC_v1.11 spec: the rc_parameter_set syntax element is typically constant */
+static u16 rc_buf_thresh[] = {
+ 896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616,
+ 7744, 7872, 8000, 8064
+};
+
+struct rc_parameters {
+ u16 initial_xmit_delay;
+ u8 first_line_bpg_offset;
+ u16 initial_offset;
+ u8 flatness_min_qp;
+ u8 flatness_max_qp;
+ u8 rc_quant_incr_limit0;
+ u8 rc_quant_incr_limit1;
+ struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
+};
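[Editor's note, not part of the patch] The rc_params[][] table that follows is indexed by compressed bpp (row) and input bpc (column) using the ROW_INDEX_* and COLUMN_INDEX_* enums above. A minimal sketch of that lookup is shown below; the helper name is chosen here for illustration only, and in real code it would be placed after the table definition.

/* Sketch only: maps a target compressed bpp / input bpc pair onto the
 * rc_params[][] table declared below, returning NULL for combinations
 * the table does not cover.
 */
static const struct rc_parameters *get_rc_params(u16 compressed_bpp,
						 u8 bits_per_component)
{
	int row, col;

	switch (compressed_bpp) {
	case 6:  row = ROW_INDEX_6BPP;  break;
	case 8:  row = ROW_INDEX_8BPP;  break;
	case 10: row = ROW_INDEX_10BPP; break;
	case 12: row = ROW_INDEX_12BPP; break;
	case 15: row = ROW_INDEX_15BPP; break;
	default: return NULL;
	}

	switch (bits_per_component) {
	case 8:  col = COLUMN_INDEX_8BPC;  break;
	case 10: col = COLUMN_INDEX_10BPC; break;
	case 12: col = COLUMN_INDEX_12BPC; break;
	case 14: col = COLUMN_INDEX_14BPC; break;
	case 16: col = COLUMN_INDEX_16BPC; break;
	default: return NULL;
	}

	return &rc_params[row][col];
}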
+
+/*
+ * Selected Rate Control Related Parameter Recommended Values
+ * from DSC_v1.11 spec & C Model release: DSC_model_20161212
+ */
+static struct rc_parameters rc_params[][MAX_COLUMN_INDEX] = {
+{
+ /* 6BPP/8BPC */
+ { 768, 15, 6144, 3, 13, 11, 11, {
+ { 0, 4, 0 }, { 1, 6, -2 }, { 3, 8, -2 }, { 4, 8, -4 },
+ { 5, 9, -6 }, { 5, 9, -6 }, { 6, 9, -6 }, { 6, 10, -8 },
+ { 7, 11, -8 }, { 8, 12, -10 }, { 9, 12, -10 }, { 10, 12, -12 },
+ { 10, 12, -12 }, { 11, 12, -12 }, { 13, 14, -12 }
+ }
+ },
+ /* 6BPP/10BPC */
+ { 768, 15, 6144, 7, 17, 15, 15, {
+ { 0, 8, 0 }, { 3, 10, -2 }, { 7, 12, -2 }, { 8, 12, -4 },
+ { 9, 13, -6 }, { 9, 13, -6 }, { 10, 13, -6 }, { 10, 14, -8 },
+ { 11, 15, -8 }, { 12, 16, -10 }, { 13, 16, -10 },
+ { 14, 16, -12 }, { 14, 16, -12 }, { 15, 16, -12 },
+ { 17, 18, -12 }
+ }
+ },
+ /* 6BPP/12BPC */
+ { 768, 15, 6144, 11, 21, 19, 19, {
+ { 0, 12, 0 }, { 5, 14, -2 }, { 11, 16, -2 }, { 12, 16, -4 },
+ { 13, 17, -6 }, { 13, 17, -6 }, { 14, 17, -6 }, { 14, 18, -8 },
+ { 15, 19, -8 }, { 16, 20, -10 }, { 17, 20, -10 },
+ { 18, 20, -12 }, { 18, 20, -12 }, { 19, 20, -12 },
+ { 21, 22, -12 }
+ }
+ },
+ /* 6BPP/14BPC */
+ { 768, 15, 6144, 15, 25, 23, 27, {
+ { 0, 16, 0 }, { 7, 18, -2 }, { 15, 20, -2 }, { 16, 20, -4 },
+ { 17, 21, -6 }, { 17, 21, -6 }, { 18, 21, -6 }, { 18, 22, -8 },
+ { 19, 23, -8 }, { 20, 24, -10 }, { 21, 24, -10 },
+ { 22, 24, -12 }, { 22, 24, -12 }, { 23, 24, -12 },
+ { 25, 26, -12 }
+ }
+ },
+ /* 6BPP/16BPC */
+ { 768, 15, 6144, 19, 29, 27, 27, {
+ { 0, 20, 0 }, { 9, 22, -2 }, { 19, 24, -2 }, { 20, 24, -4 },
+ { 21, 25, -6 }, { 21, 25, -6 }, { 22, 25, -6 }, { 22, 26, -8 },
+ { 23, 27, -8 }, { 24, 28, -10 }, { 25, 28, -10 },
+ { 26, 28, -12 }, { 26, 28, -12 }, { 27, 28, -12 },
+ { 29, 30, -12 }
+ }
+ },
+},
+{
+ /* 8BPP/8BPC */
+ { 512, 12, 6144, 3, 12, 11, 11, {
+ { 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 }, { 5, 12, -12 },
+ { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
+ }
+ },
+ /* 8BPP/10BPC */
+ { 512, 12, 6144, 7, 16, 15, 15, {
+ { 0, 4, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 },
+ { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
+ { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
+ }
+ },
+ /* 8BPP/12BPC */
+ { 512, 12, 6144, 11, 20, 19, 19, {
+ { 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 },
+ { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
+ { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
+ { 21, 23, -12 }
+ }
+ },
+ /* 8BPP/14BPC */
+ { 512, 12, 6144, 15, 24, 23, 23, {
+ { 0, 12, 0 }, { 5, 13, 0 }, { 11, 15, 0 }, { 12, 17, -2 },
+ { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 21, -8 }, { 15, 22, -10 }, { 17, 22, -10 },
+ { 17, 23, -12 }, { 17, 23, -12 }, { 21, 24, -12 },
+ { 24, 25, -12 }
+ }
+ },
+ /* 8BPP/16BPC */
+ { 512, 12, 6144, 19, 28, 27, 27, {
+ { 0, 12, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 15, 20, -2 },
+ { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 25, -8 }, { 19, 26, -10 }, { 21, 26, -10 },
+ { 21, 27, -12 }, { 21, 27, -12 }, { 25, 28, -12 },
+ { 28, 29, -12 }
+ }
+ },
+},
+{
+ /* 10BPP/8BPC */
+ { 410, 15, 5632, 3, 12, 11, 11, {
+ { 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 10, -10 },
+ { 5, 11, -12 }, { 7, 11, -12 }, { 11, 12, -12 }
+ }
+ },
+ /* 10BPP/10BPC */
+ { 410, 15, 5632, 7, 16, 15, 15, {
+ { 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 },
+ { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 14, -10 },
+ { 9, 15, -12 }, { 11, 15, -12 }, { 15, 16, -12 }
+ }
+ },
+ /* 10BPP/12BPC */
+ { 410, 15, 5632, 11, 20, 19, 19, {
+ { 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 },
+ { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 },
+ { 13, 18, -10 }, { 13, 19, -12 }, { 15, 19, -12 },
+ { 19, 20, -12 }
+ }
+ },
+ /* 10BPP/14BPC */
+ { 410, 15, 5632, 15, 24, 23, 23, {
+ { 0, 11, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 13, 18, -2 },
+ { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 21, -8 }, { 15, 21, -10 }, { 17, 22, -10 },
+ { 17, 22, -10 }, { 17, 23, -12 }, { 19, 23, -12 },
+ { 23, 24, -12 }
+ }
+ },
+ /* 10BPP/16BPC */
+ { 410, 15, 5632, 19, 28, 27, 27, {
+ { 0, 11, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 16, 20, -2 },
+ { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 25, -8 }, { 19, 25, -10 }, { 21, 26, -10 },
+ { 21, 26, -10 }, { 21, 27, -12 }, { 23, 27, -12 },
+ { 27, 28, -12 }
+ }
+ },
+},
+{
+ /* 12BPP/8BPC */
+ { 341, 15, 2048, 3, 12, 11, 11, {
+ { 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 },
+ { 5, 12, -12 }, { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
+ }
+ },
+ /* 12BPP/10BPC */
+ { 341, 15, 2048, 7, 16, 15, 15, {
+ { 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 },
+ { 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
+ { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
+ }
+ },
+ /* 12BPP/12BPC */
+ { 341, 15, 2048, 11, 20, 19, 19, {
+ { 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 },
+ { 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
+ { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
+ { 21, 23, -12 }
+ }
+ },
+ /* 12BPP/14BPC */
+ { 341, 15, 2048, 15, 24, 23, 23, {
+ { 0, 6, 2 }, { 7, 10, 0 }, { 9, 13, 0 }, { 11, 16, -2 },
+ { 14, 17, -4 }, { 15, 18, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 20, -8 }, { 15, 21, -10 }, { 17, 21, -10 },
+ { 17, 21, -12 }, { 17, 21, -12 }, { 19, 22, -12 },
+ { 22, 23, -12 }
+ }
+ },
+ /* 12BPP/16BPC */
+ { 341, 15, 2048, 19, 28, 27, 27, {
+ { 0, 6, 2 }, { 6, 11, 0 }, { 11, 15, 0 }, { 14, 18, -2 },
+ { 18, 21, -4 }, { 19, 22, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 24, -8 }, { 19, 25, -10 }, { 21, 25, -10 },
+ { 21, 25, -12 }, { 21, 25, -12 }, { 23, 26, -12 },
+ { 26, 27, -12 }
+ }
+ },
+},
+{
+ /* 15BPP/8BPC */
+ { 273, 15, 2048, 3, 12, 11, 11, {
+ { 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 },
+ { 1, 2, 2 }, { 1, 3, 0 }, { 1, 3, -2 }, { 2, 4, -4 },
+ { 2, 5, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 4, 7, -10 },
+ { 5, 7, -12 }, { 7, 8, -12 }, { 8, 9, -12 }
+ }
+ },
+ /* 15BPP/10BPC */
+ { 273, 15, 2048, 7, 16, 15, 15, {
+ { 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 },
+ { 5, 6, 2 }, { 5, 7, 0 }, { 5, 7, -2 }, { 6, 8, -4 },
+ { 6, 9, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 8, 11, -10 },
+ { 9, 11, -12 }, { 11, 12, -12 }, { 12, 13, -12 }
+ }
+ },
+ /* 15BPP/12BPC */
+ { 273, 15, 2048, 11, 20, 19, 19, {
+ { 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 },
+ { 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 },
+ { 11, 13, -6 }, { 11, 13, -8 }, { 12, 14, -10 },
+ { 13, 15, -10 }, { 13, 15, -12 }, { 15, 16, -12 },
+ { 16, 17, -12 }
+ }
+ },
+ /* 15BPP/14BPC */
+ { 273, 15, 2048, 15, 24, 23, 23, {
+ { 0, 4, 10 }, { 3, 8, 8 }, { 6, 11, 6 }, { 9, 14, 4 },
+ { 13, 15, 2 }, { 13, 15, 0 }, { 13, 16, -2 }, { 14, 16, -4 },
+ { 15, 17, -6 }, { 15, 17, -8 }, { 16, 18, -10 },
+ { 17, 19, -10 }, { 17, 19, -12 }, { 19, 20, -12 },
+ { 20, 21, -12 }
+ }
+ },
+ /* 15BPP/16BPC */
+ { 273, 15, 2048, 19, 28, 27, 27, {
+ { 0, 4, 10 }, { 4, 9, 8 }, { 8, 13, 6 }, { 12, 17, 4 },
+ { 17, 19, 2 }, { 17, 20, 0 }, { 17, 20, -2 }, { 18, 20, -4 },
+ { 19, 21, -6 }, { 19, 21, -8 }, { 20, 22, -10 },
+ { 21, 23, -10 }, { 21, 23, -12 }, { 23, 24, -12 },
+ { 24, 25, -12 }
+ }
+ }
+}
+};
+
+static int get_row_index_for_rc_params(u16 compressed_bpp)
+{
+ switch (compressed_bpp) {
+ case 6:
+ return ROW_INDEX_6BPP;
+ case 8:
+ return ROW_INDEX_8BPP;
+ case 10:
+ return ROW_INDEX_10BPP;
+ case 12:
+ return ROW_INDEX_12BPP;
+ case 15:
+ return ROW_INDEX_15BPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int get_column_index_for_rc_params(u8 bits_per_component)
+{
+ switch (bits_per_component) {
+ case 8:
+ return COLUMN_INDEX_8BPC;
+ case 10:
+ return COLUMN_INDEX_10BPC;
+ case 12:
+ return COLUMN_INDEX_12BPC;
+ case 14:
+ return COLUMN_INDEX_14BPC;
+ case 16:
+ return COLUMN_INDEX_16BPC;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int intel_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
+{
+ unsigned long groups_per_line = 0;
+ unsigned long groups_total = 0;
+ unsigned long num_extra_mux_bits = 0;
+ unsigned long slice_bits = 0;
+ unsigned long hrd_delay = 0;
+ unsigned long final_scale = 0;
+ unsigned long rbs_min = 0;
+
+ /* Number of groups used to code each line of a slice */
+ groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
+ DSC_RC_PIXELS_PER_GROUP);
+
+ /* Chunk size in bytes */
+ vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
+ vdsc_cfg->bits_per_pixel,
+ (8 * 16));
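+ /*
+ * E.g. (assumed values) slice_width = 960 at a target of 8 bpp
+ * (bits_per_pixel = 128 in 1/16th bpp units) gives
+ * DIV_ROUND_UP(960 * 128, 128) = 960 bytes per chunk.
+ */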
+
+ if (vdsc_cfg->convert_rgb)
+ num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4)
+ - 2);
+ else
+ num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4) +
+ 2 * (4 * vdsc_cfg->bits_per_component) - 2;
+ /* Number of bits in one Slice */
+ slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
+
+ while ((num_extra_mux_bits > 0) &&
+ ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
+ num_extra_mux_bits--;
+
+ if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
+ vdsc_cfg->initial_scale_value = groups_per_line + 8;
+
+ /* scale_decrement_interval calculation according to DSC spec 1.11 */
+ if (vdsc_cfg->initial_scale_value > 8)
+ vdsc_cfg->scale_decrement_interval = groups_per_line /
+ (vdsc_cfg->initial_scale_value - 8);
+ else
+ vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
+
+ vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
+ (vdsc_cfg->initial_xmit_delay *
+ vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
+
+ if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
+ DRM_DEBUG_KMS("FinalOfs < RcModelSize for this InitialXmitDelay\n");
+ return -ERANGE;
+ }
+
+ final_scale = (vdsc_cfg->rc_model_size * 8) /
+ (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
+ if (vdsc_cfg->slice_height > 1)
+ /*
+ * NflBpgOffset is 16 bit value with 11 fractional bits
+ * hence we multiply by 2^11 for preserving the
+ * fractional part
+ */
+ vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
+ (vdsc_cfg->slice_height - 1));
+ else
+ vdsc_cfg->nfl_bpg_offset = 0;
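+ /*
+ * E.g. (assumed values) first_line_bpg_offset = 12 and slice_height = 8
+ * give DIV_ROUND_UP(12 << 11, 7) = 3511, i.e. roughly 1.71 bits per
+ * group once the 11 fractional bits are accounted for.
+ */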
+
+ /* 2^16 - 1 */
+ if (vdsc_cfg->nfl_bpg_offset > 65535) {
+ DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
+ return -ERANGE;
+ }
+
+ /* Number of groups used to code the entire slice */
+ groups_total = groups_per_line * vdsc_cfg->slice_height;
+
+ /* slice_bpg_offset is 16 bit value with 11 fractional bits */
+ vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
+ vdsc_cfg->initial_offset +
+ num_extra_mux_bits) << 11),
+ groups_total);
+
+ if (final_scale > 9) {
+ /*
+ * ScaleIncrementInterval =
+ * finaloffset / ((NflBpgOffset + SliceBpgOffset) * 8 * (finalscale - 1.125))
+ * As (NflBpgOffset + SliceBpgOffset) carries an 11 bit fractional value,
+ * we need to divide by 2^11 from the pstDscCfg values
+ */
+ vdsc_cfg->scale_increment_interval =
+ (vdsc_cfg->final_offset * (1 << 11)) /
+ ((vdsc_cfg->nfl_bpg_offset +
+ vdsc_cfg->slice_bpg_offset) *
+ (final_scale - 9));
+ } else {
+ /*
+ * If finalScaleValue is less than or equal to 9, a value of 0 should
+ * be used to disable the scale increment at the end of the slice
+ */
+ vdsc_cfg->scale_increment_interval = 0;
+ }
+
+ if (vdsc_cfg->scale_increment_interval > 65535) {
+ DRM_DEBUG_KMS("ScaleIncrementInterval is too large for this slice height\n");
+ return -ERANGE;
+ }
+
+ /*
+ * DSC spec mentions that bits_per_pixel specifies the target
+ * bits/pixel (bpp) rate that is used by the encoder,
+ * in steps of 1/16 of a bit per pixel
+ */
+ rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
+ DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
+ vdsc_cfg->bits_per_pixel, 16) +
+ groups_per_line * vdsc_cfg->first_line_bpg_offset;
+
+ hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
+ vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
+ vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
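+ /*
+ * Worked example using the 8bpp/8bpc row above and an assumed
+ * slice_width of 960 (i.e. groups_per_line = 320 three-pixel groups):
+ * rbs_min = 8192 - 6144 + DIV_ROUND_UP(512 * 128, 16) + 320 * 12 = 9984,
+ * hrd_delay = DIV_ROUND_UP(9984 * 16, 128) = 1248 and
+ * initial_dec_delay = 1248 - 512 = 736.
+ */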
+
+ return 0;
+}
+
+int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_dsc_config *vdsc_cfg = &pipe_config->dp_dsc_cfg;
+ u16 compressed_bpp = pipe_config->dsc_params.compressed_bpp;
+ u8 i = 0;
+ int row_index = 0;
+ int column_index = 0;
+ u8 line_buf_depth = 0;
+
+ vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay;
+ vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay;
+ vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
+ pipe_config->dsc_params.slice_count);
+ /*
+ * Slice Height of 8 works for all currently available panels. So start
+ * with that if pic_height is an integral multiple of 8.
+ * Eventually add logic to try multiple slice heights.
+ */
+ if (vdsc_cfg->pic_height % 8 == 0)
+ vdsc_cfg->slice_height = 8;
+ else if (vdsc_cfg->pic_height % 4 == 0)
+ vdsc_cfg->slice_height = 4;
+ else
+ vdsc_cfg->slice_height = 2;
+
+ /* Values filled from DSC Sink DPCD */
+ vdsc_cfg->dsc_version_major =
+ (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+ DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
+ vdsc_cfg->dsc_version_minor =
+ min(DSC_SUPPORTED_VERSION_MIN,
+ (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+ DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
+
+ vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
+ DP_DSC_RGB;
+
+ line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
+ if (!line_buf_depth) {
+ DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
+ return -EINVAL;
+ }
+ if (vdsc_cfg->dsc_version_minor == 2)
+ vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
+ else
+ vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
+
+ /* Gen 11 does not support YCbCr */
+ vdsc_cfg->enable422 = false;
+ /* Gen 11 does not support VBR */
+ vdsc_cfg->vbr_enable = false;
+ vdsc_cfg->block_pred_enable =
+ intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
+ DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
+
+ /* Gen 11 only supports integral values of bpp */
+ vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
+ vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3;
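+ /*
+ * E.g. a target of 8 bpp becomes bits_per_pixel = 8 << 4 = 128 in the
+ * 1/16th bpp units used by the PPS, and pipe_bpp = 24 gives 8 bits per
+ * component.
+ */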
+
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+ /*
+ * six 0s are appended to the lsb of each threshold value
+ * internally in h/w.
+ * Only 8 bits are allowed for programming RcBufThreshold
+ */
+ vdsc_cfg->rc_buf_thresh[i] = rc_buf_thresh[i] >> 6;
+ }
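+ /*
+ * E.g. rc_buf_thresh[0] = 896 is programmed as 896 >> 6 = 14; the
+ * hardware re-appends the six low-order zero bits.
+ */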
+
+ /*
+ * For 6bpp, RC buffer thresholds 12 and 13 need a different value
+ * as per the C model
+ */
+ if (compressed_bpp == 6) {
+ vdsc_cfg->rc_buf_thresh[12] = 0x7C;
+ vdsc_cfg->rc_buf_thresh[13] = 0x7D;
+ }
+
+ row_index = get_row_index_for_rc_params(compressed_bpp);
+ column_index =
+ get_column_index_for_rc_params(vdsc_cfg->bits_per_component);
+
+ if (row_index < 0 || column_index < 0)
+ return -EINVAL;
+
+ vdsc_cfg->first_line_bpg_offset =
+ rc_params[row_index][column_index].first_line_bpg_offset;
+ vdsc_cfg->initial_xmit_delay =
+ rc_params[row_index][column_index].initial_xmit_delay;
+ vdsc_cfg->initial_offset =
+ rc_params[row_index][column_index].initial_offset;
+ vdsc_cfg->flatness_min_qp =
+ rc_params[row_index][column_index].flatness_min_qp;
+ vdsc_cfg->flatness_max_qp =
+ rc_params[row_index][column_index].flatness_max_qp;
+ vdsc_cfg->rc_quant_incr_limit0 =
+ rc_params[row_index][column_index].rc_quant_incr_limit0;
+ vdsc_cfg->rc_quant_incr_limit1 =
+ rc_params[row_index][column_index].rc_quant_incr_limit1;
+
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ vdsc_cfg->rc_range_params[i].range_min_qp =
+ rc_params[row_index][column_index].rc_range_params[i].range_min_qp;
+ vdsc_cfg->rc_range_params[i].range_max_qp =
+ rc_params[row_index][column_index].rc_range_params[i].range_max_qp;
+ /*
+ * Range BPG Offset uses 2's complement and is only 6 bits wide,
+ * so mask it to keep only those 6 bits.
+ */
+ vdsc_cfg->rc_range_params[i].range_bpg_offset =
+ rc_params[row_index][column_index].rc_range_params[i].range_bpg_offset &
+ DSC_RANGE_BPG_OFFSET_MASK;
+ }
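+ /*
+ * E.g. a table value of -2 masked to its low 6 bits is programmed as
+ * 0x3e, the 6 bit two's complement encoding expected by the PPS.
+ */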
+
+ /*
+ * The BitsPerComponent value determines mux_word_size:
+ * when BitsPerComponent is 12bpc, muxWordSize is 64 bits;
+ * when BitsPerComponent is 8 or 10bpc, muxWordSize is 48 bits.
+ */
+ if (vdsc_cfg->bits_per_component == 8 ||
+ vdsc_cfg->bits_per_component == 10)
+ vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
+ else if (vdsc_cfg->bits_per_component == 12)
+ vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
+
+ /* RC_MODEL_SIZE is a constant across all configurations */
+ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+ /* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */
+ vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
+ (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
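+ /*
+ * E.g. with the 8bpp table rows above (initial_offset = 6144) and
+ * rc_model_size = 8192: (8192 << 3) / (8192 - 6144) = 32, i.e. 4.0
+ * in U3.3 fixed point.
+ */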
+
+ return intel_compute_rc_parameters(vdsc_cfg);
+}
+
+enum intel_display_power_domain
+intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
+{
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ /*
+ * On ICL, VDSC/joining for the eDP transcoder uses a separate power well,
+ * PW2. This requires the POWER_DOMAIN_TRANSCODER_EDP_VDSC power domain.
+ * For any other transcoder, VDSC/joining uses the power well associated
+ * with the pipe/transcoder in use, so another reference on that
+ * transcoder power domain will suffice.
+ */
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return POWER_DOMAIN_TRANSCODER_EDP_VDSC;
+ else
+ return POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+}
+
+static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 pps_val = 0;
+ u32 rc_buf_thresh_dword[4];
+ u32 rc_range_params_dword[8];
+ u8 num_vdsc_instances = (crtc_state->dsc_params.dsc_split) ? 2 : 1;
+ int i = 0;
+
+ /* Populate PICTURE_PARAMETER_SET_0 registers */
+ pps_val = DSC_VER_MAJ | vdsc_cfg->dsc_version_minor <<
+ DSC_VER_MIN_SHIFT |
+ vdsc_cfg->bits_per_component << DSC_BPC_SHIFT |
+ vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT;
+ if (vdsc_cfg->block_pred_enable)
+ pps_val |= DSC_BLOCK_PREDICTION;
+ if (vdsc_cfg->convert_rgb)
+ pps_val |= DSC_COLOR_SPACE_CONVERSION;
+ if (vdsc_cfg->enable422)
+ pps_val |= DSC_422_ENABLE;
+ if (vdsc_cfg->vbr_enable)
+ pps_val |= DSC_VBR_ENABLE;
+ DRM_INFO("PPS0 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_1 registers */
+ pps_val = 0;
+ pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
+ DRM_INFO("PPS1 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_2 registers */
+ pps_val = 0;
+ pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
+ DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
+ DRM_INFO("PPS2 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_3 registers */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
+ DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
+ DRM_INFO("PPS3 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_4 registers */
+ pps_val = 0;
+ pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
+ DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
+ DRM_INFO("PPS4 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_5 registers */
+ pps_val = 0;
+ pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
+ DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
+ DRM_INFO("PPS5 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_6 registers */
+ pps_val = 0;
+ pps_val |= DSC_INITIAL_SCALE_VALUE(vdsc_cfg->initial_scale_value) |
+ DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) |
+ DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
+ DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
+ DRM_INFO("PPS6 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_7 registers */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
+ DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
+ DRM_INFO("PPS7 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_8 registers */
+ pps_val = 0;
+ pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
+ DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
+ DRM_INFO("PPS8 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_9 registers */
+ pps_val = 0;
+ pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
+ DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
+ DRM_INFO("PPS9 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_10 registers */
+ pps_val = 0;
+ pps_val |= DSC_RC_QUANT_INC_LIMIT0(vdsc_cfg->rc_quant_incr_limit0) |
+ DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) |
+ DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
+ DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
+ DRM_INFO("PPS10 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_16 registers */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_CHUNK_SIZE(vdsc_cfg->slice_chunk_size) |
+ DSC_SLICE_PER_LINE((vdsc_cfg->pic_width / num_vdsc_instances) /
+ vdsc_cfg->slice_width) |
+ DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
+ vdsc_cfg->slice_height);
+ DRM_INFO("PPS16 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
+ pps_val);
+ }
+
+ /* Populate the RC_BUF_THRESH registers */
+ memset(rc_buf_thresh_dword, 0, sizeof(rc_buf_thresh_dword));
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+ rc_buf_thresh_dword[i / 4] |=
+ (u32)(vdsc_cfg->rc_buf_thresh[i] <<
+ BITS_PER_BYTE * (i % 4));
+ DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i,
+ rc_buf_thresh_dword[i / 4]);
+ }
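+ /*
+ * With the thresholds above (14, 28, 42, 56, ...) the first dword
+ * packs to 0x382a1c0e, one threshold byte per byte lane.
+ */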
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]);
+ I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
+ I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
+ I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(DSCC_RC_BUF_THRESH_0,
+ rc_buf_thresh_dword[0]);
+ I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW,
+ rc_buf_thresh_dword[1]);
+ I915_WRITE(DSCC_RC_BUF_THRESH_1,
+ rc_buf_thresh_dword[2]);
+ I915_WRITE(DSCC_RC_BUF_THRESH_1_UDW,
+ rc_buf_thresh_dword[3]);
+ }
+ } else {
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
+ }
+ }
+
+ /* Populate the RC_RANGE_PARAMETERS registers */
+ memset(rc_range_params_dword, 0, sizeof(rc_range_params_dword));
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ rc_range_params_dword[i / 2] |=
+ (u32)(((vdsc_cfg->rc_range_params[i].range_bpg_offset <<
+ RC_BPG_OFFSET_SHIFT) |
+ (vdsc_cfg->rc_range_params[i].range_max_qp <<
+ RC_MAX_QP_SHIFT) |
+ (vdsc_cfg->rc_range_params[i].range_min_qp <<
+ RC_MIN_QP_SHIFT)) << 16 * (i % 2));
+ DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i,
+ rc_range_params_dword[i / 2]);
+ }
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
+ }
+ } else {
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
+ }
+ }
+}
+
+static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
+
+ /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
+ drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp);
+
+ /* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */
+ drm_dsc_pps_infoframe_pack(&dp_dsc_pps_sdp, vdsc_cfg);
+
+ intel_dig_port->write_infoframe(encoder, crtc_state,
+ DP_SDP_PPS, &dp_dsc_pps_sdp,
+ sizeof(dp_dsc_pps_sdp));
+}
+
+void intel_dsc_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+ u32 dss_ctl1_val = 0;
+ u32 dss_ctl2_val = 0;
+
+ if (!crtc_state->dsc_params.compression_enable)
+ return;
+
+ /* Enable Power wells for VDSC/joining */
+ intel_display_power_get(dev_priv,
+ intel_dsc_power_domain(crtc_state));
+
+ intel_configure_pps_for_dsc_encoder(encoder, crtc_state);
+
+ intel_dp_write_dsc_pps_sdp(encoder, crtc_state);
+
+ if (crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+ dss_ctl1_reg = DSS_CTL1;
+ dss_ctl2_reg = DSS_CTL2;
+ } else {
+ dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
+ dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
+ }
+ dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE;
+ if (crtc_state->dsc_params.dsc_split) {
+ dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
+ dss_ctl1_val |= JOINER_ENABLE;
+ }
+ I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+ I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+}
+
+void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+ u32 dss_ctl1_val = 0, dss_ctl2_val = 0;
+
+ if (!old_crtc_state->dsc_params.compression_enable)
+ return;
+
+ if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+ dss_ctl1_reg = DSS_CTL1;
+ dss_ctl2_reg = DSS_CTL2;
+ } else {
+ dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
+ dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
+ }
+ dss_ctl1_val = I915_READ(dss_ctl1_reg);
+ if (dss_ctl1_val & JOINER_ENABLE)
+ dss_ctl1_val &= ~JOINER_ENABLE;
+ I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+
+ dss_ctl2_val = I915_READ(dss_ctl2_reg);
+ if (dss_ctl2_val & LEFT_BRANCH_VDSC_ENABLE ||
+ dss_ctl2_val & RIGHT_BRANCH_VDSC_ENABLE)
+ dss_ctl2_val &= ~(LEFT_BRANCH_VDSC_ENABLE |
+ RIGHT_BRANCH_VDSC_ENABLE);
+ I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+
+ /* Disable Power wells for VDSC/joining */
+ intel_display_power_put(dev_priv,
+ intel_dsc_power_domain(old_crtc_state));
+}
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 4bcdeaf8d98f..4f41e326f3f3 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -48,58 +48,112 @@
* - Public functions to init or apply the given workaround type.
*/
-static void wa_add(struct drm_i915_private *i915,
- i915_reg_t reg, const u32 mask, const u32 val)
+static void wa_init_start(struct i915_wa_list *wal, const char *name)
{
- struct i915_workarounds *wa = &i915->workarounds;
- unsigned int start = 0, end = wa->count;
- unsigned int addr = i915_mmio_reg_offset(reg);
- struct i915_wa_reg *r;
+ wal->name = name;
+}
+
+#define WA_LIST_CHUNK (1 << 4)
+
+static void wa_init_finish(struct i915_wa_list *wal)
+{
+ /* Trim unused entries. */
+ if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
+ struct i915_wa *list = kmemdup(wal->list,
+ wal->count * sizeof(*list),
+ GFP_KERNEL);
+
+ if (list) {
+ kfree(wal->list);
+ wal->list = list;
+ }
+ }
+
+ if (!wal->count)
+ return;
+
+ DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
+ wal->wa_count, wal->name);
+}
+
+static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
+{
+ unsigned int addr = i915_mmio_reg_offset(wa->reg);
+ unsigned int start = 0, end = wal->count;
+ const unsigned int grow = WA_LIST_CHUNK;
+ struct i915_wa *wa_;
+
+ GEM_BUG_ON(!is_power_of_2(grow));
+
+ if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
+ struct i915_wa *list;
+
+ list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+ GFP_KERNEL);
+ if (!list) {
+ DRM_ERROR("No space for workaround init!\n");
+ return;
+ }
+
+ if (wal->list)
+ memcpy(list, wal->list, sizeof(*wa) * wal->count);
+
+ wal->list = list;
+ }
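+
+ /*
+ * The list grows in WA_LIST_CHUNK-entry steps and is kept sorted by
+ * register offset, so the binary search below can find an existing
+ * entry for the same register and merge its mask/value instead of
+ * adding a duplicate.
+ */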
while (start < end) {
unsigned int mid = start + (end - start) / 2;
- if (wa->reg[mid].addr < addr) {
+ if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
start = mid + 1;
- } else if (wa->reg[mid].addr > addr) {
+ } else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
end = mid;
} else {
- r = &wa->reg[mid];
+ wa_ = &wal->list[mid];
- if ((mask & ~r->mask) == 0) {
+ if ((wa->mask & ~wa_->mask) == 0) {
DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
- addr, r->mask, r->value);
+ i915_mmio_reg_offset(wa_->reg),
+ wa_->mask, wa_->val);
- r->value &= ~mask;
+ wa_->val &= ~wa->mask;
}
- r->value |= val;
- r->mask |= mask;
+ wal->wa_count++;
+ wa_->val |= wa->val;
+ wa_->mask |= wa->mask;
return;
}
}
- if (WARN_ON_ONCE(wa->count >= I915_MAX_WA_REGS)) {
- DRM_ERROR("Dropping w/a for reg %04x (mask: %08x, value: %08x)\n",
- addr, mask, val);
- return;
- }
-
- r = &wa->reg[wa->count++];
- r->addr = addr;
- r->value = val;
- r->mask = mask;
+ wal->wa_count++;
+ wa_ = &wal->list[wal->count++];
+ *wa_ = *wa;
- while (r-- > wa->reg) {
- GEM_BUG_ON(r[0].addr == r[1].addr);
- if (r[1].addr > r[0].addr)
+ while (wa_-- > wal->list) {
+ GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
+ i915_mmio_reg_offset(wa_[1].reg));
+ if (i915_mmio_reg_offset(wa_[1].reg) >
+ i915_mmio_reg_offset(wa_[0].reg))
break;
- swap(r[1], r[0]);
+ swap(wa_[1], wa_[0]);
}
}
-#define WA_REG(addr, mask, val) wa_add(dev_priv, (addr), (mask), (val))
+static void
+__wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
+{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = mask,
+ .val = val
+ };
+
+ _wa_add(wal, &wa);
+}
+
+#define WA_REG(addr, mask, val) __wa_add(wal, (addr), (mask), (val))
#define WA_SET_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
@@ -110,8 +164,10 @@ static void wa_add(struct drm_i915_private *i915,
#define WA_SET_FIELD_MASKED(addr, mask, value) \
WA_REG(addr, (mask), _MASKED_FIELD(mask, value))
-static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
/* WaDisableAsyncFlipPerfMode:bdw,chv */
@@ -155,17 +211,14 @@ static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
-
- return 0;
}
-static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen8_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen8_ctx_workarounds_init(engine);
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
@@ -185,31 +238,28 @@ static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaForceContextSaveRestoreNonCoherent:bdw */
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
-
- return 0;
+ (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}
-static int chv_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void chv_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen8_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen8_ctx_workarounds_init(engine);
/* WaDisableThreadStallDopClockGating:chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* Improve HiZ throughput on CHV. */
WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
-
- return 0;
}
-static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- if (HAS_LLC(dev_priv)) {
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
+ if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
*
* Must match Display Engine. See
@@ -228,7 +278,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
- if (!IS_COFFEELAKE(dev_priv))
+ if (!IS_COFFEELAKE(i915))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
@@ -271,9 +321,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
HDC_FORCE_NON_COHERENT);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
- if (IS_SKYLAKE(dev_priv) ||
- IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv))
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
@@ -300,14 +348,14 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEN9_LP(i915))
WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
-
- return 0;
}
-static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
+static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
@@ -318,7 +366,7 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
* Only consider slices where one, and only one, subslice has 7
* EUs
*/
- if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
+ if (!is_power_of_2(INTEL_INFO(i915)->sseu.subslice_7eu[i]))
continue;
/*
@@ -327,12 +375,12 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
*
* -> 0 <= ss <= 3;
*/
- ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
+ ss = ffs(INTEL_INFO(i915)->sseu.subslice_7eu[i]) - 1;
vals[i] = 3 - ss;
}
if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
- return 0;
+ return;
/* Tune IZ hashing. See intel_device_info_runtime_init() */
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
@@ -342,28 +390,19 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
GEN9_IZ_HASHING(2, vals[2]) |
GEN9_IZ_HASHING(1, vals[1]) |
GEN9_IZ_HASHING(0, vals[0]));
-
- return 0;
}
-static int skl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void skl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
-
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
-
- return skl_tune_iz_hashing(dev_priv);
+ gen9_ctx_workarounds_init(engine);
+ skl_tune_iz_hashing(engine);
}
-static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen9_ctx_workarounds_init(engine);
/* WaDisableThreadStallDopClockGating:bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -372,57 +411,41 @@ static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaToEnableHwFixForPushConstHWBug:bxt */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- return 0;
}
-static int kbl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
-
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FENCE_DEST_SLM_DISABLE);
+ gen9_ctx_workarounds_init(engine);
/* WaToEnableHwFixForPushConstHWBug:kbl */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
+ if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableSbeCacheDispatchPortSharing:kbl */
WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
- return 0;
}
-static int glk_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void glk_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen9_ctx_workarounds_init(engine);
/* WaToEnableHwFixForPushConstHWBug:glk */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- return 0;
}
-static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen9_ctx_workarounds_init(engine);
/* WaToEnableHwFixForPushConstHWBug:cfl */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
@@ -431,18 +454,19 @@ static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaDisableSbeCacheDispatchPortSharing:cfl */
WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
- return 0;
}
-static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
/* WaForceContextSaveRestoreNonCoherent:cnl */
WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
+ if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
@@ -450,7 +474,7 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
+ if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
@@ -470,16 +494,17 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaDisableEarlyEOT:cnl */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
-
- return 0;
}
-static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
/* Wa_1604370585:icl (pre-prod)
* Formerly known as WaPushConstantDereferenceHoldDisable
*/
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
PUSH_CONSTANT_DEREF_DISABLE);
@@ -495,7 +520,7 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* Wa_2006611047:icl (pre-prod)
* Formerly known as WaDisableImprovedTdlClkGating
*/
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
@@ -504,70 +529,67 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
GEN11_STATE_CACHE_REDIRECT_TO_CS);
/* Wa_2006665173:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
-
- return 0;
}
-int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
- int err = 0;
-
- dev_priv->workarounds.count = 0;
-
- if (INTEL_GEN(dev_priv) < 8)
- err = 0;
- else if (IS_BROADWELL(dev_priv))
- err = bdw_ctx_workarounds_init(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- err = chv_ctx_workarounds_init(dev_priv);
- else if (IS_SKYLAKE(dev_priv))
- err = skl_ctx_workarounds_init(dev_priv);
- else if (IS_BROXTON(dev_priv))
- err = bxt_ctx_workarounds_init(dev_priv);
- else if (IS_KABYLAKE(dev_priv))
- err = kbl_ctx_workarounds_init(dev_priv);
- else if (IS_GEMINILAKE(dev_priv))
- err = glk_ctx_workarounds_init(dev_priv);
- else if (IS_COFFEELAKE(dev_priv))
- err = cfl_ctx_workarounds_init(dev_priv);
- else if (IS_CANNONLAKE(dev_priv))
- err = cnl_ctx_workarounds_init(dev_priv);
- else if (IS_ICELAKE(dev_priv))
- err = icl_ctx_workarounds_init(dev_priv);
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
+ wa_init_start(wal, "context");
+
+ if (INTEL_GEN(i915) < 8)
+ return;
+ else if (IS_BROADWELL(i915))
+ bdw_ctx_workarounds_init(engine);
+ else if (IS_CHERRYVIEW(i915))
+ chv_ctx_workarounds_init(engine);
+ else if (IS_SKYLAKE(i915))
+ skl_ctx_workarounds_init(engine);
+ else if (IS_BROXTON(i915))
+ bxt_ctx_workarounds_init(engine);
+ else if (IS_KABYLAKE(i915))
+ kbl_ctx_workarounds_init(engine);
+ else if (IS_GEMINILAKE(i915))
+ glk_ctx_workarounds_init(engine);
+ else if (IS_COFFEELAKE(i915))
+ cfl_ctx_workarounds_init(engine);
+ else if (IS_CANNONLAKE(i915))
+ cnl_ctx_workarounds_init(engine);
+ else if (IS_ICELAKE(i915))
+ icl_ctx_workarounds_init(engine);
else
- MISSING_CASE(INTEL_GEN(dev_priv));
- if (err)
- return err;
+ MISSING_CASE(INTEL_GEN(i915));
- DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n",
- dev_priv->workarounds.count);
- return 0;
+ wa_init_finish(wal);
}
-int intel_ctx_workarounds_emit(struct i915_request *rq)
+int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
- struct i915_workarounds *w = &rq->i915->workarounds;
+ struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
+ struct i915_wa *wa;
+ unsigned int i;
u32 *cs;
- int ret, i;
+ int ret;
- if (w->count == 0)
+ if (wal->count == 0)
return 0;
ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
if (ret)
return ret;
- cs = intel_ring_begin(rq, (w->count * 2 + 2));
+ cs = intel_ring_begin(rq, (wal->count * 2 + 2));
if (IS_ERR(cs))
return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(w->count);
- for (i = 0; i < w->count; i++) {
- *cs++ = w->reg[i].addr;
- *cs++ = w->reg[i].value;
+ *cs++ = MI_LOAD_REGISTER_IMM(wal->count);
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ *cs++ = i915_mmio_reg_offset(wa->reg);
+ *cs++ = wa->val;
}
*cs++ = MI_NOOP;
@@ -580,160 +602,149 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
return 0;
}
-static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = val,
+ .val = _MASKED_BIT_ENABLE(val)
+ };
+
+ _wa_add(wal, &wa);
+}
+
+static void
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+ u32 val)
+{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = mask,
+ .val = val
+ };
+
+ _wa_add(wal, &wa);
}
-static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
+ wa_write_masked_or(wal, reg, ~0, val);
}
-static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
- /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
- I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
- _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+ wa_write_masked_or(wal, reg, val, val);
+}
- /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
- I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
- GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
+{
+ struct i915_wa_list *wal = &i915->gt_wa_list;
/* WaDisableKillLogic:bxt,skl,kbl */
- if (!IS_COFFEELAKE(dev_priv))
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- ECOCHK_DIS_TLB);
+ if (!IS_COFFEELAKE(i915))
+ wa_write_or(wal,
+ GAM_ECOCHK,
+ ECOCHK_DIS_TLB);
- if (HAS_LLC(dev_priv)) {
+ if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
*
* Must match Display Engine. See
* WaCompressedResourceDisplayNewHashMode.
*/
- I915_WRITE(MMCD_MISC_CTRL,
- I915_READ(MMCD_MISC_CTRL) |
- MMCD_PCLA |
- MMCD_HOTSPOT_EN);
+ wa_write_or(wal,
+ MMCD_MISC_CTRL,
+ MMCD_PCLA | MMCD_HOTSPOT_EN);
}
/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- BDW_DISABLE_HDC_INVALIDATION);
-
- /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
- if (IS_GEN9_LP(dev_priv)) {
- u32 val = I915_READ(GEN8_L3SQCREG1);
-
- val &= ~L3_PRIO_CREDITS_MASK;
- val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
- I915_WRITE(GEN8_L3SQCREG1, val);
- }
-
- /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
- I915_WRITE(GEN8_L3SQCREG4,
- I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
-
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ wa_write_or(wal,
+ GAM_ECOCHK,
+ BDW_DISABLE_HDC_INVALIDATION);
}
-static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void skl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:skl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableGafsUnitClkGating:skl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaDisablePooledEuLoadBalancingFix:bxt */
- I915_WRITE(FF_SLICE_CS_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
+ gen9_gt_workarounds_init(i915);
/* WaInPlaceDecompressionHang:bxt */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:kbl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+ if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
/* WaDisableGafsUnitClkGating:kbl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:kbl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaKBLVECSSemaphoreWaitPoll:kbl */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
- struct intel_engine_cs *engine;
- unsigned int tmp;
-
- for_each_engine(engine, dev_priv, tmp) {
- if (engine->id == RCS)
- continue;
-
- I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
- }
- }
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void glk_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ gen9_gt_workarounds_init(i915);
}
-static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:cfl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableGafsUnitClkGating:cfl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:cfl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void wa_init_mcr(struct drm_i915_private *dev_priv)
{
const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
- u32 mcr;
+ struct i915_wa_list *wal = &dev_priv->gt_wa_list;
u32 mcr_slice_subslice_mask;
/*
@@ -770,8 +781,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
}
- mcr = I915_READ(GEN8_MCR_SELECTOR);
-
if (INTEL_GEN(dev_priv) >= 11)
mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
GEN11_MCR_SUBSLICE_MASK;
@@ -789,173 +798,220 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
* occasions, such as INSTDONE, where this value is dependent
* on s/ss combo, the read should be done with read_subslice_reg.
*/
- mcr &= ~mcr_slice_subslice_mask;
- mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
- I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+ wa_write_masked_or(wal,
+ GEN8_MCR_SELECTOR,
+ mcr_slice_subslice_mask,
+ intel_calculate_mcr_s_ss_select(dev_priv));
}
-static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
{
- wa_init_mcr(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
+
+ wa_init_mcr(i915);
/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+ if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
/* WaInPlaceDecompressionHang:cnl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaEnablePreemptionGranularityControlByUMD:cnl */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void icl_gt_workarounds_init(struct drm_i915_private *i915)
{
- wa_init_mcr(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* This is not an Wa. Enable for better image quality */
- I915_WRITE(_3D_CHICKEN3,
- _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
+ wa_init_mcr(i915);
/* WaInPlaceDecompressionHang:icl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaPipelineFlushCoherentLines:icl */
- I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
- GEN8_LQSC_FLUSH_COHERENT_LINES);
-
- /* Wa_1405543622:icl
- * Formerly known as WaGAPZPriorityScheme
- */
- I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) |
- GEN11_ARBITRATION_PRIO_ORDER_MASK);
-
- /* Wa_1604223664:icl
- * Formerly known as WaL3BankAddressHashing
- */
- I915_WRITE(GEN8_GARBCNTL,
- (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
- GEN11_HASH_CTRL_EXCL_BIT0);
- I915_WRITE(GEN11_GLBLINVL,
- (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
- GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaModifyGamTlbPartitioning:icl */
- I915_WRITE(GEN11_GACB_PERF_CTRL,
- (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
- GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
-
- /* Wa_1405733216:icl
- * Formerly known as WaDisableCleanEvicts
- */
- I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
- GEN11_LQSC_CLEAN_EVICT_DISABLE);
+ wa_write_masked_or(wal,
+ GEN11_GACB_PERF_CTRL,
+ GEN11_HASH_CTRL_MASK,
+ GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
/* Wa_1405766107:icl
* Formerly known as WaCL2SFHalfMaxAlloc
*/
- I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) |
- GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
- GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+ wa_write_or(wal,
+ GEN11_LSN_UNSLCVC,
+ GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+ GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
/* Wa_220166154:icl
* Formerly known as WaDisCtxReload
*/
- I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) |
- GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+ wa_write_or(wal,
+ GEN8_GAMW_ECO_DEV_RW_IA,
+ GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
/* Wa_1405779004:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
- I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
- I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
- MSCUNIT_CLKGATE_DIS);
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ MSCUNIT_CLKGATE_DIS);
/* Wa_1406680159:icl */
- I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
- I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
- GWUNIT_CLKGATE_DIS);
-
- /* Wa_1604302699:icl */
- I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
- I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
- GEN11_I2M_WRITE_DISABLE);
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE,
+ GWUNIT_CLKGATE_DIS);
/* Wa_1406838659:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
- I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
- I915_READ(INF_UNIT_LEVEL_CLKGATE) |
- CGPSF_CLKGATE_DIS);
-
- /* WaForwardProgressSoftReset:icl */
- I915_WRITE(GEN10_SCRATCH_LNCF2,
- I915_READ(GEN10_SCRATCH_LNCF2) |
- PMFLUSHDONE_LNICRSDROP |
- PMFLUSH_GAPL3UNBLOCK |
- PMFLUSHDONE_LNEBLK);
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+ wa_write_or(wal,
+ INF_UNIT_LEVEL_CLKGATE,
+ CGPSF_CLKGATE_DIS);
/* Wa_1406463099:icl
* Formerly known as WaGamTlbPendError
*/
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_L3_COH_PIPE);
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_L3_COH_PIPE);
}
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
- if (INTEL_GEN(dev_priv) < 8)
+ struct i915_wa_list *wal = &i915->gt_wa_list;
+
+ wa_init_start(wal, "GT");
+
+ if (INTEL_GEN(i915) < 8)
return;
- else if (IS_BROADWELL(dev_priv))
- bdw_gt_workarounds_apply(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- chv_gt_workarounds_apply(dev_priv);
- else if (IS_SKYLAKE(dev_priv))
- skl_gt_workarounds_apply(dev_priv);
- else if (IS_BROXTON(dev_priv))
- bxt_gt_workarounds_apply(dev_priv);
- else if (IS_KABYLAKE(dev_priv))
- kbl_gt_workarounds_apply(dev_priv);
- else if (IS_GEMINILAKE(dev_priv))
- glk_gt_workarounds_apply(dev_priv);
- else if (IS_COFFEELAKE(dev_priv))
- cfl_gt_workarounds_apply(dev_priv);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_gt_workarounds_apply(dev_priv);
- else if (IS_ICELAKE(dev_priv))
- icl_gt_workarounds_apply(dev_priv);
+ else if (IS_BROADWELL(i915))
+ return;
+ else if (IS_CHERRYVIEW(i915))
+ return;
+ else if (IS_SKYLAKE(i915))
+ skl_gt_workarounds_init(i915);
+ else if (IS_BROXTON(i915))
+ bxt_gt_workarounds_init(i915);
+ else if (IS_KABYLAKE(i915))
+ kbl_gt_workarounds_init(i915);
+ else if (IS_GEMINILAKE(i915))
+ glk_gt_workarounds_init(i915);
+ else if (IS_COFFEELAKE(i915))
+ cfl_gt_workarounds_init(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_gt_workarounds_init(i915);
+ else if (IS_ICELAKE(i915))
+ icl_gt_workarounds_init(i915);
else
- MISSING_CASE(INTEL_GEN(dev_priv));
+ MISSING_CASE(INTEL_GEN(i915));
+
+ wa_init_finish(wal);
}
-struct whitelist {
- i915_reg_t reg[RING_MAX_NONPRIV_SLOTS];
- unsigned int count;
- u32 nopid;
-};
+static enum forcewake_domains
+wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
+ const struct i915_wa_list *wal)
+{
+ enum forcewake_domains fw = 0;
+ struct i915_wa *wa;
+ unsigned int i;
+
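+	/* Collect every forcewake domain needed to read and write the listed registers. */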
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ fw |= intel_uncore_forcewake_for_reg(dev_priv,
+ wa->reg,
+ FW_REG_READ |
+ FW_REG_WRITE);
+
+ return fw;
+}
-static void whitelist_reg(struct whitelist *w, i915_reg_t reg)
+static void
+wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
{
- if (GEM_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS))
+ enum forcewake_domains fw;
+ unsigned long flags;
+ struct i915_wa *wa;
+ unsigned int i;
+
+ if (!wal->count)
return;
- w->reg[w->count++] = reg;
+ fw = wal_get_fw_for_rmw(dev_priv, wal);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_get__locked(dev_priv, fw);
+
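+	/* Read-modify-write each register: clear the workaround's mask bits, then OR in its value. */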
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ u32 val = I915_READ_FW(wa->reg);
+
+ val &= ~wa->mask;
+ val |= wa->val;
+
+ I915_WRITE_FW(wa->reg, val);
+ }
+
+ intel_uncore_forcewake_put__locked(dev_priv, fw);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+ DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
+}
+
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
+{
+ wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
+}
+
+static bool
+wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
+{
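+	/* A workaround is considered lost if any bit under its mask differs from the expected value. */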
+ if ((cur ^ wa->val) & wa->mask) {
+ DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
+ name, from, i915_mmio_reg_offset(wa->reg), cur,
+ cur & wa->mask, wa->val, wa->mask);
+
+ return false;
+ }
+
+ return true;
}
-static void bdw_whitelist_build(struct whitelist *w)
+static bool wa_list_verify(struct drm_i915_private *dev_priv,
+ const struct i915_wa_list *wal,
+ const char *from)
{
+ struct i915_wa *wa;
+ unsigned int i;
+ bool ok = true;
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ ok &= wa_verify(wa, I915_READ(wa->reg), wal->name, from);
+
+ return ok;
}
-static void chv_whitelist_build(struct whitelist *w)
+bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+ const char *from)
{
+ return wa_list_verify(dev_priv, &dev_priv->gt_wa_list, from);
}
-static void gen9_whitelist_build(struct whitelist *w)
+static void
+whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
+{
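+	/* Whitelisted registers are exposed to unprivileged batches via the engine's RING_FORCE_TO_NONPRIV slots. */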
+ struct i915_wa wa = {
+ .reg = reg
+ };
+
+ if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
+ return;
+
+ _wa_add(wal, &wa);
+}
+
+static void gen9_whitelist_build(struct i915_wa_list *w)
{
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
@@ -967,7 +1023,7 @@ static void gen9_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN8_HDC_CHICKEN1);
}
-static void skl_whitelist_build(struct whitelist *w)
+static void skl_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
@@ -975,12 +1031,12 @@ static void skl_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN8_L3SQCREG4);
}
-static void bxt_whitelist_build(struct whitelist *w)
+static void bxt_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
}
-static void kbl_whitelist_build(struct whitelist *w)
+static void kbl_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
@@ -988,7 +1044,7 @@ static void kbl_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN8_L3SQCREG4);
}
-static void glk_whitelist_build(struct whitelist *w)
+static void glk_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
@@ -996,37 +1052,41 @@ static void glk_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}
-static void cfl_whitelist_build(struct whitelist *w)
+static void cfl_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
}
-static void cnl_whitelist_build(struct whitelist *w)
+static void cnl_whitelist_build(struct i915_wa_list *w)
{
/* WaEnablePreemptionGranularityControlByUMD:cnl */
whitelist_reg(w, GEN8_CS_CHICKEN1);
}
-static void icl_whitelist_build(struct whitelist *w)
+static void icl_whitelist_build(struct i915_wa_list *w)
{
+ /* WaAllowUMDToModifyHalfSliceChicken7:icl */
+ whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
+
+ /* WaAllowUMDToModifySamplerMode:icl */
+ whitelist_reg(w, GEN10_SAMPLER_MODE);
}
-static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
- struct whitelist *w)
+void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *w = &engine->whitelist;
GEM_BUG_ON(engine->id != RCS);
- w->count = 0;
- w->nopid = i915_mmio_reg_offset(RING_NOPID(engine->mmio_base));
+ wa_init_start(w, "whitelist");
if (INTEL_GEN(i915) < 8)
- return NULL;
+ return;
else if (IS_BROADWELL(i915))
- bdw_whitelist_build(w);
+ return;
else if (IS_CHERRYVIEW(i915))
- chv_whitelist_build(w);
+ return;
else if (IS_SKYLAKE(i915))
skl_whitelist_build(w);
else if (IS_BROXTON(i915))
@@ -1044,39 +1104,180 @@ static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
else
MISSING_CASE(INTEL_GEN(i915));
- return w;
+ wa_init_finish(w);
}
-static void whitelist_apply(struct intel_engine_cs *engine,
- const struct whitelist *w)
+void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
+ const struct i915_wa_list *wal = &engine->whitelist;
const u32 base = engine->mmio_base;
+ struct i915_wa *wa;
unsigned int i;
- if (!w)
+ if (!wal->count)
return;
- intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
-
- for (i = 0; i < w->count; i++)
- I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i),
- i915_mmio_reg_offset(w->reg[i]));
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(wa->reg));
/* And clear the rest just in case of garbage */
for (; i < RING_MAX_NONPRIV_SLOTS; i++)
- I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i), w->nopid);
+ I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(RING_NOPID(base)));
- intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
+ DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
}
-void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void rcs_engine_wa_init(struct intel_engine_cs *engine)
{
- struct whitelist w;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ if (IS_ICELAKE(i915)) {
+ /* This is not a Wa. Enable for better image quality */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
+
+ /* WaPipelineFlushCoherentLines:icl */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+ /*
+ * Wa_1405543622:icl
+ * Formerly known as WaGAPZPriorityScheme
+ */
+ wa_write_or(wal,
+ GEN8_GARBCNTL,
+ GEN11_ARBITRATION_PRIO_ORDER_MASK);
+
+ /*
+ * Wa_1604223664:icl
+ * Formerly known as WaL3BankAddressHashing
+ */
+ wa_write_masked_or(wal,
+ GEN8_GARBCNTL,
+ GEN11_HASH_CTRL_EXCL_MASK,
+ GEN11_HASH_CTRL_EXCL_BIT0);
+ wa_write_masked_or(wal,
+ GEN11_GLBLINVL,
+ GEN11_BANK_HASH_ADDR_EXCL_MASK,
+ GEN11_BANK_HASH_ADDR_EXCL_BIT0);
- whitelist_apply(engine, whitelist_build(engine, &w));
+ /*
+ * Wa_1405733216:icl
+ * Formerly known as WaDisableCleanEvicts
+ */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN11_LQSC_CLEAN_EVICT_DISABLE);
+
+ /* WaForwardProgressSoftReset:icl */
+ wa_write_or(wal,
+ GEN10_SCRATCH_LNCF2,
+ PMFLUSHDONE_LNICRSDROP |
+ PMFLUSH_GAPL3UNBLOCK |
+ PMFLUSHDONE_LNEBLK);
+
+ /* Wa_1406609255:icl (pre-prod) */
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+ wa_write_or(wal,
+ GEN7_SARCHKMD,
+ GEN7_DISABLE_DEMAND_PREFETCH |
+ GEN7_DISABLE_SAMPLER_PREFETCH);
+ }
+
+ if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
+ wa_masked_en(wal,
+ GEN7_FF_SLICE_CS_CHICKEN1,
+ GEN9_FFSC_PERCTX_PREEMPT_CTRL);
+ }
+
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
+ /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
+ wa_write_or(wal,
+ GEN8_GARBCNTL,
+ GEN9_GAPS_TSV_CREDIT_DISABLE);
+ }
+
+ if (IS_BROXTON(i915)) {
+ /* WaDisablePooledEuLoadBalancingFix:bxt */
+ wa_masked_en(wal,
+ FF_SLICE_CS_CHICKEN2,
+ GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+ }
+
+ if (IS_GEN9(i915)) {
+ /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
+ wa_masked_en(wal,
+ GEN9_CSFE_CHICKEN1_RCS,
+ GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
+
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
+ wa_write_or(wal,
+ BDW_SCRATCH1,
+ GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+ /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+ if (IS_GEN9_LP(i915))
+ wa_write_masked_or(wal,
+ GEN8_L3SQCREG1,
+ L3_PRIO_CREDITS_MASK,
+ L3_GENERAL_PRIO_CREDITS(62) |
+ L3_HIGH_PRIO_CREDITS(2));
+
+ /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
+ }
+}
+
+static void xcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ /* WaKBLVECSSemaphoreWaitPoll:kbl */
+ if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
+ wa_write(wal,
+ RING_SEMA_WAIT_POLL(engine->mmio_base),
+ 1);
+ }
+}
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
+ return;
+
+ wa_init_start(wal, engine->name);
+
+ if (engine->id == RCS)
+ rcs_engine_wa_init(engine);
+ else
+ xcs_engine_wa_init(engine);
+
+ wa_init_finish(wal);
+}
+
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
+{
+ wa_list_apply(engine->i915, &engine->wa_list);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+static bool intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+ const char *from)
+{
+ return wa_list_verify(engine->i915, &engine->wa_list, from);
+}
+
#include "selftests/intel_workarounds.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index b11d0623e626..7c734714b05e 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -7,11 +7,39 @@
#ifndef _I915_WORKAROUNDS_H_
#define _I915_WORKAROUNDS_H_
-int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
-int intel_ctx_workarounds_emit(struct i915_request *rq);
+#include <linux/slab.h>
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
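+/* A single register workaround: when applied, the bits in 'mask' are cleared and 'val' is OR'ed in. */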
+struct i915_wa {
+ i915_reg_t reg;
+ u32 mask;
+ u32 val;
+};
-void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
+struct i915_wa_list {
+ const char *name;
+ struct i915_wa *list;
+ unsigned int count;
+ unsigned int wa_count;
+};
+
+static inline void intel_wa_list_free(struct i915_wa_list *wal)
+{
+ kfree(wal->list);
+ memset(wal, 0, sizeof(*wal));
+}
+
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine);
+int intel_engine_emit_ctx_wa(struct i915_request *rq);
+
+void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
+bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+ const char *from);
+
+void intel_engine_init_whitelist(struct intel_engine_cs *engine);
+void intel_engine_apply_whitelist(struct intel_engine_cs *engine);
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine);
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
#endif
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 5c22f2c8d4cf..26c065c8d2c0 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -1135,7 +1135,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
n = 0;
for_each_engine(engine, i915, id) {
if (!intel_engine_can_store_dword(engine)) {
- pr_info("store-dword-imm not supported on engine=%u\n", id);
+ pr_info("store-dword-imm not supported on engine=%u\n",
+ id);
continue;
}
engines[n++] = engine;
@@ -1167,17 +1168,30 @@ static int igt_write_huge(struct i915_gem_context *ctx,
engine = engines[order[i] % n];
i = (i + 1) % (n * I915_NUM_ENGINES);
- err = __igt_write_huge(ctx, engine, obj, size, offset_low, dword, num + 1);
+ /*
+ * In order to utilize 64K pages we need to both pad the vma
+ * size and ensure the vma offset is at the start of the pt
+ * boundary; however, to improve coverage we opt for testing both
+ * aligned and unaligned offsets.
+ */
+ if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+ offset_low = round_down(offset_low,
+ I915_GTT_PAGE_SIZE_2M);
+
+ err = __igt_write_huge(ctx, engine, obj, size, offset_low,
+ dword, num + 1);
if (err)
break;
- err = __igt_write_huge(ctx, engine, obj, size, offset_high, dword, num + 1);
+ err = __igt_write_huge(ctx, engine, obj, size, offset_high,
+ dword, num + 1);
if (err)
break;
if (igt_timeout(end_time,
"%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
- __func__, engine->id, offset_low, offset_high, max_page_size))
+ __func__, engine->id, offset_low, offset_high,
+ max_page_size))
break;
}
@@ -1436,7 +1450,7 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
- if (!USES_FULL_48BIT_PPGTT(dev_priv)) {
+ if (!HAS_FULL_48BIT_PPGTT(dev_priv)) {
pr_info("48b PPGTT not supported, skipping\n");
return 0;
}
@@ -1687,10 +1701,9 @@ int i915_gem_huge_page_mock_selftests(void)
SUBTEST(igt_mock_ppgtt_huge_fill),
SUBTEST(igt_mock_ppgtt_64K),
};
- int saved_ppgtt = i915_modparams.enable_ppgtt;
struct drm_i915_private *dev_priv;
- struct pci_dev *pdev;
struct i915_hw_ppgtt *ppgtt;
+ struct pci_dev *pdev;
int err;
dev_priv = mock_gem_device();
@@ -1698,7 +1711,7 @@ int i915_gem_huge_page_mock_selftests(void)
return -ENOMEM;
/* Pretend to be a device which supports the 48b PPGTT */
- i915_modparams.enable_ppgtt = 3;
+ mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL;
pdev = dev_priv->drm.pdev;
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
@@ -1731,9 +1744,6 @@ out_close:
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
-
- i915_modparams.enable_ppgtt = saved_ppgtt;
-
drm_dev_put(&dev_priv->drm);
return err;
@@ -1753,7 +1763,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
struct i915_gem_context *ctx;
int err;
- if (!USES_PPGTT(dev_priv)) {
+ if (!HAS_PPGTT(dev_priv)) {
pr_info("PPGTT not supported, skipping live-selftests\n");
return 0;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 76df25aa90c9..7d82043aff10 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -39,7 +39,8 @@ struct live_test {
const char *func;
const char *name;
- unsigned int reset_count;
+ unsigned int reset_global;
+ unsigned int reset_engine[I915_NUM_ENGINES];
};
static int begin_live_test(struct live_test *t,
@@ -47,6 +48,8 @@ static int begin_live_test(struct live_test *t,
const char *func,
const char *name)
{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int err;
t->i915 = i915;
@@ -63,7 +66,11 @@ static int begin_live_test(struct live_test *t,
}
i915->gpu_error.missed_irq_rings = 0;
- t->reset_count = i915_reset_count(&i915->gpu_error);
+ t->reset_global = i915_reset_count(&i915->gpu_error);
+
+ for_each_engine(engine, i915, id)
+ t->reset_engine[id] =
+ i915_reset_engine_count(&i915->gpu_error, engine);
return 0;
}
@@ -71,14 +78,28 @@ static int begin_live_test(struct live_test *t,
static int end_live_test(struct live_test *t)
{
struct drm_i915_private *i915 = t->i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
if (igt_flush_test(i915, I915_WAIT_LOCKED))
return -EIO;
- if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
+ if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
pr_err("%s(%s): GPU was reset %d times!\n",
t->func, t->name,
- i915_reset_count(&i915->gpu_error) - t->reset_count);
+ i915_reset_count(&i915->gpu_error) - t->reset_global);
+ return -EIO;
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (t->reset_engine[id] ==
+ i915_reset_engine_count(&i915->gpu_error, engine))
+ continue;
+
+ pr_err("%s(%s): engine '%s' was reset %d times!\n",
+ t->func, t->name, engine->name,
+ i915_reset_engine_count(&i915->gpu_error, engine) -
+ t->reset_engine[id]);
return -EIO;
}
@@ -531,11 +552,11 @@ static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj = NULL;
+ unsigned long ncontexts, ndwords, dw;
struct drm_file *file;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
- unsigned long ncontexts, ndwords, dw;
- bool first_shared_gtt = true;
+ struct live_test t;
int err = -ENODEV;
/*
@@ -553,6 +574,10 @@ static int igt_ctx_exec(void *arg)
mutex_lock(&i915->drm.struct_mutex);
+ err = begin_live_test(&t, i915, __func__, "");
+ if (err)
+ goto out_unlock;
+
ncontexts = 0;
ndwords = 0;
dw = 0;
@@ -561,12 +586,7 @@ static int igt_ctx_exec(void *arg)
struct i915_gem_context *ctx;
unsigned int id;
- if (first_shared_gtt) {
- ctx = __create_hw_context(i915, file->driver_priv);
- first_shared_gtt = false;
- } else {
- ctx = i915_gem_create_context(i915, file->driver_priv);
- }
+ ctx = i915_gem_create_context(i915, file->driver_priv);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_unlock;
@@ -622,7 +642,7 @@ static int igt_ctx_exec(void *arg)
}
out_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (end_live_test(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
@@ -634,13 +654,14 @@ static int igt_ctx_readonly(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj = NULL;
+ struct i915_gem_context *ctx;
+ struct i915_hw_ppgtt *ppgtt;
+ unsigned long ndwords, dw;
struct drm_file *file;
I915_RND_STATE(prng);
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
- struct i915_gem_context *ctx;
- struct i915_hw_ppgtt *ppgtt;
- unsigned long ndwords, dw;
+ struct live_test t;
int err = -ENODEV;
/*
@@ -655,6 +676,10 @@ static int igt_ctx_readonly(void *arg)
mutex_lock(&i915->drm.struct_mutex);
+ err = begin_live_test(&t, i915, __func__, "");
+ if (err)
+ goto out_unlock;
+
ctx = i915_gem_create_context(i915, file->driver_priv);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
@@ -727,7 +752,324 @@ static int igt_ctx_readonly(void *arg)
}
out_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (end_live_test(&t))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ mock_file_free(i915, file);
+ return err;
+}
+
+static int check_scratch(struct i915_gem_context *ctx, u64 offset)
+{
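+	/* The target offset must not overlap an existing allocation in the ppgtt, so that only the explicit scratch write can place data there. */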
+ struct drm_mm_node *node =
+ __drm_mm_interval_first(&ctx->ppgtt->vm.mm,
+ offset, offset + sizeof(u32) - 1);
+ if (!node || node->start > offset)
+ return 0;
+
+ GEM_BUG_ON(offset >= node->start + node->size);
+
+ pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
+ upper_32_bits(offset), lower_32_bits(offset));
+ return -EINVAL;
+}
+
+static int write_to_scratch(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u64 offset, u32 value)
+{
+ struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cmd;
+ int err;
+
+ GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
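+	/* Emit a small batch: MI_STORE_DWORD_IMM writes 'value' to 'offset' in this context's ppgtt, then ends. */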
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4;
+ if (INTEL_GEN(i915) >= 8) {
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ } else {
+ *cmd++ = 0;
+ *cmd++ = offset;
+ }
+ *cmd++ = value;
+ *cmd = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ goto err;
+
+ vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
+ if (err)
+ goto err;
+
+ err = check_scratch(ctx, offset);
+ if (err)
+ goto err_unpin;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+ if (err)
+ goto err_request;
+
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+ goto skip_request;
+
+ i915_gem_object_set_active_reference(obj);
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_request_add(rq);
+
+ return 0;
+
+skip_request:
+ i915_request_skip(rq, err);
+err_request:
+ i915_request_add(rq);
+err_unpin:
+ i915_vma_unpin(vma);
+err:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int read_from_scratch(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u64 offset, u32 *value)
+{
+ struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_gem_object *obj;
+ const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
+ const u32 result = 0x100;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cmd;
+ int err;
+
+ GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ memset(cmd, POISON_INUSE, PAGE_SIZE);
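+	/* Load the dword at 'offset' into a GPR, then store the GPR back into this batch object at offset 'result' so the CPU can read it afterwards. */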
+ if (INTEL_GEN(i915) >= 8) {
+ *cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
+ *cmd++ = RCS_GPR0;
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cmd++ = RCS_GPR0;
+ *cmd++ = result;
+ *cmd++ = 0;
+ } else {
+ *cmd++ = MI_LOAD_REGISTER_MEM;
+ *cmd++ = RCS_GPR0;
+ *cmd++ = offset;
+ *cmd++ = MI_STORE_REGISTER_MEM;
+ *cmd++ = RCS_GPR0;
+ *cmd++ = result;
+ }
+ *cmd = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ goto err;
+
+ vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
+ if (err)
+ goto err;
+
+ err = check_scratch(ctx, offset);
+ if (err)
+ goto err_unpin;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+ if (err)
+ goto err_request;
+
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto skip_request;
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_request_add(rq);
+
+ err = i915_gem_object_set_to_cpu_domain(obj, false);
+ if (err)
+ goto err;
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ *value = cmd[result / sizeof(*cmd)];
+ i915_gem_object_unpin_map(obj);
+ i915_gem_object_put(obj);
+
+ return 0;
+
+skip_request:
+ i915_request_skip(rq, err);
+err_request:
+ i915_request_add(rq);
+err_unpin:
+ i915_vma_unpin(vma);
+err:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int igt_vm_isolation(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx_a, *ctx_b;
+ struct intel_engine_cs *engine;
+ struct drm_file *file;
+ I915_RND_STATE(prng);
+ unsigned long count;
+ struct live_test t;
+ unsigned int id;
+ u64 vm_total;
+ int err;
+
+ if (INTEL_GEN(i915) < 7)
+ return 0;
+
+ /*
+ * The simple goal here is that a write into one context is not
+ * observed in a second (separate page tables and scratch).
+ */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ err = begin_live_test(&t, i915, __func__, "");
+ if (err)
+ goto out_unlock;
+
+ ctx_a = i915_gem_create_context(i915, file->driver_priv);
+ if (IS_ERR(ctx_a)) {
+ err = PTR_ERR(ctx_a);
+ goto out_unlock;
+ }
+
+ ctx_b = i915_gem_create_context(i915, file->driver_priv);
+ if (IS_ERR(ctx_b)) {
+ err = PTR_ERR(ctx_b);
+ goto out_unlock;
+ }
+
+ /* We can only test VM isolation if the VMs are distinct */
+ if (ctx_a->ppgtt == ctx_b->ppgtt)
+ goto out_unlock;
+
+ vm_total = ctx_a->ppgtt->vm.total;
+ GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
+ vm_total -= I915_GTT_PAGE_SIZE;
+
+ intel_runtime_pm_get(i915);
+
+ count = 0;
+ for_each_engine(engine, i915, id) {
+ IGT_TIMEOUT(end_time);
+ unsigned long this = 0;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ while (!__igt_timeout(end_time, NULL)) {
+ u32 value = 0xc5c5c5c5;
+ u64 offset;
+
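+			/* Pick a pseudo-random target offset within the ppgtt, skipping the first page. */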
+ div64_u64_rem(i915_prandom_u64_state(&prng),
+ vm_total, &offset);
+ offset &= ~sizeof(u32);
+ offset += I915_GTT_PAGE_SIZE;
+
+ err = write_to_scratch(ctx_a, engine,
+ offset, 0xdeadbeef);
+ if (err == 0)
+ err = read_from_scratch(ctx_b, engine,
+ offset, &value);
+ if (err)
+ goto out_rpm;
+
+ if (value) {
+ pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
+ engine->name, value,
+ upper_32_bits(offset),
+ lower_32_bits(offset),
+ this);
+ err = -EINVAL;
+ goto out_rpm;
+ }
+
+ this++;
+ }
+ count += this;
+ }
+ pr_info("Checked %lu scratch offsets across %d engines\n",
+ count, INTEL_INFO(i915)->num_rings);
+
+out_rpm:
+ intel_runtime_pm_put(i915);
+out_unlock:
+ if (end_live_test(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
@@ -865,33 +1207,6 @@ out_unlock:
return err;
}
-static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
-{
- struct drm_i915_gem_object *obj;
- int err;
-
- err = i915_gem_init_aliasing_ppgtt(i915);
- if (err)
- return err;
-
- list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
- struct i915_vma *vma;
-
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma))
- continue;
-
- vma->flags &= ~I915_VMA_LOCAL_BIND;
- }
-
- return 0;
-}
-
-static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
-{
- i915_gem_fini_aliasing_ppgtt(i915);
-}
-
int i915_gem_context_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -917,32 +1232,11 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
SUBTEST(live_nop_switch),
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
+ SUBTEST(igt_vm_isolation),
};
- bool fake_alias = false;
- int err;
if (i915_terminally_wedged(&dev_priv->gpu_error))
return 0;
- /* Install a fake aliasing gtt for exercise */
- if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
- mutex_lock(&dev_priv->drm.struct_mutex);
- err = fake_aliasing_ppgtt_enable(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- if (err)
- return err;
-
- GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
- fake_alias = true;
- }
-
- err = i915_subtests(tests, dev_priv);
-
- if (fake_alias) {
- mutex_lock(&dev_priv->drm.struct_mutex);
- fake_aliasing_ppgtt_disable(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- }
-
- return err;
+ return i915_subtests(tests, dev_priv);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 128ad1cf0647..4365979d8222 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -351,7 +351,7 @@ static int igt_evict_contexts(void *arg)
* where the GTT space of the request is separate from the GGTT
* allocation required to build the request.
*/
- if (!USES_FULL_PPGTT(i915))
+ if (!HAS_FULL_PPGTT(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 127d81513671..69fe86b30fbb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -153,7 +153,7 @@ static int igt_ppgtt_alloc(void *arg)
/* Allocate a ppggt and try to fill the entire range */
- if (!USES_PPGTT(dev_priv))
+ if (!HAS_PPGTT(dev_priv))
return 0;
ppgtt = __hw_ppgtt_create(dev_priv);
@@ -1001,7 +1001,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
IGT_TIMEOUT(end_time);
int err;
- if (!USES_FULL_PPGTT(dev_priv))
+ if (!HAS_FULL_PPGTT(dev_priv))
return 0;
file = mock_file(dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
new file mode 100644
index 000000000000..208a966da8ca
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_reset.h"
+
+#include "../i915_drv.h"
+#include "../intel_ringbuffer.h"
+
+void igt_global_reset_lock(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ pr_debug("%s: current gpu_error=%08lx\n",
+ __func__, i915->gpu_error.flags);
+
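+	/* Take exclusive ownership of reset: claim the global backoff bit, then every per-engine reset bit, waiting out any reset already in progress. */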
+ while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
+ wait_event(i915->gpu_error.reset_queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &i915->gpu_error.flags));
+
+ for_each_engine(engine, i915, id) {
+ while (test_and_set_bit(I915_RESET_ENGINE + id,
+ &i915->gpu_error.flags))
+ wait_on_bit(&i915->gpu_error.flags,
+ I915_RESET_ENGINE + id,
+ TASK_UNINTERRUPTIBLE);
+ }
+}
+
+void igt_global_reset_unlock(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+
+ clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ wake_up_all(&i915->gpu_error.reset_queue);
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h
new file mode 100644
index 000000000000..5f0234d045d5
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_RESET_H__
+#define __I915_SELFTESTS_IGT_RESET_H__
+
+#include "../i915_drv.h"
+
+void igt_global_reset_lock(struct drm_i915_private *i915);
+void igt_global_reset_unlock(struct drm_i915_private *i915);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
new file mode 100644
index 000000000000..8cd34f6e6859
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -0,0 +1,199 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_spinner.h"
+
+int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
+{
+ unsigned int mode;
+ void *vaddr;
+ int err;
+
+ GEM_BUG_ON(INTEL_GEN(i915) < 8);
+
+ memset(spin, 0, sizeof(*spin));
+ spin->i915 = i915;
+
+ spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(spin->hws)) {
+ err = PTR_ERR(spin->hws);
+ goto err;
+ }
+
+ spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(spin->obj)) {
+ err = PTR_ERR(spin->obj);
+ goto err_hws;
+ }
+
+ i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+ vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+ spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
+
+ mode = i915_coherent_map_type(i915);
+ vaddr = i915_gem_object_pin_map(spin->obj, mode);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_unpin_hws;
+ }
+ spin->batch = vaddr;
+
+ return 0;
+
+err_unpin_hws:
+ i915_gem_object_unpin_map(spin->hws);
+err_obj:
+ i915_gem_object_put(spin->obj);
+err_hws:
+ i915_gem_object_put(spin->hws);
+err:
+ return err;
+}
+
+static unsigned int seqno_offset(u64 fence)
+{
+ return offset_in_page(sizeof(u32) * fence);
+}
+
+static u64 hws_address(const struct i915_vma *hws,
+ const struct i915_request *rq)
+{
+ return hws->node.start + seqno_offset(rq->fence.context);
+}
+
+static int emit_recurse_batch(struct igt_spinner *spin,
+ struct i915_request *rq,
+ u32 arbitration_command)
+{
+ struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
+ struct i915_vma *hws, *vma;
+ u32 *batch;
+ int err;
+
+ vma = i915_vma_instance(spin->obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ hws = i915_vma_instance(spin->hws, vm, NULL);
+ if (IS_ERR(hws))
+ return PTR_ERR(hws);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+
+ err = i915_vma_pin(hws, 0, 0, PIN_USER);
+ if (err)
+ goto unpin_vma;
+
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+ goto unpin_hws;
+
+ if (!i915_gem_object_has_active_reference(vma->obj)) {
+ i915_gem_object_get(vma->obj);
+ i915_gem_object_set_active_reference(vma->obj);
+ }
+
+ err = i915_vma_move_to_active(hws, rq, 0);
+ if (err)
+ goto unpin_hws;
+
+ if (!i915_gem_object_has_active_reference(hws->obj)) {
+ i915_gem_object_get(hws->obj);
+ i915_gem_object_set_active_reference(hws->obj);
+ }
+
+ batch = spin->batch;
+
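+	/* The batch writes the request's seqno to the HWS, emits the requested arbitration command, then branches back to its own start so it spins until igt_spinner_end() terminates it. */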
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = upper_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+
+ *batch++ = arbitration_command;
+
+ *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = upper_32_bits(vma->node.start);
+ *batch++ = MI_BATCH_BUFFER_END; /* not reached */
+
+ i915_gem_chipset_flush(spin->i915);
+
+ err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+
+unpin_hws:
+ i915_vma_unpin(hws);
+unpin_vma:
+ i915_vma_unpin(vma);
+ return err;
+}
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arbitration_command)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = emit_recurse_batch(spin, rq, arbitration_command);
+ if (err) {
+ i915_request_add(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+static u32
+hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
+{
+ u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
+
+ return READ_ONCE(*seqno);
+}
+
+void igt_spinner_end(struct igt_spinner *spin)
+{
+ *spin->batch = MI_BATCH_BUFFER_END;
+ i915_gem_chipset_flush(spin->i915);
+}
+
+void igt_spinner_fini(struct igt_spinner *spin)
+{
+ igt_spinner_end(spin);
+
+ i915_gem_object_unpin_map(spin->obj);
+ i915_gem_object_put(spin->obj);
+
+ i915_gem_object_unpin_map(spin->hws);
+ i915_gem_object_put(spin->hws);
+}
+
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
+{
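+	/* Wait for the request to reach the hardware, then report whether its seqno write has appeared in the HWS page. */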
+ if (!wait_event_timeout(rq->execute,
+ READ_ONCE(rq->global_seqno),
+ msecs_to_jiffies(10)))
+ return false;
+
+ return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
+ rq->fence.seqno),
+ 10) &&
+ wait_for(i915_seqno_passed(hws_seqno(spin, rq),
+ rq->fence.seqno),
+ 1000));
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
new file mode 100644
index 000000000000..391777c76dc7
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_SPINNER_H__
+#define __I915_SELFTESTS_IGT_SPINNER_H__
+
+#include "../i915_selftest.h"
+
+#include "../i915_drv.h"
+#include "../i915_request.h"
+#include "../intel_ringbuffer.h"
+#include "../i915_gem_context.h"
+
+struct igt_spinner {
+ struct drm_i915_private *i915;
+ struct drm_i915_gem_object *hws;
+ struct drm_i915_gem_object *obj;
+ u32 *batch;
+ void *seqno;
+};
+
+int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915);
+void igt_spinner_fini(struct igt_spinner *spin);
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arbitration_command);
+void igt_spinner_end(struct igt_spinner *spin);
+
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 0c0ab82b6228..32cba4cae31a 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -159,6 +159,7 @@ static int igt_guc_clients(void *args)
* Get rid of clients created during driver load because the test will
* recreate them.
*/
+ guc_clients_disable(guc);
guc_clients_destroy(guc);
if (guc->execbuf_client || guc->preempt_client) {
pr_err("guc_clients_destroy lied!\n");
@@ -197,8 +198,8 @@ static int igt_guc_clients(void *args)
goto out;
}
- /* Now create the doorbells */
- guc_clients_doorbell_init(guc);
+ /* Now enable the clients */
+ guc_clients_enable(guc);
/* each client should now have received a doorbell */
if (!client_doorbell_in_sync(guc->execbuf_client) ||
@@ -212,63 +213,17 @@ static int igt_guc_clients(void *args)
* Basic test - an attempt to reallocate a valid doorbell to the
* client it is currently assigned should not cause a failure.
*/
- err = guc_clients_doorbell_init(guc);
- if (err)
- goto out;
-
- /*
- * Negative test - a client with no doorbell (invalid db id).
- * After destroying the doorbell, the db id is changed to
- * GUC_DOORBELL_INVALID and the firmware will reject any attempt to
- * allocate a doorbell with an invalid id (db has to be reserved before
- * allocation).
- */
- destroy_doorbell(guc->execbuf_client);
- if (client_doorbell_in_sync(guc->execbuf_client)) {
- pr_err("destroy db did not work\n");
- err = -EINVAL;
- goto out;
- }
-
- unreserve_doorbell(guc->execbuf_client);
-
- __create_doorbell(guc->execbuf_client);
- err = __guc_allocate_doorbell(guc, guc->execbuf_client->stage_id);
- if (err != -EIO) {
- pr_err("unexpected (err = %d)", err);
- goto out_db;
- }
-
- if (!available_dbs(guc, guc->execbuf_client->priority)) {
- pr_err("doorbell not available when it should\n");
- err = -EIO;
- goto out_db;
- }
-
-out_db:
- /* clean after test */
- __destroy_doorbell(guc->execbuf_client);
- err = reserve_doorbell(guc->execbuf_client);
- if (err) {
- pr_err("failed to reserve back the doorbell back\n");
- }
err = create_doorbell(guc->execbuf_client);
- if (err) {
- pr_err("recreate doorbell failed\n");
- goto out;
- }
out:
/*
* Leave clean state for other test, plus the driver always destroy the
* clients during unload.
*/
- destroy_doorbell(guc->execbuf_client);
- if (guc->preempt_client)
- destroy_doorbell(guc->preempt_client);
+ guc_clients_disable(guc);
guc_clients_destroy(guc);
guc_clients_create(guc);
- guc_clients_doorbell_init(guc);
+ guc_clients_enable(guc);
unlock:
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -352,7 +307,7 @@ static int igt_guc_doorbells(void *arg)
db_id = clients[i]->doorbell_id;
- err = create_doorbell(clients[i]);
+ err = __guc_client_enable(clients[i]);
if (err) {
pr_err("[%d] Failed to create a doorbell\n", i);
goto out;
@@ -378,7 +333,7 @@ static int igt_guc_doorbells(void *arg)
out:
for (i = 0; i < ATTEMPTS; i++)
if (!IS_ERR_OR_NULL(clients[i])) {
- destroy_doorbell(clients[i]);
+ __guc_client_disable(clients[i]);
guc_client_free(clients[i]);
}
unlock:
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index db378226ac10..40efbed611de 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -27,6 +27,7 @@
#include "../i915_selftest.h"
#include "i915_random.h"
#include "igt_flush_test.h"
+#include "igt_reset.h"
#include "igt_wedge_me.h"
#include "mock_context.h"
@@ -76,7 +77,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
vaddr = i915_gem_object_pin_map(h->obj,
- HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC);
+ i915_coherent_map_type(i915));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_unpin_hws;
@@ -234,7 +235,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
return ERR_CAST(obj);
vaddr = i915_gem_object_pin_map(obj,
- HAS_LLC(h->i915) ? I915_MAP_WB : I915_MAP_WC);
+ i915_coherent_map_type(h->i915));
if (IS_ERR(vaddr)) {
i915_gem_object_put(obj);
return ERR_CAST(vaddr);
@@ -308,6 +309,7 @@ static int igt_hang_sanitycheck(void *arg)
goto unlock;
for_each_engine(engine, i915, id) {
+ struct igt_wedge_me w;
long timeout;
if (!intel_engine_can_store_dword(engine))
@@ -328,9 +330,14 @@ static int igt_hang_sanitycheck(void *arg)
i915_request_add(rq);
- timeout = i915_request_wait(rq,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ timeout = 0;
+ igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */)
+ timeout = i915_request_wait(rq,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (i915_terminally_wedged(&i915->gpu_error))
+ timeout = -EIO;
+
i915_request_put(rq);
if (timeout < 0) {
@@ -348,40 +355,6 @@ unlock:
return err;
}
-static void global_reset_lock(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- pr_debug("%s: current gpu_error=%08lx\n",
- __func__, i915->gpu_error.flags);
-
- while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
- wait_event(i915->gpu_error.reset_queue,
- !test_bit(I915_RESET_BACKOFF,
- &i915->gpu_error.flags));
-
- for_each_engine(engine, i915, id) {
- while (test_and_set_bit(I915_RESET_ENGINE + id,
- &i915->gpu_error.flags))
- wait_on_bit(&i915->gpu_error.flags,
- I915_RESET_ENGINE + id,
- TASK_UNINTERRUPTIBLE);
- }
-}
-
-static void global_reset_unlock(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id)
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
-
- clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
- wake_up_all(&i915->gpu_error.reset_queue);
-}
-
static int igt_global_reset(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -390,7 +363,7 @@ static int igt_global_reset(void *arg)
/* Check that we can issue a global GPU reset */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
mutex_lock(&i915->drm.struct_mutex);
@@ -405,7 +378,7 @@ static int igt_global_reset(void *arg)
mutex_unlock(&i915->drm.struct_mutex);
GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO;
@@ -936,7 +909,7 @@ static int igt_reset_wait(void *arg)
/* Check that we detect a stuck waiter and issue a reset */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
@@ -988,7 +961,7 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
@@ -1066,7 +1039,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
/* Check that we can recover an unbind stuck on a hanging request */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
@@ -1150,6 +1123,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
tsk = NULL;
goto out_reset;
}
+ get_task_struct(tsk);
wait_for_completion(&arg.completion);
@@ -1172,6 +1146,8 @@ out_reset:
/* The reset, even indirectly, should take less than 10ms. */
igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/)
err = kthread_stop(tsk);
+
+ put_task_struct(tsk);
}
mutex_lock(&i915->drm.struct_mutex);
@@ -1183,7 +1159,7 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
@@ -1263,7 +1239,7 @@ static int igt_reset_queue(void *arg)
/* Check that we replay pending requests following a hang */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
@@ -1394,7 +1370,7 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 1aea7a8f2224..ca461e3a5f27 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -6,215 +6,18 @@
#include "../i915_selftest.h"
#include "igt_flush_test.h"
+#include "igt_spinner.h"
+#include "i915_random.h"
#include "mock_context.h"
-struct spinner {
- struct drm_i915_private *i915;
- struct drm_i915_gem_object *hws;
- struct drm_i915_gem_object *obj;
- u32 *batch;
- void *seqno;
-};
-
-static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
-{
- unsigned int mode;
- void *vaddr;
- int err;
-
- GEM_BUG_ON(INTEL_GEN(i915) < 8);
-
- memset(spin, 0, sizeof(*spin));
- spin->i915 = i915;
-
- spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(spin->hws)) {
- err = PTR_ERR(spin->hws);
- goto err;
- }
-
- spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(spin->obj)) {
- err = PTR_ERR(spin->obj);
- goto err_hws;
- }
-
- i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
- vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_obj;
- }
- spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
-
- mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
- vaddr = i915_gem_object_pin_map(spin->obj, mode);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_unpin_hws;
- }
- spin->batch = vaddr;
-
- return 0;
-
-err_unpin_hws:
- i915_gem_object_unpin_map(spin->hws);
-err_obj:
- i915_gem_object_put(spin->obj);
-err_hws:
- i915_gem_object_put(spin->hws);
-err:
- return err;
-}
-
-static unsigned int seqno_offset(u64 fence)
-{
- return offset_in_page(sizeof(u32) * fence);
-}
-
-static u64 hws_address(const struct i915_vma *hws,
- const struct i915_request *rq)
-{
- return hws->node.start + seqno_offset(rq->fence.context);
-}
-
-static int emit_recurse_batch(struct spinner *spin,
- struct i915_request *rq,
- u32 arbitration_command)
-{
- struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
- struct i915_vma *hws, *vma;
- u32 *batch;
- int err;
-
- vma = i915_vma_instance(spin->obj, vm, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- hws = i915_vma_instance(spin->hws, vm, NULL);
- if (IS_ERR(hws))
- return PTR_ERR(hws);
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- return err;
-
- err = i915_vma_pin(hws, 0, 0, PIN_USER);
- if (err)
- goto unpin_vma;
-
- err = i915_vma_move_to_active(vma, rq, 0);
- if (err)
- goto unpin_hws;
-
- if (!i915_gem_object_has_active_reference(vma->obj)) {
- i915_gem_object_get(vma->obj);
- i915_gem_object_set_active_reference(vma->obj);
- }
-
- err = i915_vma_move_to_active(hws, rq, 0);
- if (err)
- goto unpin_hws;
-
- if (!i915_gem_object_has_active_reference(hws->obj)) {
- i915_gem_object_get(hws->obj);
- i915_gem_object_set_active_reference(hws->obj);
- }
-
- batch = spin->batch;
-
- *batch++ = MI_STORE_DWORD_IMM_GEN4;
- *batch++ = lower_32_bits(hws_address(hws, rq));
- *batch++ = upper_32_bits(hws_address(hws, rq));
- *batch++ = rq->fence.seqno;
-
- *batch++ = arbitration_command;
-
- *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
- *batch++ = lower_32_bits(vma->node.start);
- *batch++ = upper_32_bits(vma->node.start);
- *batch++ = MI_BATCH_BUFFER_END; /* not reached */
-
- i915_gem_chipset_flush(spin->i915);
-
- err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
-
-unpin_hws:
- i915_vma_unpin(hws);
-unpin_vma:
- i915_vma_unpin(vma);
- return err;
-}
-
-static struct i915_request *
-spinner_create_request(struct spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u32 arbitration_command)
-{
- struct i915_request *rq;
- int err;
-
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return rq;
-
- err = emit_recurse_batch(spin, rq, arbitration_command);
- if (err) {
- i915_request_add(rq);
- return ERR_PTR(err);
- }
-
- return rq;
-}
-
-static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
-{
- u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
-
- return READ_ONCE(*seqno);
-}
-
-static void spinner_end(struct spinner *spin)
-{
- *spin->batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(spin->i915);
-}
-
-static void spinner_fini(struct spinner *spin)
-{
- spinner_end(spin);
-
- i915_gem_object_unpin_map(spin->obj);
- i915_gem_object_put(spin->obj);
-
- i915_gem_object_unpin_map(spin->hws);
- i915_gem_object_put(spin->hws);
-}
-
-static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
-{
- if (!wait_event_timeout(rq->execute,
- READ_ONCE(rq->global_seqno),
- msecs_to_jiffies(10)))
- return false;
-
- return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
- rq->fence.seqno),
- 10) &&
- wait_for(i915_seqno_passed(hws_seqno(spin, rq),
- rq->fence.seqno),
- 1000));
-}
-
static int live_sanitycheck(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
- struct spinner spin;
+ struct igt_spinner spin;
int err = -ENOMEM;
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
@@ -223,7 +26,7 @@ static int live_sanitycheck(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin, i915))
+ if (igt_spinner_init(&spin, i915))
goto err_unlock;
ctx = kernel_context(i915);
@@ -233,14 +36,14 @@ static int live_sanitycheck(void *arg)
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
+ rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin, rq)) {
+ if (!igt_wait_for_spinner(&spin, rq)) {
GEM_TRACE("spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -248,7 +51,7 @@ static int live_sanitycheck(void *arg)
goto err_ctx;
}
- spinner_end(&spin);
+ igt_spinner_end(&spin);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx;
@@ -259,7 +62,7 @@ static int live_sanitycheck(void *arg)
err_ctx:
kernel_context_close(ctx);
err_spin:
- spinner_fini(&spin);
+ igt_spinner_fini(&spin);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -271,7 +74,7 @@ static int live_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
- struct spinner spin_hi, spin_lo;
+ struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = -ENOMEM;
@@ -282,34 +85,36 @@ static int live_preempt(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
- if (spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
if (!ctx_hi)
goto err_spin_lo;
- ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
ctx_lo = kernel_context(i915);
if (!ctx_lo)
goto err_ctx_hi;
- ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_lo, rq)) {
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -317,16 +122,16 @@ static int live_preempt(void *arg)
goto err_ctx_lo;
}
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_hi, rq)) {
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -334,8 +139,8 @@ static int live_preempt(void *arg)
goto err_ctx_lo;
}
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx_lo;
@@ -348,9 +153,9 @@ err_ctx_lo:
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
- spinner_fini(&spin_lo);
+ igt_spinner_fini(&spin_lo);
err_spin_hi:
- spinner_fini(&spin_hi);
+ igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -362,7 +167,7 @@ static int live_late_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
- struct spinner spin_hi, spin_lo;
+ struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {};
enum intel_engine_id id;
@@ -374,10 +179,10 @@ static int live_late_preempt(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
- if (spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -391,43 +196,44 @@ static int live_late_preempt(void *arg)
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_lo, rq)) {
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
pr_err("First context failed to start\n");
goto err_wedged;
}
- rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
+ rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_NOOP);
if (IS_ERR(rq)) {
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (wait_for_spinner(&spin_hi, rq)) {
+ if (igt_wait_for_spinner(&spin_hi, rq)) {
pr_err("Second context overtook first?\n");
goto err_wedged;
}
- attr.priority = I915_PRIORITY_MAX;
+ attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
engine->schedule(rq, &attr);
- if (!wait_for_spinner(&spin_hi, rq)) {
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
pr_err("High priority context failed to preempt the low priority context\n");
GEM_TRACE_DUMP();
goto err_wedged;
}
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx_lo;
@@ -440,9 +246,9 @@ err_ctx_lo:
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
- spinner_fini(&spin_lo);
+ igt_spinner_fini(&spin_lo);
err_spin_hi:
- spinner_fini(&spin_hi);
+ igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -450,8 +256,8 @@ err_unlock:
return err;
err_wedged:
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
i915_gem_set_wedged(i915);
err = -EIO;
goto err_ctx_lo;
@@ -461,7 +267,7 @@ static int live_preempt_hang(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
- struct spinner spin_hi, spin_lo;
+ struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = -ENOMEM;
@@ -475,10 +281,10 @@ static int live_preempt_hang(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
- if (spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -497,15 +303,15 @@ static int live_preempt_hang(void *arg)
if (!intel_engine_has_preemption(engine))
continue;
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_lo, rq)) {
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -513,10 +319,10 @@ static int live_preempt_hang(void *arg)
goto err_ctx_lo;
}
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
@@ -541,7 +347,7 @@ static int live_preempt_hang(void *arg)
engine->execlists.preempt_hang.inject_hang = false;
- if (!wait_for_spinner(&spin_hi, rq)) {
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -549,8 +355,8 @@ static int live_preempt_hang(void *arg)
goto err_ctx_lo;
}
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx_lo;
@@ -563,9 +369,9 @@ err_ctx_lo:
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
- spinner_fini(&spin_lo);
+ igt_spinner_fini(&spin_lo);
err_spin_hi:
- spinner_fini(&spin_hi);
+ igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -573,6 +379,261 @@ err_unlock:
return err;
}
+static int random_range(struct rnd_state *rnd, int min, int max)
+{
+ return i915_prandom_u32_max_state(max - min, rnd) + min;
+}
+
+static int random_priority(struct rnd_state *rnd)
+{
+ return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
+}
+
+struct preempt_smoke {
+ struct drm_i915_private *i915;
+ struct i915_gem_context **contexts;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *batch;
+ unsigned int ncontext;
+ struct rnd_state prng;
+ unsigned long count;
+};
+
+static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
+{
+ return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
+ &smoke->prng)];
+}
+
+static int smoke_submit(struct preempt_smoke *smoke,
+ struct i915_gem_context *ctx, int prio,
+ struct drm_i915_gem_object *batch)
+{
+ struct i915_request *rq;
+ struct i915_vma *vma = NULL;
+ int err = 0;
+
+ if (batch) {
+ vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+ }
+
+ ctx->sched.priority = prio;
+
+ rq = i915_request_alloc(smoke->engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto unpin;
+ }
+
+ if (vma) {
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ }
+
+ i915_request_add(rq);
+
+unpin:
+ if (vma)
+ i915_vma_unpin(vma);
+
+ return err;
+}
+
+static int smoke_crescendo_thread(void *arg)
+{
+ struct preempt_smoke *smoke = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ struct i915_gem_context *ctx = smoke_context(smoke);
+ int err;
+
+ mutex_lock(&smoke->i915->drm.struct_mutex);
+ err = smoke_submit(smoke,
+ ctx, count % I915_PRIORITY_MAX,
+ smoke->batch);
+ mutex_unlock(&smoke->i915->drm.struct_mutex);
+ if (err)
+ return err;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+
+ smoke->count = count;
+ return 0;
+}
+
+static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+#define BATCH BIT(0)
+{
+ struct task_struct *tsk[I915_NUM_ENGINES] = {};
+ struct preempt_smoke arg[I915_NUM_ENGINES];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long count;
+ int err = 0;
+
+ mutex_unlock(&smoke->i915->drm.struct_mutex);
+
+ for_each_engine(engine, smoke->i915, id) {
+ arg[id] = *smoke;
+ arg[id].engine = engine;
+ if (!(flags & BATCH))
+ arg[id].batch = NULL;
+ arg[id].count = 0;
+
+		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
+ "igt/smoke:%d", id);
+ if (IS_ERR(tsk[id])) {
+ err = PTR_ERR(tsk[id]);
+ break;
+ }
+ get_task_struct(tsk[id]);
+ }
+
+ count = 0;
+ for_each_engine(engine, smoke->i915, id) {
+ int status;
+
+ if (IS_ERR_OR_NULL(tsk[id]))
+ continue;
+
+ status = kthread_stop(tsk[id]);
+ if (status && !err)
+ err = status;
+
+ count += arg[id].count;
+
+ put_task_struct(tsk[id]);
+ }
+
+ mutex_lock(&smoke->i915->drm.struct_mutex);
+
+ pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+ count, flags,
+ INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+	return err;
+}
+
+static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
+{
+ enum intel_engine_id id;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ for_each_engine(smoke->engine, smoke->i915, id) {
+ struct i915_gem_context *ctx = smoke_context(smoke);
+ int err;
+
+ err = smoke_submit(smoke,
+ ctx, random_priority(&smoke->prng),
+ flags & BATCH ? smoke->batch : NULL);
+ if (err)
+ return err;
+
+ count++;
+ }
+ } while (!__igt_timeout(end_time, NULL));
+
+ pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
+ count, flags,
+ INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ return 0;
+}
+
+static int live_preempt_smoke(void *arg)
+{
+ struct preempt_smoke smoke = {
+ .i915 = arg,
+ .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
+ .ncontext = 1024,
+ };
+ const unsigned int phase[] = { 0, BATCH };
+ int err = -ENOMEM;
+ u32 *cs;
+ int n;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
+ return 0;
+
+ smoke.contexts = kmalloc_array(smoke.ncontext,
+ sizeof(*smoke.contexts),
+ GFP_KERNEL);
+ if (!smoke.contexts)
+ return -ENOMEM;
+
+ mutex_lock(&smoke.i915->drm.struct_mutex);
+ intel_runtime_pm_get(smoke.i915);
+
+ smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
+ if (IS_ERR(smoke.batch)) {
+ err = PTR_ERR(smoke.batch);
+ goto err_unlock;
+ }
+
+ cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_batch;
+ }
+ for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
+ cs[n] = MI_ARB_CHECK;
+ cs[n] = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(smoke.batch);
+
+ err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
+ if (err)
+ goto err_batch;
+
+ for (n = 0; n < smoke.ncontext; n++) {
+ smoke.contexts[n] = kernel_context(smoke.i915);
+ if (!smoke.contexts[n])
+ goto err_ctx;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(phase); n++) {
+ err = smoke_crescendo(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+
+ err = smoke_random(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+ }
+
+err_ctx:
+ if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ for (n = 0; n < smoke.ncontext; n++) {
+ if (!smoke.contexts[n])
+ break;
+ kernel_context_close(smoke.contexts[n]);
+ }
+
+err_batch:
+ i915_gem_object_put(smoke.batch);
+err_unlock:
+ intel_runtime_pm_put(smoke.i915);
+ mutex_unlock(&smoke.i915->drm.struct_mutex);
+ kfree(smoke.contexts);
+
+ return err;
+}
+
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
@@ -580,6 +641,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
SUBTEST(live_preempt_hang),
+ SUBTEST(live_preempt_smoke),
};
if (!HAS_EXECLISTS(i915))
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index d1a0923d2f38..67017d5175b8 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -6,6 +6,9 @@
#include "../i915_selftest.h"
+#include "igt_flush_test.h"
+#include "igt_reset.h"
+#include "igt_spinner.h"
#include "igt_wedge_me.h"
#include "mock_context.h"
@@ -91,17 +94,23 @@ err_obj:
return ERR_PTR(err);
}
-static u32 get_whitelist_reg(const struct whitelist *w, unsigned int i)
+static u32
+get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
- return i < w->count ? i915_mmio_reg_offset(w->reg[i]) : w->nopid;
+ i915_reg_t reg = i < engine->whitelist.count ?
+ engine->whitelist.list[i].reg :
+ RING_NOPID(engine->mmio_base);
+
+ return i915_mmio_reg_offset(reg);
}
-static void print_results(const struct whitelist *w, const u32 *results)
+static void
+print_results(const struct intel_engine_cs *engine, const u32 *results)
{
unsigned int i;
for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
- u32 expected = get_whitelist_reg(w, i);
+ u32 expected = get_whitelist_reg(engine, i);
u32 actual = results[i];
pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
@@ -109,8 +118,7 @@ static void print_results(const struct whitelist *w, const u32 *results)
}
}
-static int check_whitelist(const struct whitelist *w,
- struct i915_gem_context *ctx,
+static int check_whitelist(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *results;
@@ -138,11 +146,11 @@ static int check_whitelist(const struct whitelist *w,
}
for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
- u32 expected = get_whitelist_reg(w, i);
+ u32 expected = get_whitelist_reg(engine, i);
u32 actual = vaddr[i];
if (expected != actual) {
- print_results(w, vaddr);
+ print_results(engine, vaddr);
pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
i, expected, actual);
@@ -159,66 +167,107 @@ out_put:
static int do_device_reset(struct intel_engine_cs *engine)
{
- i915_reset(engine->i915, ENGINE_MASK(engine->id), NULL);
+ set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags);
+ i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
return 0;
}
static int do_engine_reset(struct intel_engine_cs *engine)
{
- return i915_reset_engine(engine, NULL);
+ return i915_reset_engine(engine, "live_workarounds");
}
-static int switch_to_scratch_context(struct intel_engine_cs *engine)
+static int
+switch_to_scratch_context(struct intel_engine_cs *engine,
+ struct igt_spinner *spin)
{
struct i915_gem_context *ctx;
struct i915_request *rq;
+ int err = 0;
ctx = kernel_context(engine->i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
intel_runtime_pm_get(engine->i915);
- rq = i915_request_alloc(engine, ctx);
+
+ if (spin)
+ rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
+ else
+ rq = i915_request_alloc(engine, ctx);
+
intel_runtime_pm_put(engine->i915);
kernel_context_close(ctx);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+
+ if (IS_ERR(rq)) {
+ spin = NULL;
+ err = PTR_ERR(rq);
+ goto err;
+ }
i915_request_add(rq);
- return 0;
+ if (spin && !igt_wait_for_spinner(spin, rq)) {
+ pr_err("Spinner failed to start\n");
+ err = -ETIMEDOUT;
+ }
+
+err:
+ if (err && spin)
+ igt_spinner_end(spin);
+
+ return err;
}
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
int (*reset)(struct intel_engine_cs *),
- const struct whitelist *w,
const char *name)
{
+ struct drm_i915_private *i915 = engine->i915;
+ bool want_spin = reset == do_engine_reset;
struct i915_gem_context *ctx;
+ struct igt_spinner spin;
int err;
- ctx = kernel_context(engine->i915);
+ pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
+ engine->whitelist.count, name);
+
+ if (want_spin) {
+ err = igt_spinner_init(&spin, i915);
+ if (err)
+ return err;
+ }
+
+ ctx = kernel_context(i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- err = check_whitelist(w, ctx, engine);
+ err = check_whitelist(ctx, engine);
if (err) {
pr_err("Invalid whitelist *before* %s reset!\n", name);
goto out;
}
- err = switch_to_scratch_context(engine);
+ err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
if (err)
goto out;
+ intel_runtime_pm_get(i915);
err = reset(engine);
+ intel_runtime_pm_put(i915);
+
+ if (want_spin) {
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+ }
+
if (err) {
pr_err("%s reset failed\n", name);
goto out;
}
- err = check_whitelist(w, ctx, engine);
+ err = check_whitelist(ctx, engine);
if (err) {
pr_err("Whitelist not preserved in context across %s reset!\n",
name);
@@ -227,11 +276,11 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
kernel_context_close(ctx);
- ctx = kernel_context(engine->i915);
+ ctx = kernel_context(i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- err = check_whitelist(w, ctx, engine);
+ err = check_whitelist(ctx, engine);
if (err) {
pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
name);
@@ -247,26 +296,18 @@ static int live_reset_whitelist(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine = i915->engine[RCS];
- struct i915_gpu_error *error = &i915->gpu_error;
- struct whitelist w;
int err = 0;
/* If we reset the gpu, we should not lose the RING_NONPRIV */
- if (!engine)
- return 0;
-
- if (!whitelist_build(engine, &w))
+ if (!engine || engine->whitelist.count == 0)
return 0;
- pr_info("Checking %d whitelisted registers (RING_NONPRIV)\n", w.count);
-
- set_bit(I915_RESET_BACKOFF, &error->flags);
- set_bit(I915_RESET_ENGINE + engine->id, &error->flags);
+ igt_global_reset_lock(i915);
if (intel_has_reset_engine(i915)) {
err = check_whitelist_across_reset(engine,
- do_engine_reset, &w,
+ do_engine_reset,
"engine");
if (err)
goto out;
@@ -274,22 +315,156 @@ static int live_reset_whitelist(void *arg)
if (intel_has_gpu_reset(i915)) {
err = check_whitelist_across_reset(engine,
- do_device_reset, &w,
+ do_device_reset,
"device");
if (err)
goto out;
}
out:
- clear_bit(I915_RESET_ENGINE + engine->id, &error->flags);
- clear_bit(I915_RESET_BACKOFF, &error->flags);
+ igt_global_reset_unlock(i915);
return err;
}
+static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ bool ok = true;
+
+ ok &= intel_gt_verify_workarounds(i915, str);
+
+ for_each_engine(engine, i915, id)
+ ok &= intel_engine_verify_workarounds(engine, str);
+
+ return ok;
+}
+
+static int
+live_gpu_reset_gt_engine_workarounds(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gpu_error *error = &i915->gpu_error;
+ bool ok;
+
+ if (!intel_has_gpu_reset(i915))
+ return 0;
+
+ pr_info("Verifying after GPU reset...\n");
+
+ igt_global_reset_lock(i915);
+
+ ok = verify_gt_engine_wa(i915, "before reset");
+ if (!ok)
+ goto out;
+
+ intel_runtime_pm_get(i915);
+ set_bit(I915_RESET_HANDOFF, &error->flags);
+ i915_reset(i915, ALL_ENGINES, "live_workarounds");
+ intel_runtime_pm_put(i915);
+
+ ok = verify_gt_engine_wa(i915, "after reset");
+
+out:
+ igt_global_reset_unlock(i915);
+
+ return ok ? 0 : -ESRCH;
+}
+
+static int
+live_engine_reset_gt_engine_workarounds(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ struct i915_request *rq;
+ int ret = 0;
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
+
+ ctx = kernel_context(i915);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ igt_global_reset_lock(i915);
+
+ for_each_engine(engine, i915, id) {
+ bool ok;
+
+ pr_info("Verifying after %s reset...\n", engine->name);
+
+ ok = verify_gt_engine_wa(i915, "before reset");
+ if (!ok) {
+ ret = -ESRCH;
+ goto err;
+ }
+
+ intel_runtime_pm_get(i915);
+ i915_reset_engine(engine, "live_workarounds");
+ intel_runtime_pm_put(i915);
+
+ ok = verify_gt_engine_wa(i915, "after idle reset");
+ if (!ok) {
+ ret = -ESRCH;
+ goto err;
+ }
+
+ ret = igt_spinner_init(&spin, i915);
+ if (ret)
+ goto err;
+
+ intel_runtime_pm_get(i915);
+
+ rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ igt_spinner_fini(&spin);
+ intel_runtime_pm_put(i915);
+ goto err;
+ }
+
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ pr_err("Spinner failed to start\n");
+ igt_spinner_fini(&spin);
+ intel_runtime_pm_put(i915);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ i915_reset_engine(engine, "live_workarounds");
+
+ intel_runtime_pm_put(i915);
+
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+ ok = verify_gt_engine_wa(i915, "after busy reset");
+ if (!ok) {
+ ret = -ESRCH;
+ goto err;
+ }
+ }
+
+err:
+ igt_global_reset_unlock(i915);
+ kernel_context_close(ctx);
+
+ igt_flush_test(i915, I915_WAIT_LOCKED);
+
+ return ret;
+}
+
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_reset_whitelist),
+ SUBTEST(live_gpu_reset_gt_engine_workarounds),
+ SUBTEST(live_engine_reset_gt_engine_workarounds),
};
int err;
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 22a73da45ad5..d0c44c18db42 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -200,7 +200,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.submit_request = mock_submit_request;
i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
- lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE);
+ i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
intel_engine_init_breadcrumbs(&engine->base);
engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index 435a2c35ee8c..361e962a7969 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -206,39 +206,6 @@ static const struct mipi_dsi_host_ops intel_dsi_host_ops = {
.transfer = intel_dsi_host_transfer,
};
-static struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
- enum port port)
-{
- struct intel_dsi_host *host;
- struct mipi_dsi_device *device;
-
- host = kzalloc(sizeof(*host), GFP_KERNEL);
- if (!host)
- return NULL;
-
- host->base.ops = &intel_dsi_host_ops;
- host->intel_dsi = intel_dsi;
- host->port = port;
-
- /*
- * We should call mipi_dsi_host_register(&host->base) here, but we don't
- * have a host->dev, and we don't have OF stuff either. So just use the
- * dsi framework as a library and hope for the best. Create the dsi
- * devices by ourselves here too. Need to be careful though, because we
- * don't initialize any of the driver model devices here.
- */
- device = kzalloc(sizeof(*device), GFP_KERNEL);
- if (!device) {
- kfree(host);
- return NULL;
- }
-
- device->host = &host->base;
- host->device = device;
-
- return host;
-}
-
/*
* send a video mode command
*
@@ -290,16 +257,6 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->sb_lock);
}
-static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
-{
- return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
-}
-
-static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
-{
- return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
-}
-
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -314,6 +271,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
int ret;
DRM_DEBUG_KMS("\n");
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
if (fixed_mode) {
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
@@ -745,17 +703,6 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
const struct intel_crtc_state *pipe_config);
static void intel_dsi_unprepare(struct intel_encoder *encoder);
-static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
-{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
-
- /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
- if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
- return;
-
- msleep(msec);
-}
-
/*
* Panel enable/disable sequences from the VBT spec.
*
@@ -793,6 +740,10 @@ static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
* - wait t4 - wait t4
*/
+/*
+ * DSI port enable has to be done before pipe and plane enable, so we do it in
+ * the pre_enable hook instead of the enable hook.
+ */
static void intel_dsi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
@@ -895,17 +846,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
}
/*
- * DSI port enable has to be done before pipe and plane enable, so we do it in
- * the pre_enable hook.
- */
-static void intel_dsi_enable_nop(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- DRM_DEBUG_KMS("\n");
-}
-
-/*
* DSI port disable has to be done after pipe and plane disable, so we do it in
* the post_disable hook.
*/
@@ -1272,31 +1212,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
}
}
-static enum drm_mode_status
-intel_dsi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
-
- DRM_DEBUG_KMS("\n");
-
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
- if (fixed_mode) {
- if (mode->hdisplay > fixed_mode->hdisplay)
- return MODE_PANEL;
- if (mode->vdisplay > fixed_mode->vdisplay)
- return MODE_PANEL;
- if (fixed_mode->clock > max_dotclk)
- return MODE_CLOCK_HIGH;
- }
-
- return MODE_OK;
-}
-
/* return txclkesc cycles in terms of divider and duration in us */
static u16 txclkesc(u32 divider, unsigned int us)
{
@@ -1619,39 +1534,6 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
}
}
-static int intel_dsi_get_modes(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_display_mode *mode;
-
- DRM_DEBUG_KMS("\n");
-
- if (!intel_connector->panel.fixed_mode) {
- DRM_DEBUG_KMS("no fixed mode\n");
- return 0;
- }
-
- mode = drm_mode_duplicate(connector->dev,
- intel_connector->panel.fixed_mode);
- if (!mode) {
- DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
- return 0;
- }
-
- drm_mode_probed_add(connector, mode);
- return 1;
-}
-
-static void intel_dsi_connector_destroy(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- DRM_DEBUG_KMS("\n");
- intel_panel_fini(&intel_connector->panel);
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
@@ -1676,7 +1558,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_dsi_connector_destroy,
+ .destroy = intel_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
@@ -1684,27 +1566,57 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
-static int intel_dsi_get_panel_orientation(struct intel_connector *connector)
+static enum drm_panel_orientation
+vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
- enum i9xx_plane_id i9xx_plane;
+ struct intel_encoder *encoder = connector->encoder;
+ enum intel_display_power_domain power_domain;
+ enum drm_panel_orientation orientation;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+ enum pipe pipe;
u32 val;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- if (connector->encoder->crtc_mask == BIT(PIPE_B))
- i9xx_plane = PLANE_B;
- else
- i9xx_plane = PLANE_A;
+ if (!encoder->get_hw_state(encoder, &pipe))
+ return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
- val = I915_READ(DSPCNTR(i9xx_plane));
- if (val & DISPPLANE_ROTATE_180)
- orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
- }
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ plane = to_intel_plane(crtc->base.primary);
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+
+ val = I915_READ(DSPCNTR(plane->i9xx_plane));
+
+ if (!(val & DISPLAY_PLANE_ENABLE))
+ orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ else if (val & DISPPLANE_ROTATE_180)
+ orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+ else
+ orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
+
+ intel_display_power_put(dev_priv, power_domain);
return orientation;
}
+static enum drm_panel_orientation
+vlv_dsi_get_panel_orientation(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum drm_panel_orientation orientation;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ orientation = vlv_dsi_get_hw_panel_orientation(connector);
+ if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ return orientation;
+ }
+
+ return intel_dsi_get_panel_orientation(connector);
+}
+
static void intel_dsi_add_properties(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1722,7 +1634,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
connector->base.display_info.panel_orientation =
- intel_dsi_get_panel_orientation(connector);
+ vlv_dsi_get_panel_orientation(connector);
drm_connector_init_panel_orientation_property(
&connector->base,
connector->panel.fixed_mode->hdisplay,
@@ -1773,7 +1685,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_enable = intel_dsi_pre_enable;
- intel_encoder->enable = intel_dsi_enable_nop;
intel_encoder->disable = intel_dsi_disable;
intel_encoder->post_disable = intel_dsi_post_disable;
intel_encoder->get_hw_state = intel_dsi_get_hw_state;
@@ -1806,7 +1717,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
for_each_dsi_port(port, intel_dsi->ports) {
struct intel_dsi_host *host;
- host = intel_dsi_host_init(intel_dsi, port);
+ host = intel_dsi_host_init(intel_dsi, &intel_dsi_host_ops,
+ port);
if (!host)
goto err;
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index fe6becdcc29e..77a26fd3a44a 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
*
* derived from imx-hdmi.c(renamed to bridge/dw_hdmi.c now)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 0e6942f21a4e..820c7e3878f0 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale i.MX drm driver
*
* Copyright (C) 2011 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/component.h>
#include <linux/device.h>
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 3bd0f8a18e74..2c5bbe317353 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX drm driver - LVDS display bridge
*
* Copyright (C) 2012 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index cffd3310240e..293dd5752583 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX drm driver - Television Encoder (TVEv2)
*
* Copyright (C) 2013 Philipp Zabel, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -442,7 +434,7 @@ static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static struct clk_ops clk_tve_di_ops = {
+static const struct clk_ops clk_tve_di_ops = {
.round_rate = clk_tve_di_round_rate,
.set_rate = clk_tve_di_set_rate,
.recalc_rate = clk_tve_di_recalc_rate,
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 7d4b710b837a..058b53c0aa7e 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX IPUv3 Graphics driver
*
* Copyright (C) 2011 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/component.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 40605fdf0e33..c390924de93d 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX IPUv3 DP Overlay Planes
*
* Copyright (C) 2013 Philipp Zabel, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <drm/drmP.h>
@@ -236,9 +228,15 @@ static void ipu_plane_enable(struct ipu_plane *ipu_plane)
void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel)
{
+ int ret;
+
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
+ ret = ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
+ if (ret == -ETIMEDOUT) {
+ DRM_ERROR("[PLANE:%d] IDMAC timeout\n",
+ ipu_plane->base.base.id);
+ }
if (ipu_plane->dp && disable_dp_channel)
ipu_dp_disable_channel(ipu_plane->dp, false);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index aefd04e18f93..f3ce51121dd6 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX drm driver - parallel display implementation
*
* Copyright (C) 2012 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/component.h>
diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
index 3ce51d8dfe1c..c28b69f48555 100644
--- a/drivers/gpu/drm/meson/Kconfig
+++ b/drivers/gpu/drm/meson/Kconfig
@@ -7,6 +7,7 @@ config DRM_MESON
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
select REGMAP_MMIO
+ select MESON_CANVAS
config DRM_MESON_DW_HDMI
tristate "HDMI Synopsys Controller support for Amlogic Meson Display"
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
index c5c4cc362f02..7709f2fbb9f7 100644
--- a/drivers/gpu/drm/meson/Makefile
+++ b/drivers/gpu/drm/meson/Makefile
@@ -1,5 +1,5 @@
meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
-meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o
+meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o meson_overlay.o
obj-$(CONFIG_DRM_MESON) += meson-drm.o
obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o
diff --git a/drivers/gpu/drm/meson/meson_canvas.c b/drivers/gpu/drm/meson/meson_canvas.c
index 08f6073d967e..5de11aa7c775 100644
--- a/drivers/gpu/drm/meson/meson_canvas.c
+++ b/drivers/gpu/drm/meson/meson_canvas.c
@@ -39,6 +39,7 @@
#define CANVAS_WIDTH_HBIT 0
#define CANVAS_HEIGHT_BIT 9
#define CANVAS_BLKMODE_BIT 24
+#define CANVAS_ENDIAN_BIT 26
#define DMC_CAV_LUT_ADDR 0x50 /* 0x14 offset in data sheet */
#define CANVAS_LUT_WR_EN (0x2 << 8)
#define CANVAS_LUT_RD_EN (0x1 << 8)
@@ -47,7 +48,8 @@ void meson_canvas_setup(struct meson_drm *priv,
uint32_t canvas_index, uint32_t addr,
uint32_t stride, uint32_t height,
unsigned int wrap,
- unsigned int blkmode)
+ unsigned int blkmode,
+ unsigned int endian)
{
unsigned int val;
@@ -60,7 +62,8 @@ void meson_canvas_setup(struct meson_drm *priv,
CANVAS_WIDTH_HBIT) |
(height << CANVAS_HEIGHT_BIT) |
(wrap << 22) |
- (blkmode << CANVAS_BLKMODE_BIT));
+ (blkmode << CANVAS_BLKMODE_BIT) |
+ (endian << CANVAS_ENDIAN_BIT));
regmap_write(priv->dmc, DMC_CAV_LUT_ADDR,
CANVAS_LUT_WR_EN | canvas_index);
diff --git a/drivers/gpu/drm/meson/meson_canvas.h b/drivers/gpu/drm/meson/meson_canvas.h
index af1759da4b27..85dbf26e2826 100644
--- a/drivers/gpu/drm/meson/meson_canvas.h
+++ b/drivers/gpu/drm/meson/meson_canvas.h
@@ -23,6 +23,9 @@
#define __MESON_CANVAS_H
#define MESON_CANVAS_ID_OSD1 0x4e
+#define MESON_CANVAS_ID_VD1_0 0x60
+#define MESON_CANVAS_ID_VD1_1 0x61
+#define MESON_CANVAS_ID_VD1_2 0x62
/* Canvas configuration. */
#define MESON_CANVAS_WRAP_NONE 0x00
@@ -33,10 +36,16 @@
#define MESON_CANVAS_BLKMODE_32x32 0x01
#define MESON_CANVAS_BLKMODE_64x64 0x02
+#define MESON_CANVAS_ENDIAN_SWAP16 0x1
+#define MESON_CANVAS_ENDIAN_SWAP32 0x3
+#define MESON_CANVAS_ENDIAN_SWAP64 0x7
+#define MESON_CANVAS_ENDIAN_SWAP128 0xf
+
void meson_canvas_setup(struct meson_drm *priv,
uint32_t canvas_index, uint32_t addr,
uint32_t stride, uint32_t height,
unsigned int wrap,
- unsigned int blkmode);
+ unsigned int blkmode,
+ unsigned int endian);
#endif /* __MESON_CANVAS_H */
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 05520202c967..d78168f979db 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <linux/bitfield.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -98,6 +99,10 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
writel(crtc_state->mode.hdisplay,
priv->io_base + _REG(VPP_POSTBLEND_H_SIZE));
+ /* VD1 Preblend vertical start/end */
+ writel(FIELD_PREP(GENMASK(11, 0), 2303),
+ priv->io_base + _REG(VPP_PREBLEND_VD1_V_START_END));
+
writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE,
priv->io_base + _REG(VPP_MISC));
@@ -110,11 +115,17 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
+ DRM_DEBUG_DRIVER("\n");
+
priv->viu.osd1_enabled = false;
priv->viu.osd1_commit = false;
+ priv->viu.vd1_enabled = false;
+ priv->viu.vd1_commit = false;
+
/* Disable VPP Postblend */
- writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0,
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_VD1_POSTBLEND |
+ VPP_VD1_PREBLEND | VPP_POSTBLEND_ENABLE, 0,
priv->io_base + _REG(VPP_MISC));
if (crtc->state->event && !crtc->state->active) {
@@ -149,6 +160,7 @@ static void meson_crtc_atomic_flush(struct drm_crtc *crtc,
struct meson_drm *priv = meson_crtc->priv;
priv->viu.osd1_commit = true;
+ priv->viu.vd1_commit = true;
}
static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = {
@@ -177,26 +189,37 @@ void meson_crtc_irq(struct meson_drm *priv)
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W3));
writel_relaxed(priv->viu.osd1_blk0_cfg[4],
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W4));
-
- /* If output is interlace, make use of the Scaler */
- if (priv->viu.osd1_interlace) {
- struct drm_plane *plane = priv->primary_plane;
- struct drm_plane_state *state = plane->state;
- struct drm_rect dest = {
- .x1 = state->crtc_x,
- .y1 = state->crtc_y,
- .x2 = state->crtc_x + state->crtc_w,
- .y2 = state->crtc_y + state->crtc_h,
- };
-
- meson_vpp_setup_interlace_vscaler_osd1(priv, &dest);
- } else
- meson_vpp_disable_interlace_vscaler_osd1(priv);
-
- meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
- priv->viu.osd1_addr, priv->viu.osd1_stride,
- priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR);
+ writel_relaxed(priv->viu.osd_sc_ctrl0,
+ priv->io_base + _REG(VPP_OSD_SC_CTRL0));
+ writel_relaxed(priv->viu.osd_sc_i_wh_m1,
+ priv->io_base + _REG(VPP_OSD_SCI_WH_M1));
+ writel_relaxed(priv->viu.osd_sc_o_h_start_end,
+ priv->io_base + _REG(VPP_OSD_SCO_H_START_END));
+ writel_relaxed(priv->viu.osd_sc_o_v_start_end,
+ priv->io_base + _REG(VPP_OSD_SCO_V_START_END));
+ writel_relaxed(priv->viu.osd_sc_v_ini_phase,
+ priv->io_base + _REG(VPP_OSD_VSC_INI_PHASE));
+ writel_relaxed(priv->viu.osd_sc_v_phase_step,
+ priv->io_base + _REG(VPP_OSD_VSC_PHASE_STEP));
+ writel_relaxed(priv->viu.osd_sc_h_ini_phase,
+ priv->io_base + _REG(VPP_OSD_HSC_INI_PHASE));
+ writel_relaxed(priv->viu.osd_sc_h_phase_step,
+ priv->io_base + _REG(VPP_OSD_HSC_PHASE_STEP));
+ writel_relaxed(priv->viu.osd_sc_h_ctrl0,
+ priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
+ writel_relaxed(priv->viu.osd_sc_v_ctrl0,
+ priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
+
+ if (priv->canvas)
+ meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
+ priv->viu.osd1_addr, priv->viu.osd1_stride,
+ priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR, 0);
+ else
+ meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
+ priv->viu.osd1_addr, priv->viu.osd1_stride,
+ priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR, 0);
/* Enable OSD1 */
writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
@@ -205,6 +228,206 @@ void meson_crtc_irq(struct meson_drm *priv)
priv->viu.osd1_commit = false;
}
+ /* Update the VD1 registers */
+ if (priv->viu.vd1_enabled && priv->viu.vd1_commit) {
+
+ switch (priv->viu.vd1_planes) {
+ case 3:
+ if (priv->canvas)
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_2,
+ priv->viu.vd1_addr2,
+ priv->viu.vd1_stride2,
+ priv->viu.vd1_height2,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+ else
+ meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_2,
+ priv->viu.vd1_addr2,
+ priv->viu.vd1_stride2,
+ priv->viu.vd1_height2,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+ /* fallthrough */
+ case 2:
+ if (priv->canvas)
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_1,
+ priv->viu.vd1_addr1,
+ priv->viu.vd1_stride1,
+ priv->viu.vd1_height1,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+ else
+ meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_1,
+					priv->viu.vd1_addr1,
+					priv->viu.vd1_stride1,
+					priv->viu.vd1_height1,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+ /* fallthrough */
+ case 1:
+ if (priv->canvas)
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_0,
+ priv->viu.vd1_addr0,
+ priv->viu.vd1_stride0,
+ priv->viu.vd1_height0,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+ else
+ meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_0,
+					priv->viu.vd1_addr0,
+					priv->viu.vd1_stride0,
+					priv->viu.vd1_height0,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+		}
+
+ writel_relaxed(priv->viu.vd1_if0_gen_reg,
+ priv->io_base + _REG(VD1_IF0_GEN_REG));
+ writel_relaxed(priv->viu.vd1_if0_gen_reg,
+ priv->io_base + _REG(VD2_IF0_GEN_REG));
+ writel_relaxed(priv->viu.vd1_if0_gen_reg2,
+ priv->io_base + _REG(VD1_IF0_GEN_REG2));
+ writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
+ priv->io_base + _REG(VIU_VD1_FMT_CTRL));
+ writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
+ priv->io_base + _REG(VIU_VD2_FMT_CTRL));
+ writel_relaxed(priv->viu.viu_vd1_fmt_w,
+ priv->io_base + _REG(VIU_VD1_FMT_W));
+ writel_relaxed(priv->viu.viu_vd1_fmt_w,
+ priv->io_base + _REG(VIU_VD2_FMT_W));
+ writel_relaxed(priv->viu.vd1_if0_canvas0,
+ priv->io_base + _REG(VD1_IF0_CANVAS0));
+ writel_relaxed(priv->viu.vd1_if0_canvas0,
+ priv->io_base + _REG(VD1_IF0_CANVAS1));
+ writel_relaxed(priv->viu.vd1_if0_canvas0,
+ priv->io_base + _REG(VD2_IF0_CANVAS0));
+ writel_relaxed(priv->viu.vd1_if0_canvas0,
+ priv->io_base + _REG(VD2_IF0_CANVAS1));
+ writel_relaxed(priv->viu.vd1_if0_luma_x0,
+ priv->io_base + _REG(VD1_IF0_LUMA_X0));
+ writel_relaxed(priv->viu.vd1_if0_luma_x0,
+ priv->io_base + _REG(VD1_IF0_LUMA_X1));
+ writel_relaxed(priv->viu.vd1_if0_luma_x0,
+ priv->io_base + _REG(VD2_IF0_LUMA_X0));
+ writel_relaxed(priv->viu.vd1_if0_luma_x0,
+ priv->io_base + _REG(VD2_IF0_LUMA_X1));
+ writel_relaxed(priv->viu.vd1_if0_luma_y0,
+ priv->io_base + _REG(VD1_IF0_LUMA_Y0));
+ writel_relaxed(priv->viu.vd1_if0_luma_y0,
+ priv->io_base + _REG(VD1_IF0_LUMA_Y1));
+ writel_relaxed(priv->viu.vd1_if0_luma_y0,
+ priv->io_base + _REG(VD2_IF0_LUMA_Y0));
+ writel_relaxed(priv->viu.vd1_if0_luma_y0,
+ priv->io_base + _REG(VD2_IF0_LUMA_Y1));
+ writel_relaxed(priv->viu.vd1_if0_chroma_x0,
+ priv->io_base + _REG(VD1_IF0_CHROMA_X0));
+ writel_relaxed(priv->viu.vd1_if0_chroma_x0,
+ priv->io_base + _REG(VD1_IF0_CHROMA_X1));
+ writel_relaxed(priv->viu.vd1_if0_chroma_x0,
+ priv->io_base + _REG(VD2_IF0_CHROMA_X0));
+ writel_relaxed(priv->viu.vd1_if0_chroma_x0,
+ priv->io_base + _REG(VD2_IF0_CHROMA_X1));
+ writel_relaxed(priv->viu.vd1_if0_chroma_y0,
+ priv->io_base + _REG(VD1_IF0_CHROMA_Y0));
+ writel_relaxed(priv->viu.vd1_if0_chroma_y0,
+ priv->io_base + _REG(VD1_IF0_CHROMA_Y1));
+ writel_relaxed(priv->viu.vd1_if0_chroma_y0,
+ priv->io_base + _REG(VD2_IF0_CHROMA_Y0));
+ writel_relaxed(priv->viu.vd1_if0_chroma_y0,
+ priv->io_base + _REG(VD2_IF0_CHROMA_Y1));
+ writel_relaxed(priv->viu.vd1_if0_repeat_loop,
+ priv->io_base + _REG(VD1_IF0_RPT_LOOP));
+ writel_relaxed(priv->viu.vd1_if0_repeat_loop,
+ priv->io_base + _REG(VD2_IF0_RPT_LOOP));
+ writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
+ priv->io_base + _REG(VD1_IF0_LUMA0_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
+ priv->io_base + _REG(VD2_IF0_LUMA0_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
+ priv->io_base + _REG(VD1_IF0_LUMA1_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
+ priv->io_base + _REG(VD2_IF0_LUMA1_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
+ priv->io_base + _REG(VD1_IF0_CHROMA0_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
+ priv->io_base + _REG(VD2_IF0_CHROMA0_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
+ priv->io_base + _REG(VD1_IF0_CHROMA1_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
+ priv->io_base + _REG(VD2_IF0_CHROMA1_RPT_PAT));
+ writel_relaxed(0, priv->io_base + _REG(VD1_IF0_LUMA_PSEL));
+ writel_relaxed(0, priv->io_base + _REG(VD1_IF0_CHROMA_PSEL));
+ writel_relaxed(0, priv->io_base + _REG(VD2_IF0_LUMA_PSEL));
+ writel_relaxed(0, priv->io_base + _REG(VD2_IF0_CHROMA_PSEL));
+ writel_relaxed(priv->viu.vd1_range_map_y,
+ priv->io_base + _REG(VD1_IF0_RANGE_MAP_Y));
+ writel_relaxed(priv->viu.vd1_range_map_cb,
+ priv->io_base + _REG(VD1_IF0_RANGE_MAP_CB));
+ writel_relaxed(priv->viu.vd1_range_map_cr,
+ priv->io_base + _REG(VD1_IF0_RANGE_MAP_CR));
+ writel_relaxed(0x78404,
+ priv->io_base + _REG(VPP_SC_MISC));
+ writel_relaxed(priv->viu.vpp_pic_in_height,
+ priv->io_base + _REG(VPP_PIC_IN_HEIGHT));
+ writel_relaxed(priv->viu.vpp_postblend_vd1_h_start_end,
+ priv->io_base + _REG(VPP_POSTBLEND_VD1_H_START_END));
+ writel_relaxed(priv->viu.vpp_blend_vd2_h_start_end,
+ priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
+ writel_relaxed(priv->viu.vpp_postblend_vd1_v_start_end,
+ priv->io_base + _REG(VPP_POSTBLEND_VD1_V_START_END));
+ writel_relaxed(priv->viu.vpp_blend_vd2_v_start_end,
+ priv->io_base + _REG(VPP_BLEND_VD2_V_START_END));
+ writel_relaxed(priv->viu.vpp_hsc_region12_startp,
+ priv->io_base + _REG(VPP_HSC_REGION12_STARTP));
+ writel_relaxed(priv->viu.vpp_hsc_region34_startp,
+ priv->io_base + _REG(VPP_HSC_REGION34_STARTP));
+ writel_relaxed(priv->viu.vpp_hsc_region4_endp,
+ priv->io_base + _REG(VPP_HSC_REGION4_ENDP));
+ writel_relaxed(priv->viu.vpp_hsc_start_phase_step,
+ priv->io_base + _REG(VPP_HSC_START_PHASE_STEP));
+ writel_relaxed(priv->viu.vpp_hsc_region1_phase_slope,
+ priv->io_base + _REG(VPP_HSC_REGION1_PHASE_SLOPE));
+ writel_relaxed(priv->viu.vpp_hsc_region3_phase_slope,
+ priv->io_base + _REG(VPP_HSC_REGION3_PHASE_SLOPE));
+ writel_relaxed(priv->viu.vpp_line_in_length,
+ priv->io_base + _REG(VPP_LINE_IN_LENGTH));
+ writel_relaxed(priv->viu.vpp_preblend_h_size,
+ priv->io_base + _REG(VPP_PREBLEND_H_SIZE));
+ writel_relaxed(priv->viu.vpp_vsc_region12_startp,
+ priv->io_base + _REG(VPP_VSC_REGION12_STARTP));
+ writel_relaxed(priv->viu.vpp_vsc_region34_startp,
+ priv->io_base + _REG(VPP_VSC_REGION34_STARTP));
+ writel_relaxed(priv->viu.vpp_vsc_region4_endp,
+ priv->io_base + _REG(VPP_VSC_REGION4_ENDP));
+ writel_relaxed(priv->viu.vpp_vsc_start_phase_step,
+ priv->io_base + _REG(VPP_VSC_START_PHASE_STEP));
+ writel_relaxed(priv->viu.vpp_vsc_ini_phase,
+ priv->io_base + _REG(VPP_VSC_INI_PHASE));
+ writel_relaxed(priv->viu.vpp_vsc_phase_ctrl,
+ priv->io_base + _REG(VPP_VSC_PHASE_CTRL));
+ writel_relaxed(priv->viu.vpp_hsc_phase_ctrl,
+ priv->io_base + _REG(VPP_HSC_PHASE_CTRL));
+ writel_relaxed(0x42, priv->io_base + _REG(VPP_SCALE_COEF_IDX));
+
+ /* Enable VD1 */
+ writel_bits_relaxed(VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
+ VPP_COLOR_MNG_ENABLE,
+ VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
+ VPP_COLOR_MNG_ENABLE,
+ priv->io_base + _REG(VPP_MISC));
+
+ priv->viu.vd1_commit = false;
+ }
+
drm_crtc_handle_vblank(priv->crtc);
spin_lock_irqsave(&priv->drm->event_lock, flags);
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 348b5a198b9d..3ee4d4a4ecba 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -41,6 +41,7 @@
#include "meson_drv.h"
#include "meson_plane.h"
+#include "meson_overlay.h"
#include "meson_crtc.h"
#include "meson_venc_cvbs.h"
@@ -208,24 +209,51 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
goto free_drm;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
- if (!res) {
- ret = -EINVAL;
- goto free_drm;
- }
- /* Simply ioremap since it may be a shared register zone */
- regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!regs) {
- ret = -EADDRNOTAVAIL;
- goto free_drm;
- }
+ priv->canvas = meson_canvas_get(dev);
+ if (!IS_ERR(priv->canvas)) {
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1);
+ if (ret)
+ goto free_drm;
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ goto free_drm;
+ }
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+ goto free_drm;
+ }
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
+ goto free_drm;
+ }
+ } else {
+ priv->canvas = NULL;
- priv->dmc = devm_regmap_init_mmio(dev, regs,
- &meson_regmap_config);
- if (IS_ERR(priv->dmc)) {
- dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
- ret = PTR_ERR(priv->dmc);
- goto free_drm;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
+ if (!res) {
+ ret = -EINVAL;
+ goto free_drm;
+ }
+ /* Simply ioremap since it may be a shared register zone */
+ regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!regs) {
+ ret = -EADDRNOTAVAIL;
+ goto free_drm;
+ }
+
+ priv->dmc = devm_regmap_init_mmio(dev, regs,
+ &meson_regmap_config);
+ if (IS_ERR(priv->dmc)) {
+ dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
+ ret = PTR_ERR(priv->dmc);
+ goto free_drm;
+ }
}
priv->vsync_irq = platform_get_irq(pdev, 0);
@@ -264,6 +292,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
if (ret)
goto free_drm;
+ ret = meson_overlay_create(priv);
+ if (ret)
+ goto free_drm;
+
ret = meson_crtc_create(priv);
if (ret)
goto free_drm;
@@ -300,6 +332,14 @@ static int meson_drv_bind(struct device *dev)
static void meson_drv_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
+ struct meson_drm *priv = drm->dev_private;
+
+ if (priv->canvas) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2);
+ }
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index aab96260da9f..4dccf4cd042a 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/of.h>
+#include <linux/soc/amlogic/meson-canvas.h>
#include <drm/drmP.h>
struct meson_drm {
@@ -31,9 +32,16 @@ struct meson_drm {
struct regmap *dmc;
int vsync_irq;
+ struct meson_canvas *canvas;
+ u8 canvas_id_osd1;
+ u8 canvas_id_vd1_0;
+ u8 canvas_id_vd1_1;
+ u8 canvas_id_vd1_2;
+
struct drm_device *drm;
struct drm_crtc *crtc;
struct drm_plane *primary_plane;
+ struct drm_plane *overlay_plane;
/* Components Data */
struct {
@@ -45,6 +53,64 @@ struct meson_drm {
uint32_t osd1_addr;
uint32_t osd1_stride;
uint32_t osd1_height;
+ uint32_t osd_sc_ctrl0;
+ uint32_t osd_sc_i_wh_m1;
+ uint32_t osd_sc_o_h_start_end;
+ uint32_t osd_sc_o_v_start_end;
+ uint32_t osd_sc_v_ini_phase;
+ uint32_t osd_sc_v_phase_step;
+ uint32_t osd_sc_h_ini_phase;
+ uint32_t osd_sc_h_phase_step;
+ uint32_t osd_sc_h_ctrl0;
+ uint32_t osd_sc_v_ctrl0;
+
+ bool vd1_enabled;
+ bool vd1_commit;
+ unsigned int vd1_planes;
+ uint32_t vd1_if0_gen_reg;
+ uint32_t vd1_if0_luma_x0;
+ uint32_t vd1_if0_luma_y0;
+ uint32_t vd1_if0_chroma_x0;
+ uint32_t vd1_if0_chroma_y0;
+ uint32_t vd1_if0_repeat_loop;
+ uint32_t vd1_if0_luma0_rpt_pat;
+ uint32_t vd1_if0_chroma0_rpt_pat;
+ uint32_t vd1_range_map_y;
+ uint32_t vd1_range_map_cb;
+ uint32_t vd1_range_map_cr;
+ uint32_t viu_vd1_fmt_w;
+ uint32_t vd1_if0_canvas0;
+ uint32_t vd1_if0_gen_reg2;
+ uint32_t viu_vd1_fmt_ctrl;
+ uint32_t vd1_addr0;
+ uint32_t vd1_addr1;
+ uint32_t vd1_addr2;
+ uint32_t vd1_stride0;
+ uint32_t vd1_stride1;
+ uint32_t vd1_stride2;
+ uint32_t vd1_height0;
+ uint32_t vd1_height1;
+ uint32_t vd1_height2;
+ uint32_t vpp_pic_in_height;
+ uint32_t vpp_postblend_vd1_h_start_end;
+ uint32_t vpp_postblend_vd1_v_start_end;
+ uint32_t vpp_hsc_region12_startp;
+ uint32_t vpp_hsc_region34_startp;
+ uint32_t vpp_hsc_region4_endp;
+ uint32_t vpp_hsc_start_phase_step;
+ uint32_t vpp_hsc_region1_phase_slope;
+ uint32_t vpp_hsc_region3_phase_slope;
+ uint32_t vpp_line_in_length;
+ uint32_t vpp_preblend_h_size;
+ uint32_t vpp_vsc_region12_startp;
+ uint32_t vpp_vsc_region34_startp;
+ uint32_t vpp_vsc_region4_endp;
+ uint32_t vpp_vsc_start_phase_step;
+ uint32_t vpp_vsc_ini_phase;
+ uint32_t vpp_vsc_phase_ctrl;
+ uint32_t vpp_hsc_phase_ctrl;
+ uint32_t vpp_blend_vd2_h_start_end;
+ uint32_t vpp_blend_vd2_v_start_end;
} viu;
struct {
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index df7247cd93f9..d8c5cc34e22e 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -594,17 +594,7 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
dev_dbg(connector->dev->dev, "%s: vclk:%d venc=%d hdmi=%d\n", __func__,
vclk_freq, venc_freq, hdmi_freq);
- /* Finally filter by configurable vclk frequencies for VIC modes */
- switch (vclk_freq) {
- case 54000:
- case 74250:
- case 148500:
- case 297000:
- case 594000:
- return MODE_OK;
- }
-
- return MODE_CLOCK_RANGE;
+ return meson_vclk_vic_supported_freq(vclk_freq);
}
/* Encoder */
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
new file mode 100644
index 000000000000..691a9fd16b36
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/bitfield.h>
+#include <linux/platform_device.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_rect.h>
+
+#include "meson_overlay.h"
+#include "meson_vpp.h"
+#include "meson_viu.h"
+#include "meson_canvas.h"
+#include "meson_registers.h"
+
+/* VD1_IF0_GEN_REG */
+#define VD_URGENT_CHROMA BIT(28)
+#define VD_URGENT_LUMA BIT(27)
+#define VD_HOLD_LINES(lines) FIELD_PREP(GENMASK(24, 19), lines)
+#define VD_DEMUX_MODE_RGB BIT(16)
+#define VD_BYTES_PER_PIXEL(val) FIELD_PREP(GENMASK(15, 14), val)
+#define VD_CHRO_RPT_LASTL_CTRL BIT(6)
+#define VD_LITTLE_ENDIAN BIT(4)
+#define VD_SEPARATE_EN BIT(1)
+#define VD_ENABLE BIT(0)
+
+/* VD1_IF0_CANVAS0 */
+#define CANVAS_ADDR2(addr) FIELD_PREP(GENMASK(23, 16), addr)
+#define CANVAS_ADDR1(addr) FIELD_PREP(GENMASK(15, 8), addr)
+#define CANVAS_ADDR0(addr) FIELD_PREP(GENMASK(7, 0), addr)
+
+/* VD1_IF0_LUMA_X0 VD1_IF0_CHROMA_X0 */
+#define VD_X_START(value) FIELD_PREP(GENMASK(14, 0), value)
+#define VD_X_END(value) FIELD_PREP(GENMASK(30, 16), value)
+
+/* VD1_IF0_LUMA_Y0 VD1_IF0_CHROMA_Y0 */
+#define VD_Y_START(value) FIELD_PREP(GENMASK(12, 0), value)
+#define VD_Y_END(value) FIELD_PREP(GENMASK(28, 16), value)
+
+/* VD1_IF0_GEN_REG2 */
+#define VD_COLOR_MAP(value) FIELD_PREP(GENMASK(1, 0), value)
+
+/* VIU_VD1_FMT_CTRL */
+#define VD_HORZ_Y_C_RATIO(value) FIELD_PREP(GENMASK(22, 21), value)
+#define VD_HORZ_FMT_EN BIT(20)
+#define VD_VERT_RPT_LINE0 BIT(16)
+#define VD_VERT_INITIAL_PHASE(value) FIELD_PREP(GENMASK(11, 8), value)
+#define VD_VERT_PHASE_STEP(value) FIELD_PREP(GENMASK(7, 1), value)
+#define VD_VERT_FMT_EN BIT(0)
+
+/* VPP_POSTBLEND_VD1_H_START_END */
+#define VD_H_END(value) FIELD_PREP(GENMASK(11, 0), value)
+#define VD_H_START(value) FIELD_PREP(GENMASK(27, 16), value)
+
+/* VPP_POSTBLEND_VD1_V_START_END */
+#define VD_V_END(value) FIELD_PREP(GENMASK(11, 0), value)
+#define VD_V_START(value) FIELD_PREP(GENMASK(27, 16), value)
+
+/* VPP_BLEND_VD2_V_START_END */
+#define VD2_V_END(value) FIELD_PREP(GENMASK(11, 0), value)
+#define VD2_V_START(value) FIELD_PREP(GENMASK(27, 16), value)
+
+/* VIU_VD1_FMT_W */
+#define VD_V_WIDTH(value) FIELD_PREP(GENMASK(11, 0), value)
+#define VD_H_WIDTH(value) FIELD_PREP(GENMASK(27, 16), value)
+
+/* VPP_HSC_REGION12_STARTP VPP_HSC_REGION34_STARTP */
+#define VD_REGION24_START(value) FIELD_PREP(GENMASK(11, 0), value)
+#define VD_REGION13_END(value) FIELD_PREP(GENMASK(27, 16), value)
+
+struct meson_overlay {
+ struct drm_plane base;
+ struct meson_drm *priv;
+};
+#define to_meson_overlay(x) container_of(x, struct meson_overlay, base)
+
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
+
+static int meson_overlay_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (!state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ return drm_atomic_helper_check_plane_state(state, crtc_state,
+ FRAC_16_16(1, 5),
+ FRAC_16_16(5, 1),
+ true, true);
+}
+
+/* Takes a fixed 16.16 number and converts it to integer. */
+static inline int64_t fixed16_to_int(int64_t value)
+{
+ return value >> 16;
+}
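A minimal standalone sketch of the 16.16 convention shared by FRAC_16_16() and fixed16_to_int() above; the userspace harness is illustrative only and assumes nothing beyond the two definitions copied from this file:

        #include <stdint.h>
        #include <assert.h>

        #define FRAC_16_16(mult, div) (((mult) << 16) / (div))

        static inline int64_t fixed16_to_int(int64_t value)
        {
                return value >> 16;
        }

        int main(void)
        {
                assert(FRAC_16_16(1, 5) == 0x3333);   /* ~0.2: allows up to 5x upscaling  */
                assert(FRAC_16_16(5, 1) == 0x50000);  /* 5.0: allows up to 5x downscaling */
                assert(fixed16_to_int(0x50000) == 5); /* truncation drops the fraction    */
                return 0;
        }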
+
+static const uint8_t skip_tab[6] = {
+ 0x24, 0x04, 0x68, 0x48, 0x28, 0x08,
+};
+
+static void meson_overlay_get_vertical_phase(unsigned int ratio_y, int *phase,
+ int *repeat, bool interlace)
+{
+ int offset_in = 0;
+ int offset_out = 0;
+ int repeat_skip = 0;
+
+ if (!interlace && ratio_y > (1 << 18))
+ offset_out = (1 * ratio_y) >> 10;
+
+ while ((offset_in + (4 << 8)) <= offset_out) {
+ repeat_skip++;
+ offset_in += 4 << 8;
+ }
+
+ *phase = (offset_out - offset_in) >> 2;
+
+ if (*phase > 0x100)
+ repeat_skip++;
+
+ *phase = *phase & 0xff;
+
+ if (repeat_skip > 5)
+ repeat_skip = 5;
+
+ *repeat = skip_tab[repeat_skip];
+}
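A hedged hand trace of the helper above for a plain progressive 2:1 vertical downscale (ratio_y = 2 << 18 = 0x80000); the values follow from the code, not from hardware measurements:

        int phase, repeat;

        meson_overlay_get_vertical_phase(0x80000, &phase, &repeat, false);
        /* offset_out = 0x80000 >> 10 = 0x200; no (4 << 8) step fits, so */
        /* phase = 0x200 >> 2 = 0x80 and repeat = skip_tab[0] = 0x24     */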
+
+static void meson_overlay_setup_scaler_params(struct meson_drm *priv,
+ struct drm_plane *plane,
+ bool interlace_mode)
+{
+ struct drm_crtc_state *crtc_state = priv->crtc->state;
+ int video_top, video_left, video_width, video_height;
+ struct drm_plane_state *state = plane->state;
+ unsigned int vd_start_lines, vd_end_lines;
+ unsigned int hd_start_lines, hd_end_lines;
+ unsigned int crtc_height, crtc_width;
+ unsigned int vsc_startp, vsc_endp;
+ unsigned int hsc_startp, hsc_endp;
+ unsigned int crop_top, crop_left;
+ int vphase, vphase_repeat_skip;
+ unsigned int ratio_x, ratio_y;
+ int temp_height, temp_width;
+ unsigned int w_in, h_in;
+ int temp, start, end;
+
+ if (!crtc_state) {
+ DRM_ERROR("Invalid crtc_state\n");
+ return;
+ }
+
+ crtc_height = crtc_state->mode.vdisplay;
+ crtc_width = crtc_state->mode.hdisplay;
+
+ w_in = fixed16_to_int(state->src_w);
+ h_in = fixed16_to_int(state->src_h);
+ crop_top = fixed16_to_int(state->src_x);
+ crop_left = fixed16_to_int(state->src_x);
+
+ video_top = state->crtc_y;
+ video_left = state->crtc_x;
+ video_width = state->crtc_w;
+ video_height = state->crtc_h;
+
+ DRM_DEBUG("crtc_width %d crtc_height %d interlace %d\n",
+ crtc_width, crtc_height, interlace_mode);
+ DRM_DEBUG("w_in %d h_in %d crop_top %d crop_left %d\n",
+ w_in, h_in, crop_top, crop_left);
+ DRM_DEBUG("video top %d left %d width %d height %d\n",
+ video_top, video_left, video_width, video_height);
+
+ ratio_x = (w_in << 18) / video_width;
+ ratio_y = (h_in << 18) / video_height;
+
+ if (ratio_x * video_width < (w_in << 18))
+ ratio_x++;
+
+ DRM_DEBUG("ratio x 0x%x y 0x%x\n", ratio_x, ratio_y);
+
+ meson_overlay_get_vertical_phase(ratio_y, &vphase, &vphase_repeat_skip,
+ interlace_mode);
+
+ DRM_DEBUG("vphase 0x%x skip %d\n", vphase, vphase_repeat_skip);
+
+ /* Vertical */
+
+ start = video_top + video_height / 2 - ((h_in << 17) / ratio_y);
+ end = (h_in << 18) / ratio_y + start - 1;
+
+ if (video_top < 0 && start < 0)
+ vd_start_lines = (-(start) * ratio_y) >> 18;
+ else if (start < video_top)
+ vd_start_lines = ((video_top - start) * ratio_y) >> 18;
+ else
+ vd_start_lines = 0;
+
+ if (video_top < 0)
+ temp_height = min_t(unsigned int,
+ video_top + video_height - 1,
+ crtc_height - 1);
+ else
+ temp_height = min_t(unsigned int,
+ video_top + video_height - 1,
+ crtc_height - 1) - video_top + 1;
+
+ temp = vd_start_lines + (temp_height * ratio_y >> 18);
+ vd_end_lines = (temp <= (h_in - 1)) ? temp : (h_in - 1);
+
+ vd_start_lines += crop_left;
+ vd_end_lines += crop_left;
+
+ /*
+ * TOFIX: Input frames are handled and scaled like progressive frames;
+ * proper handling of interlaced field input frames needs to be figured
+ * out using the proper framebuffer flags set by userspace.
+ */
+ if (interlace_mode) {
+ start >>= 1;
+ end >>= 1;
+ }
+
+ vsc_startp = max_t(int, start,
+ max_t(int, 0, video_top));
+ vsc_endp = min_t(int, end,
+ min_t(int, crtc_height - 1,
+ video_top + video_height - 1));
+
+ DRM_DEBUG("vsc startp %d endp %d start_lines %d end_lines %d\n",
+ vsc_startp, vsc_endp, vd_start_lines, vd_end_lines);
+
+ /* Horizontal */
+
+ start = video_left + video_width / 2 - ((w_in << 17) / ratio_x);
+ end = (w_in << 18) / ratio_x + start - 1;
+
+ if (video_left < 0 && start < 0)
+ hd_start_lines = (-(start) * ratio_x) >> 18;
+ else if (start < video_left)
+ hd_start_lines = ((video_left - start) * ratio_x) >> 18;
+ else
+ hd_start_lines = 0;
+
+ if (video_left < 0)
+ temp_width = min_t(unsigned int,
+ video_left + video_width - 1,
+ crtc_width - 1);
+ else
+ temp_width = min_t(unsigned int,
+ video_left + video_width - 1,
+ crtc_width - 1) - video_left + 1;
+
+ temp = hd_start_lines + (temp_width * ratio_x >> 18);
+ hd_end_lines = (temp <= (w_in - 1)) ? temp : (w_in - 1);
+
+ priv->viu.vpp_line_in_length = hd_end_lines - hd_start_lines + 1;
+ hsc_startp = max_t(int, start, max_t(int, 0, video_left));
+ hsc_endp = min_t(int, end, min_t(int, crtc_width - 1,
+ video_left + video_width - 1));
+
+ hd_start_lines += crop_top;
+ hd_end_lines += crop_top;
+
+ DRM_DEBUG("hsc startp %d endp %d start_lines %d end_lines %d\n",
+ hsc_startp, hsc_endp, hd_start_lines, hd_end_lines);
+
+ priv->viu.vpp_vsc_start_phase_step = ratio_y << 6;
+
+ priv->viu.vpp_vsc_ini_phase = vphase << 8;
+ priv->viu.vpp_vsc_phase_ctrl = (1 << 13) | (4 << 8) |
+ vphase_repeat_skip;
+
+ priv->viu.vd1_if0_luma_x0 = VD_X_START(hd_start_lines) |
+ VD_X_END(hd_end_lines);
+ priv->viu.vd1_if0_chroma_x0 = VD_X_START(hd_start_lines >> 1) |
+ VD_X_END(hd_end_lines >> 1);
+
+ priv->viu.viu_vd1_fmt_w =
+ VD_H_WIDTH(hd_end_lines - hd_start_lines + 1) |
+ VD_V_WIDTH(hd_end_lines/2 - hd_start_lines/2 + 1);
+
+ priv->viu.vd1_if0_luma_y0 = VD_Y_START(vd_start_lines) |
+ VD_Y_END(vd_end_lines);
+
+ priv->viu.vd1_if0_chroma_y0 = VD_Y_START(vd_start_lines >> 1) |
+ VD_Y_END(vd_end_lines >> 1);
+
+ priv->viu.vpp_pic_in_height = h_in;
+
+ priv->viu.vpp_postblend_vd1_h_start_end = VD_H_START(hsc_startp) |
+ VD_H_END(hsc_endp);
+ priv->viu.vpp_blend_vd2_h_start_end = VD_H_START(hd_start_lines) |
+ VD_H_END(hd_end_lines);
+ priv->viu.vpp_hsc_region12_startp = VD_REGION13_END(0) |
+ VD_REGION24_START(hsc_startp);
+ priv->viu.vpp_hsc_region34_startp =
+ VD_REGION13_END(hsc_startp) |
+ VD_REGION24_START(hsc_endp - hsc_startp);
+ priv->viu.vpp_hsc_region4_endp = hsc_endp - hsc_startp;
+ priv->viu.vpp_hsc_start_phase_step = ratio_x << 6;
+ priv->viu.vpp_hsc_region1_phase_slope = 0;
+ priv->viu.vpp_hsc_region3_phase_slope = 0;
+ priv->viu.vpp_hsc_phase_ctrl = (1 << 21) | (4 << 16);
+
+ priv->viu.vpp_line_in_length = hd_end_lines - hd_start_lines + 1;
+ priv->viu.vpp_preblend_h_size = hd_end_lines - hd_start_lines + 1;
+
+ priv->viu.vpp_postblend_vd1_v_start_end = VD_V_START(vsc_startp) |
+ VD_V_END(vsc_endp);
+ priv->viu.vpp_blend_vd2_v_start_end =
+ VD2_V_START((vd_end_lines + 1) >> 1) |
+ VD2_V_END(vd_end_lines);
+
+ priv->viu.vpp_vsc_region12_startp = 0;
+ priv->viu.vpp_vsc_region34_startp =
+ VD_REGION13_END(vsc_endp - vsc_startp) |
+ VD_REGION24_START(vsc_endp - vsc_startp);
+ priv->viu.vpp_vsc_region4_endp = vsc_endp - vsc_startp;
+ priv->viu.vpp_vsc_start_phase_step = ratio_y << 6;
+}
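To make the fixed-point widths concrete, a hedged hand-worked example for the horizontal path (a 1280-pixel source stretched to a 1920-pixel destination; derived from the expressions above):

        /* ratio_x = (1280 << 18) / 1920 = 174762 (0x2aaaa), then bumped to     */
        /* 0x2aaab because 174762 * 1920 < 1280 << 18 (the ratio is rounded up) */
        /* vpp_hsc_start_phase_step = 0x2aaab << 6 = 0xaaaac0, i.e. the .18     */
        /* src/dst ratio widened into the phase-step field's 24-bit fraction    */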
+
+static void meson_overlay_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct meson_overlay *meson_overlay = to_meson_overlay(plane);
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ struct meson_drm *priv = meson_overlay->priv;
+ struct drm_gem_cma_object *gem;
+ unsigned long flags;
+ bool interlace_mode;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Fallback if the canvas provider is not available */
+ if (!priv->canvas) {
+ priv->canvas_id_vd1_0 = MESON_CANVAS_ID_VD1_0;
+ priv->canvas_id_vd1_1 = MESON_CANVAS_ID_VD1_1;
+ priv->canvas_id_vd1_2 = MESON_CANVAS_ID_VD1_2;
+ }
+
+ interlace_mode = state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
+
+ spin_lock_irqsave(&priv->drm->event_lock, flags);
+
+ priv->viu.vd1_if0_gen_reg = VD_URGENT_CHROMA |
+ VD_URGENT_LUMA |
+ VD_HOLD_LINES(9) |
+ VD_CHRO_RPT_LASTL_CTRL |
+ VD_ENABLE;
+
+ /* Setup scaler params */
+ meson_overlay_setup_scaler_params(priv, plane, interlace_mode);
+
+ priv->viu.vd1_if0_repeat_loop = 0;
+ priv->viu.vd1_if0_luma0_rpt_pat = interlace_mode ? 8 : 0;
+ priv->viu.vd1_if0_chroma0_rpt_pat = interlace_mode ? 8 : 0;
+ priv->viu.vd1_range_map_y = 0;
+ priv->viu.vd1_range_map_cb = 0;
+ priv->viu.vd1_range_map_cr = 0;
+
+ /* Default values for RGB888/YUV444 */
+ priv->viu.vd1_if0_gen_reg2 = 0;
+ priv->viu.viu_vd1_fmt_ctrl = 0;
+
+ switch (fb->format->format) {
+ /* TOFIX DRM_FORMAT_RGB888 should be supported */
+ case DRM_FORMAT_YUYV:
+ priv->viu.vd1_if0_gen_reg |= VD_BYTES_PER_PIXEL(1);
+ priv->viu.vd1_if0_canvas0 =
+ CANVAS_ADDR2(priv->canvas_id_vd1_0) |
+ CANVAS_ADDR1(priv->canvas_id_vd1_0) |
+ CANVAS_ADDR0(priv->canvas_id_vd1_0);
+ priv->viu.viu_vd1_fmt_ctrl = VD_HORZ_Y_C_RATIO(1) | /* /2 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(16) | /* /2 */
+ VD_VERT_FMT_EN;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ priv->viu.vd1_if0_gen_reg |= VD_SEPARATE_EN;
+ priv->viu.vd1_if0_canvas0 =
+ CANVAS_ADDR2(priv->canvas_id_vd1_1) |
+ CANVAS_ADDR1(priv->canvas_id_vd1_1) |
+ CANVAS_ADDR0(priv->canvas_id_vd1_0);
+ if (fb->format->format == DRM_FORMAT_NV12)
+ priv->viu.vd1_if0_gen_reg2 = VD_COLOR_MAP(1);
+ else
+ priv->viu.vd1_if0_gen_reg2 = VD_COLOR_MAP(2);
+ priv->viu.viu_vd1_fmt_ctrl = VD_HORZ_Y_C_RATIO(1) | /* /2 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(8) | /* /4 */
+ VD_VERT_FMT_EN;
+ break;
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YUV410:
+ priv->viu.vd1_if0_gen_reg |= VD_SEPARATE_EN;
+ priv->viu.vd1_if0_canvas0 =
+ CANVAS_ADDR2(priv->canvas_id_vd1_2) |
+ CANVAS_ADDR1(priv->canvas_id_vd1_1) |
+ CANVAS_ADDR0(priv->canvas_id_vd1_0);
+ switch (fb->format->format) {
+ case DRM_FORMAT_YUV422:
+ priv->viu.viu_vd1_fmt_ctrl =
+ VD_HORZ_Y_C_RATIO(1) | /* /2 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(16) | /* /2 */
+ VD_VERT_FMT_EN;
+ break;
+ case DRM_FORMAT_YUV420:
+ priv->viu.viu_vd1_fmt_ctrl =
+ VD_HORZ_Y_C_RATIO(1) | /* /2 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(8) | /* /4 */
+ VD_VERT_FMT_EN;
+ break;
+ case DRM_FORMAT_YUV411:
+ priv->viu.viu_vd1_fmt_ctrl =
+ VD_HORZ_Y_C_RATIO(2) | /* /4 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(16) | /* /2 */
+ VD_VERT_FMT_EN;
+ break;
+ case DRM_FORMAT_YUV410:
+ priv->viu.viu_vd1_fmt_ctrl =
+ VD_HORZ_Y_C_RATIO(2) | /* /4 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(8) | /* /4 */
+ VD_VERT_FMT_EN;
+ break;
+ }
+ break;
+ }
+
+ /* Update Canvas with buffer address */
+ priv->viu.vd1_planes = drm_format_num_planes(fb->format->format);
+
+ switch (priv->viu.vd1_planes) {
+ case 3:
+ gem = drm_fb_cma_get_gem_obj(fb, 2);
+ priv->viu.vd1_addr2 = gem->paddr + fb->offsets[2];
+ priv->viu.vd1_stride2 = fb->pitches[2];
+ priv->viu.vd1_height2 =
+ drm_format_plane_height(fb->height,
+ fb->format->format, 2);
+ DRM_DEBUG("plane 2 addr 0x%x stride %d height %d\n",
+ priv->viu.vd1_addr2,
+ priv->viu.vd1_stride2,
+ priv->viu.vd1_height2);
+ /* fallthrough */
+ case 2:
+ gem = drm_fb_cma_get_gem_obj(fb, 1);
+ priv->viu.vd1_addr1 = gem->paddr + fb->offsets[1];
+ priv->viu.vd1_stride1 = fb->pitches[1];
+ priv->viu.vd1_height1 =
+ drm_format_plane_height(fb->height,
+ fb->format->format, 1);
+ DRM_DEBUG("plane 1 addr 0x%x stride %d height %d\n",
+ priv->viu.vd1_addr1,
+ priv->viu.vd1_stride1,
+ priv->viu.vd1_height1);
+ /* fallthrough */
+ case 1:
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+ priv->viu.vd1_addr0 = gem->paddr + fb->offsets[0];
+ priv->viu.vd1_stride0 = fb->pitches[0];
+ priv->viu.vd1_height0 =
+ drm_format_plane_height(fb->height,
+ fb->format->format, 0);
+ DRM_DEBUG("plane 0 addr 0x%x stride %d height %d\n",
+ priv->viu.vd1_addr0,
+ priv->viu.vd1_stride0,
+ priv->viu.vd1_height0);
+ }
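For reference, drm_format_num_planes() returns 1 for YUYV, 2 for NV12/NV21 and 3 for the planar YUV formats, so the fall-through above programs exactly the planes the framebuffer carries, and drm_format_plane_height() returns the subsampled height for the chroma planes:

        /* e.g. for DRM_FORMAT_NV12:                                   */
        /*   drm_format_num_planes(DRM_FORMAT_NV12) == 2               */
        /*   drm_format_plane_height(1080, DRM_FORMAT_NV12, 0) == 1080 */
        /*   drm_format_plane_height(1080, DRM_FORMAT_NV12, 1) == 540  */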
+
+ priv->viu.vd1_enabled = true;
+
+ spin_unlock_irqrestore(&priv->drm->event_lock, flags);
+
+ DRM_DEBUG_DRIVER("\n");
+}
+
+static void meson_overlay_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct meson_overlay *meson_overlay = to_meson_overlay(plane);
+ struct meson_drm *priv = meson_overlay->priv;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ priv->viu.vd1_enabled = false;
+
+ /* Disable VD1 */
+ writel_bits_relaxed(VPP_VD1_POSTBLEND | VPP_VD1_PREBLEND, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+}
+
+static const struct drm_plane_helper_funcs meson_overlay_helper_funcs = {
+ .atomic_check = meson_overlay_atomic_check,
+ .atomic_disable = meson_overlay_atomic_disable,
+ .atomic_update = meson_overlay_atomic_update,
+ .prepare_fb = drm_gem_fb_prepare_fb,
+};
+
+static const struct drm_plane_funcs meson_overlay_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static const uint32_t supported_drm_formats[] = {
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV411,
+ DRM_FORMAT_YUV410,
+};
+
+int meson_overlay_create(struct meson_drm *priv)
+{
+ struct meson_overlay *meson_overlay;
+ struct drm_plane *plane;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ meson_overlay = devm_kzalloc(priv->drm->dev, sizeof(*meson_overlay),
+ GFP_KERNEL);
+ if (!meson_overlay)
+ return -ENOMEM;
+
+ meson_overlay->priv = priv;
+ plane = &meson_overlay->base;
+
+ drm_universal_plane_init(priv->drm, plane, 0xFF,
+ &meson_overlay_funcs,
+ supported_drm_formats,
+ ARRAY_SIZE(supported_drm_formats),
+ NULL,
+ DRM_PLANE_TYPE_OVERLAY, "meson_overlay_plane");
+
+ drm_plane_helper_add(plane, &meson_overlay_helper_funcs);
+
+ priv->overlay_plane = plane;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ return 0;
+}
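drm_universal_plane_init() can fail (it allocates the format list, among other things); a slightly more defensive variant of the call above would look like the sketch below. This is not what the patch does, it only illustrates the return-value contract:

        int ret;

        ret = drm_universal_plane_init(priv->drm, plane, 0xFF,
                                       &meson_overlay_funcs,
                                       supported_drm_formats,
                                       ARRAY_SIZE(supported_drm_formats),
                                       NULL,
                                       DRM_PLANE_TYPE_OVERLAY,
                                       "meson_overlay_plane");
        if (ret)
                return ret;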
diff --git a/drivers/gpu/drm/meson/meson_overlay.h b/drivers/gpu/drm/meson/meson_overlay.h
new file mode 100644
index 000000000000..dae24f5ac63d
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_overlay.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef __MESON_OVERLAY_H
+#define __MESON_OVERLAY_H
+
+#include "meson_drv.h"
+
+int meson_overlay_create(struct meson_drm *priv);
+
+#endif /* __MESON_OVERLAY_H */
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 12c80dfcff59..6119a0224278 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/bitfield.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
@@ -31,6 +32,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_rect.h>
#include "meson_plane.h"
@@ -39,12 +41,51 @@
#include "meson_canvas.h"
#include "meson_registers.h"
+/* OSD_SCI_WH_M1 */
+#define SCI_WH_M1_W(w) FIELD_PREP(GENMASK(28, 16), w)
+#define SCI_WH_M1_H(h) FIELD_PREP(GENMASK(12, 0), h)
+
+/* OSD_SCO_H_START_END */
+/* OSD_SCO_V_START_END */
+#define SCO_HV_START(start) FIELD_PREP(GENMASK(27, 16), start)
+#define SCO_HV_END(end) FIELD_PREP(GENMASK(11, 0), end)
+
+/* OSD_SC_CTRL0 */
+#define SC_CTRL0_PATH_EN BIT(3)
+#define SC_CTRL0_SEL_OSD1 BIT(2)
+
+/* OSD_VSC_CTRL0 */
+#define VSC_BANK_LEN(value) FIELD_PREP(GENMASK(2, 0), value)
+#define VSC_TOP_INI_RCV_NUM(value) FIELD_PREP(GENMASK(6, 3), value)
+#define VSC_TOP_RPT_L0_NUM(value) FIELD_PREP(GENMASK(9, 8), value)
+#define VSC_BOT_INI_RCV_NUM(value) FIELD_PREP(GENMASK(14, 11), value)
+#define VSC_BOT_RPT_L0_NUM(value) FIELD_PREP(GENMASK(17, 16), value)
+#define VSC_PROG_INTERLACE BIT(23)
+#define VSC_VERTICAL_SCALER_EN BIT(24)
+
+/* OSD_VSC_INI_PHASE */
+#define VSC_INI_PHASE_BOT(bottom) FIELD_PREP(GENMASK(31, 16), bottom)
+#define VSC_INI_PHASE_TOP(top) FIELD_PREP(GENMASK(15, 0), top)
+
+/* OSD_HSC_CTRL0 */
+#define HSC_BANK_LENGTH(value) FIELD_PREP(GENMASK(2, 0), value)
+#define HSC_INI_RCV_NUM0(value) FIELD_PREP(GENMASK(6, 3), value)
+#define HSC_RPT_P0_NUM0(value) FIELD_PREP(GENMASK(9, 8), value)
+#define HSC_HORIZ_SCALER_EN BIT(22)
+
+/* VPP_OSD_VSC_PHASE_STEP */
+/* VPP_OSD_HSC_PHASE_STEP */
+#define SC_PHASE_STEP(value) FIELD_PREP(GENMASK(27, 0), value)
+
struct meson_plane {
struct drm_plane base;
struct meson_drm *priv;
+ bool enabled;
};
#define to_meson_plane(x) container_of(x, struct meson_plane, base)
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
+
static int meson_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -57,10 +98,15 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
+ /*
+ * Only allow:
+ * - Upscaling up to 5x, vertical and horizontal
+ * - Final coordinates must match crtc size
+ */
return drm_atomic_helper_check_plane_state(state, crtc_state,
+ FRAC_16_16(1, 5),
DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true, true);
+ false, true);
}
/* Takes a fixed 16.16 number and converts it to integer. */
@@ -74,22 +120,20 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
{
struct meson_plane *meson_plane = to_meson_plane(plane);
struct drm_plane_state *state = plane->state;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_rect dest = drm_plane_state_dest(state);
struct meson_drm *priv = meson_plane->priv;
+ struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *gem;
- struct drm_rect src = {
- .x1 = (state->src_x),
- .y1 = (state->src_y),
- .x2 = (state->src_x + state->src_w),
- .y2 = (state->src_y + state->src_h),
- };
- struct drm_rect dest = {
- .x1 = state->crtc_x,
- .y1 = state->crtc_y,
- .x2 = state->crtc_x + state->crtc_w,
- .y2 = state->crtc_y + state->crtc_h,
- };
unsigned long flags;
+ int vsc_ini_rcv_num, vsc_ini_rpt_p0_num;
+ int vsc_bot_rcv_num, vsc_bot_rpt_p0_num;
+ int hsc_ini_rcv_num, hsc_ini_rpt_p0_num;
+ int hf_phase_step, vf_phase_step;
+ int src_w, src_h, dst_w, dst_h;
+ int bot_ini_phase;
+ int hf_bank_len;
+ int vf_bank_len;
+ u8 canvas_id_osd1;
/*
* Update Coordinates
@@ -104,8 +148,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
(0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
OSD_BLK0_ENABLE;
+ if (priv->canvas)
+ canvas_id_osd1 = priv->canvas_id_osd1;
+ else
+ canvas_id_osd1 = MESON_CANVAS_ID_OSD1;
+
/* Set up BLK0 to point to the right canvas */
- priv->viu.osd1_blk0_cfg[0] = ((MESON_CANVAS_ID_OSD1 << OSD_CANVAS_SEL) |
+ priv->viu.osd1_blk0_cfg[0] = ((canvas_id_osd1 << OSD_CANVAS_SEL) |
OSD_ENDIANNESS_LE);
/* On GXBB, Use the old non-HDR RGB2YUV converter */
@@ -137,23 +186,115 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
break;
};
+ /* Default scaler parameters */
+ vsc_bot_rcv_num = 0;
+ vsc_bot_rpt_p0_num = 0;
+ hf_bank_len = 4;
+ vf_bank_len = 4;
+
if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
- priv->viu.osd1_interlace = true;
+ vsc_bot_rcv_num = 6;
+ vsc_bot_rpt_p0_num = 2;
+ }
+
+ hsc_ini_rcv_num = hf_bank_len;
+ vsc_ini_rcv_num = vf_bank_len;
+ hsc_ini_rpt_p0_num = (hf_bank_len / 2) - 1;
+ vsc_ini_rpt_p0_num = (vf_bank_len / 2) - 1;
+
+ src_w = fixed16_to_int(state->src_w);
+ src_h = fixed16_to_int(state->src_h);
+ dst_w = state->crtc_w;
+ dst_h = state->crtc_h;
+ /*
+ * When the output is interlaced, the OSD must switch between
+ * each field using the INTERLACE_SEL_ODD (0) of VIU_OSD1_BLK0_CFG_W0
+ * at each vsync.
+ * But the vertical scaler can provide such functionality if it
+ * is configured for 2:1 scaling with interlace options enabled.
+ */
+ if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
dest.y1 /= 2;
dest.y2 /= 2;
- } else
- priv->viu.osd1_interlace = false;
+ dst_h /= 2;
+ }
+
+ hf_phase_step = ((src_w << 18) / dst_w) << 6;
+ vf_phase_step = (src_h << 20) / dst_h;
+
+ if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ bot_ini_phase = ((vf_phase_step / 2) >> 4);
+ else
+ bot_ini_phase = 0;
+
+ vf_phase_step = (vf_phase_step << 4);
+
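A hedged example of the two expressions above for a 1280x720 framebuffer shown on a 1920x1080 progressive mode (hand-derived values):

        /* hf_phase_step = ((1280 << 18) / 1920) << 6 = 0x2aaaa << 6 = 0xaaaa80 */
        /* vf_phase_step = ((720 << 20) / 1080) << 4  = 0xaaaaa << 4 = 0xaaaaa0 */
        /* bot_ini_phase = 0, since the mode is progressive                     */

Unlike the VD1 path in meson_overlay.c, the OSD ratio is not rounded up here.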
+ /* In interlaced mode, scaler is always active */
+ if (src_h != dst_h || src_w != dst_w) {
+ priv->viu.osd_sc_i_wh_m1 = SCI_WH_M1_W(src_w - 1) |
+ SCI_WH_M1_H(src_h - 1);
+ priv->viu.osd_sc_o_h_start_end = SCO_HV_START(dest.x1) |
+ SCO_HV_END(dest.x2 - 1);
+ priv->viu.osd_sc_o_v_start_end = SCO_HV_START(dest.y1) |
+ SCO_HV_END(dest.y2 - 1);
+ /* Enable OSD Scaler */
+ priv->viu.osd_sc_ctrl0 = SC_CTRL0_PATH_EN | SC_CTRL0_SEL_OSD1;
+ } else {
+ priv->viu.osd_sc_i_wh_m1 = 0;
+ priv->viu.osd_sc_o_h_start_end = 0;
+ priv->viu.osd_sc_o_v_start_end = 0;
+ priv->viu.osd_sc_ctrl0 = 0;
+ }
+
+ /* In interlaced mode, vertical scaler is always active */
+ if (src_h != dst_h) {
+ priv->viu.osd_sc_v_ctrl0 =
+ VSC_BANK_LEN(vf_bank_len) |
+ VSC_TOP_INI_RCV_NUM(vsc_ini_rcv_num) |
+ VSC_TOP_RPT_L0_NUM(vsc_ini_rpt_p0_num) |
+ VSC_VERTICAL_SCALER_EN;
+
+ if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ priv->viu.osd_sc_v_ctrl0 |=
+ VSC_BOT_INI_RCV_NUM(vsc_bot_rcv_num) |
+ VSC_BOT_RPT_L0_NUM(vsc_bot_rpt_p0_num) |
+ VSC_PROG_INTERLACE;
+
+ priv->viu.osd_sc_v_phase_step = SC_PHASE_STEP(vf_phase_step);
+ priv->viu.osd_sc_v_ini_phase = VSC_INI_PHASE_BOT(bot_ini_phase);
+ } else {
+ priv->viu.osd_sc_v_ctrl0 = 0;
+ priv->viu.osd_sc_v_phase_step = 0;
+ priv->viu.osd_sc_v_ini_phase = 0;
+ }
+
+ /* Horizontal scaler is only used if width does not match */
+ if (src_w != dst_w) {
+ priv->viu.osd_sc_h_ctrl0 =
+ HSC_BANK_LENGTH(hf_bank_len) |
+ HSC_INI_RCV_NUM0(hsc_ini_rcv_num) |
+ HSC_RPT_P0_NUM0(hsc_ini_rpt_p0_num) |
+ HSC_HORIZ_SCALER_EN;
+ priv->viu.osd_sc_h_phase_step = SC_PHASE_STEP(hf_phase_step);
+ priv->viu.osd_sc_h_ini_phase = 0;
+ } else {
+ priv->viu.osd_sc_h_ctrl0 = 0;
+ priv->viu.osd_sc_h_phase_step = 0;
+ priv->viu.osd_sc_h_ini_phase = 0;
+ }
/*
* The format of these registers is (x2 << 16 | x1),
* where x2 is exclusive.
* e.g. +30x1920 would be (1919 << 16) | 30
*/
- priv->viu.osd1_blk0_cfg[1] = ((fixed16_to_int(src.x2) - 1) << 16) |
- fixed16_to_int(src.x1);
- priv->viu.osd1_blk0_cfg[2] = ((fixed16_to_int(src.y2) - 1) << 16) |
- fixed16_to_int(src.y1);
+ priv->viu.osd1_blk0_cfg[1] =
+ ((fixed16_to_int(state->src.x2) - 1) << 16) |
+ fixed16_to_int(state->src.x1);
+ priv->viu.osd1_blk0_cfg[2] =
+ ((fixed16_to_int(state->src.y2) - 1) << 16) |
+ fixed16_to_int(state->src.y1);
priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1;
priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1;
@@ -164,6 +305,15 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
priv->viu.osd1_stride = fb->pitches[0];
priv->viu.osd1_height = fb->height;
+ if (!meson_plane->enabled) {
+ /* Reset OSD1 before enabling it on GXL+ SoCs */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_viu_osd1_reset(priv);
+
+ meson_plane->enabled = true;
+ }
+
spin_unlock_irqrestore(&priv->drm->event_lock, flags);
}
@@ -177,12 +327,15 @@ static void meson_plane_atomic_disable(struct drm_plane *plane,
writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
priv->io_base + _REG(VPP_MISC));
+ meson_plane->enabled = false;
+
}
static const struct drm_plane_helper_funcs meson_plane_helper_funcs = {
.atomic_check = meson_plane_atomic_check,
.atomic_disable = meson_plane_atomic_disable,
.atomic_update = meson_plane_atomic_update,
+ .prepare_fb = drm_gem_fb_prepare_fb,
};
static const struct drm_plane_funcs meson_plane_funcs = {
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index bca87143e548..5c7e02c703bc 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -286,6 +286,7 @@
#define VIU_OSD1_MATRIX_COEF22_30 0x1a9d
#define VIU_OSD1_MATRIX_COEF31_32 0x1a9e
#define VIU_OSD1_MATRIX_COEF40_41 0x1a9f
+#define VD1_IF0_GEN_REG3 0x1aa7
#define VIU_OSD1_EOTF_CTL 0x1ad4
#define VIU_OSD1_EOTF_COEF00_01 0x1ad5
#define VIU_OSD1_EOTF_COEF02_10 0x1ad6
@@ -297,6 +298,7 @@
#define VIU_OSD1_OETF_CTL 0x1adc
#define VIU_OSD1_OETF_LUT_ADDR_PORT 0x1add
#define VIU_OSD1_OETF_LUT_DATA_PORT 0x1ade
+#define AFBC_ENABLE 0x1ae0
/* vpp */
#define VPP_DUMMY_DATA 0x1d00
@@ -349,6 +351,7 @@
#define VPP_VD2_PREBLEND BIT(15)
#define VPP_OSD1_PREBLEND BIT(16)
#define VPP_OSD2_PREBLEND BIT(17)
+#define VPP_COLOR_MNG_ENABLE BIT(28)
#define VPP_OFIFO_SIZE 0x1d27
#define VPP_FIFO_STATUS 0x1d28
#define VPP_SMOKE_CTRL 0x1d29
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index ae5473257f72..f6ba35a405f8 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -117,6 +117,8 @@
#define HDMI_PLL_RESET BIT(28)
#define HDMI_PLL_LOCK BIT(31)
+#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST(_freq * 1000, 1001)
+
/* VID PLL Dividers */
enum {
VID_PLL_DIV_1 = 0,
@@ -323,7 +325,7 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
enum {
/* PLL O1 O2 O3 VP DV EN TX */
/* 4320 /4 /4 /1 /5 /1 => /2 /2 */
- MESON_VCLK_HDMI_ENCI_54000 = 1,
+ MESON_VCLK_HDMI_ENCI_54000 = 0,
/* 4320 /4 /4 /1 /5 /1 => /1 /2 */
MESON_VCLK_HDMI_DDR_54000,
/* 2970 /4 /1 /1 /5 /1 => /1 /2 */
@@ -339,6 +341,7 @@ enum {
};
struct meson_vclk_params {
+ unsigned int pixel_freq;
unsigned int pll_base_freq;
unsigned int pll_od1;
unsigned int pll_od2;
@@ -347,6 +350,7 @@ struct meson_vclk_params {
unsigned int vclk_div;
} params[] = {
[MESON_VCLK_HDMI_ENCI_54000] = {
+ .pixel_freq = 54000,
.pll_base_freq = 4320000,
.pll_od1 = 4,
.pll_od2 = 4,
@@ -355,6 +359,7 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_54000] = {
+ .pixel_freq = 54000,
.pll_base_freq = 4320000,
.pll_od1 = 4,
.pll_od2 = 4,
@@ -363,6 +368,7 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_148500] = {
+ .pixel_freq = 148500,
.pll_base_freq = 2970000,
.pll_od1 = 4,
.pll_od2 = 1,
@@ -371,6 +377,7 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_74250] = {
+ .pixel_freq = 74250,
.pll_base_freq = 2970000,
.pll_od1 = 2,
.pll_od2 = 2,
@@ -379,6 +386,7 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_148500] = {
+ .pixel_freq = 148500,
.pll_base_freq = 2970000,
.pll_od1 = 1,
.pll_od2 = 2,
@@ -387,6 +395,7 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_297000] = {
+ .pixel_freq = 297000,
.pll_base_freq = 2970000,
.pll_od1 = 1,
.pll_od2 = 1,
@@ -395,6 +404,7 @@ struct meson_vclk_params {
.vclk_div = 2,
},
[MESON_VCLK_HDMI_594000] = {
+ .pixel_freq = 594000,
.pll_base_freq = 5940000,
.pll_od1 = 1,
.pll_od2 = 1,
@@ -402,6 +412,7 @@ struct meson_vclk_params {
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
+ { /* sentinel */ },
};
static inline unsigned int pll_od_to_reg(unsigned int od)
@@ -626,12 +637,37 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
pll_freq);
}
+enum drm_mode_status
+meson_vclk_vic_supported_freq(unsigned int freq)
+{
+ int i;
+
+ DRM_DEBUG_DRIVER("freq = %d\n", freq);
+
+ for (i = 0 ; params[i].pixel_freq ; ++i) {
+ DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
+ i, params[i].pixel_freq,
+ FREQ_1000_1001(params[i].pixel_freq));
+ /* Match strict frequency */
+ if (freq == params[i].pixel_freq)
+ return MODE_OK;
+ /* Match 1000/1001 variant */
+ if (freq == FREQ_1000_1001(params[i].pixel_freq))
+ return MODE_OK;
+ }
+
+ return MODE_CLOCK_RANGE;
+}
+EXPORT_SYMBOL_GPL(meson_vclk_vic_supported_freq);
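The 1000/1001 match accepts the NTSC-style fractional variants of each CEA clock; a hedged note on the values the macro produces (kHz, rounded to closest per DIV_ROUND_CLOSEST):

        /* FREQ_1000_1001(74250)  == 74176   (74.25 MHz -> 74.176 MHz)  */
        /* FREQ_1000_1001(148500) == 148352  (148.5 MHz -> 148.352 MHz) */
        /* FREQ_1000_1001(594000) == 593407  (594 MHz   -> 593.407 MHz) */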
+
static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
unsigned int od1, unsigned int od2, unsigned int od3,
unsigned int vid_pll_div, unsigned int vclk_div,
unsigned int hdmi_tx_div, unsigned int venc_div,
- bool hdmi_use_enci)
+ bool hdmi_use_enci, bool vic_alternate_clock)
{
+ unsigned int m = 0, frac = 0;
+
/* Set HDMI-TX sys clock */
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
CTS_HDMI_SYS_SEL_MASK, 0);
@@ -646,34 +682,38 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
switch (pll_base_freq) {
case 2970000:
- meson_hdmi_pll_set_params(priv, 0x3d, 0xe00,
- od1, od2, od3);
+ m = 0x3d;
+ frac = vic_alternate_clock ? 0xd02 : 0xe00;
break;
case 4320000:
- meson_hdmi_pll_set_params(priv, 0x5a, 0,
- od1, od2, od3);
+ m = vic_alternate_clock ? 0x59 : 0x5a;
+ frac = vic_alternate_clock ? 0xe8f : 0;
break;
case 5940000:
- meson_hdmi_pll_set_params(priv, 0x7b, 0xc00,
- od1, od2, od3);
+ m = 0x7b;
+ frac = vic_alternate_clock ? 0xa05 : 0xc00;
break;
}
+
+ meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
switch (pll_base_freq) {
case 2970000:
- meson_hdmi_pll_set_params(priv, 0x7b, 0x300,
- od1, od2, od3);
+ m = 0x7b;
+ frac = vic_alternate_clock ? 0x281 : 0x300;
break;
case 4320000:
- meson_hdmi_pll_set_params(priv, 0xb4, 0,
- od1, od2, od3);
+ m = vic_alternate_clock ? 0xb3 : 0xb4;
+ frac = vic_alternate_clock ? 0x347 : 0;
break;
case 5940000:
- meson_hdmi_pll_set_params(priv, 0xf7, 0x200,
- od1, od2, od3);
+ m = 0xf7;
+ frac = vic_alternate_clock ? 0x102 : 0x200;
break;
}
+
+ meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
}
/* Setup vid_pll divider */
@@ -826,6 +866,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
unsigned int vclk_freq, unsigned int venc_freq,
unsigned int dac_freq, bool hdmi_use_enci)
{
+ bool vic_alternate_clock = false;
unsigned int freq;
unsigned int hdmi_tx_div;
unsigned int venc_div;
@@ -843,7 +884,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
* - encp encoder
*/
meson_vclk_set(priv, vclk_freq * 10, 0, 0, 0,
- VID_PLL_DIV_5, 2, 1, 1, false);
+ VID_PLL_DIV_5, 2, 1, 1, false, false);
return;
}
@@ -863,31 +904,35 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
return;
}
- switch (vclk_freq) {
- case 54000:
- if (hdmi_use_enci)
- freq = MESON_VCLK_HDMI_ENCI_54000;
- else
- freq = MESON_VCLK_HDMI_DDR_54000;
- break;
- case 74250:
- freq = MESON_VCLK_HDMI_74250;
- break;
- case 148500:
- if (dac_freq != 148500)
- freq = MESON_VCLK_HDMI_DDR_148500;
- else
- freq = MESON_VCLK_HDMI_148500;
- break;
- case 297000:
- freq = MESON_VCLK_HDMI_297000;
- break;
- case 594000:
- freq = MESON_VCLK_HDMI_594000;
- break;
- default:
- pr_err("Fatal Error, invalid HDMI vclk freq %d\n",
- vclk_freq);
+ for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
+ if (vclk_freq == params[freq].pixel_freq ||
+ vclk_freq == FREQ_1000_1001(params[freq].pixel_freq)) {
+ if (vclk_freq != params[freq].pixel_freq)
+ vic_alternate_clock = true;
+ else
+ vic_alternate_clock = false;
+
+ if (freq == MESON_VCLK_HDMI_ENCI_54000 &&
+ !hdmi_use_enci)
+ continue;
+
+ if (freq == MESON_VCLK_HDMI_DDR_54000 &&
+ hdmi_use_enci)
+ continue;
+
+ if (freq == MESON_VCLK_HDMI_DDR_148500 &&
+ dac_freq == vclk_freq)
+ continue;
+
+ if (freq == MESON_VCLK_HDMI_148500 &&
+ dac_freq != vclk_freq)
+ continue;
+ break;
+ }
+ }
+
+ if (!params[freq].pixel_freq) {
+ pr_err("Fatal Error, invalid HDMI vclk freq %d\n", vclk_freq);
return;
}
@@ -895,6 +940,6 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
params[freq].pll_od1, params[freq].pll_od2,
params[freq].pll_od3, params[freq].vid_pll_div,
params[freq].vclk_div, hdmi_tx_div, venc_div,
- hdmi_use_enci);
+ hdmi_use_enci, vic_alternate_clock);
}
EXPORT_SYMBOL_GPL(meson_vclk_setup);
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index 869fa3a3073e..4bd8752da02a 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -32,6 +32,8 @@ enum {
enum drm_mode_status
meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
+enum drm_mode_status
+meson_vclk_vic_supported_freq(unsigned int freq);
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
unsigned int vclk_freq, unsigned int venc_freq,
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index acbbad3e322c..e95e0e7a7fa1 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -697,6 +697,132 @@ union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p60 = {
},
};
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_2160p24 = {
+ .encp = {
+ .dvi_settings = 0x1,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x8,
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ .video_filt_ctrl = 0x1000,
+ .video_filt_ctrl_present = true,
+ /* video_ofld_voav_ofst */
+ .yfp1_htime = 140,
+ .yfp2_htime = 140+3840,
+ .max_pxcnt = 3840+1660-1,
+ .hspuls_begin = 2156+1920,
+ .hspuls_end = 44,
+ .hspuls_switch = 44,
+ .vspuls_begin = 140,
+ .vspuls_end = 2059+1920,
+ .vspuls_bline = 0,
+ .vspuls_eline = 4,
+ .havon_begin = 148,
+ .havon_end = 3987,
+ .vavon_bline = 89,
+ .vavon_eline = 2248,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 44,
+ .hso_end = 2156+1920,
+ .vso_begin = 2100+1920,
+ .vso_end = 2164+1920,
+ .vso_bline = 51,
+ .vso_eline = 53,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 2249,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_2160p25 = {
+ .encp = {
+ .dvi_settings = 0x1,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x8,
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ .video_filt_ctrl = 0x1000,
+ .video_filt_ctrl_present = true,
+ /* video_ofld_voav_ofst */
+ .yfp1_htime = 140,
+ .yfp2_htime = 140+3840,
+ .max_pxcnt = 3840+1440-1,
+ .hspuls_begin = 2156+1920,
+ .hspuls_end = 44,
+ .hspuls_switch = 44,
+ .vspuls_begin = 140,
+ .vspuls_end = 2059+1920,
+ .vspuls_bline = 0,
+ .vspuls_eline = 4,
+ .havon_begin = 148,
+ .havon_end = 3987,
+ .vavon_bline = 89,
+ .vavon_eline = 2248,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 44,
+ .hso_end = 2156+1920,
+ .vso_begin = 2100+1920,
+ .vso_end = 2164+1920,
+ .vso_bline = 51,
+ .vso_eline = 53,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 2249,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_2160p30 = {
+ .encp = {
+ .dvi_settings = 0x1,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x8,
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ .video_filt_ctrl = 0x1000,
+ .video_filt_ctrl_present = true,
+ /* video_ofld_voav_ofst */
+ .yfp1_htime = 140,
+ .yfp2_htime = 140+3840,
+ .max_pxcnt = 3840+560-1,
+ .hspuls_begin = 2156+1920,
+ .hspuls_end = 44,
+ .hspuls_switch = 44,
+ .vspuls_begin = 140,
+ .vspuls_end = 2059+1920,
+ .vspuls_bline = 0,
+ .vspuls_eline = 4,
+ .havon_begin = 148,
+ .havon_end = 3987,
+ .vavon_bline = 89,
+ .vavon_eline = 2248,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 44,
+ .hso_end = 2156+1920,
+ .vso_begin = 2100+1920,
+ .vso_end = 2164+1920,
+ .vso_bline = 51,
+ .vso_eline = 53,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 2249,
+ },
+};
+
struct meson_hdmi_venc_vic_mode {
unsigned int vic;
union meson_hdmi_venc_mode *mode;
@@ -717,6 +843,9 @@ struct meson_hdmi_venc_vic_mode {
{ 34, &meson_hdmi_encp_mode_1080p30 },
{ 31, &meson_hdmi_encp_mode_1080p50 },
{ 16, &meson_hdmi_encp_mode_1080p60 },
+ { 93, &meson_hdmi_encp_mode_2160p24 },
+ { 94, &meson_hdmi_encp_mode_2160p25 },
+ { 95, &meson_hdmi_encp_mode_2160p30 },
{ 0, NULL}, /* sentinel */
};
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 6bcfa527c180..0ba87ff95530 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -296,6 +296,33 @@ static void meson_viu_load_matrix(struct meson_drm *priv)
true);
}
+/* VIU OSD1 Reset as workaround for GXL+ Alpha OSD Bug */
+void meson_viu_osd1_reset(struct meson_drm *priv)
+{
+ uint32_t osd1_fifo_ctrl_stat, osd1_ctrl_stat2;
+
+ /* Save these 2 registers state */
+ osd1_fifo_ctrl_stat = readl_relaxed(
+ priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
+ osd1_ctrl_stat2 = readl_relaxed(
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+
+ /* Reset OSD1 */
+ writel_bits_relaxed(BIT(0), BIT(0),
+ priv->io_base + _REG(VIU_SW_RESET));
+ writel_bits_relaxed(BIT(0), 0,
+ priv->io_base + _REG(VIU_SW_RESET));
+
+ /* Rewrite these registers state lost in the reset */
+ writel_relaxed(osd1_fifo_ctrl_stat,
+ priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
+ writel_relaxed(osd1_ctrl_stat2,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+
+ /* Reload the conversion matrix */
+ meson_viu_load_matrix(priv);
+}
+
void meson_viu_init(struct meson_drm *priv)
{
uint32_t reg;
@@ -329,6 +356,21 @@ void meson_viu_init(struct meson_drm *priv)
0xff << OSD_REPLACE_SHIFT,
priv->io_base + _REG(VIU_OSD2_CTRL_STAT2));
+ /* Disable VD1 AFBC */
+ /* di_mif0_en=0 mif0_to_vpp_en=0 di_mad_en=0 */
+ writel_bits_relaxed(0x7 << 16, 0,
+ priv->io_base + _REG(VIU_MISC_CTRL0));
+ /* afbc vd1 set=0 */
+ writel_bits_relaxed(BIT(20), 0,
+ priv->io_base + _REG(VIU_MISC_CTRL0));
+ writel_relaxed(0, priv->io_base + _REG(AFBC_ENABLE));
+
+ writel_relaxed(0x00FF00C0,
+ priv->io_base + _REG(VD1_IF0_LUMA_FIFO_SIZE));
+ writel_relaxed(0x00FF00C0,
+ priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
+
+
priv->viu.osd1_enabled = false;
priv->viu.osd1_commit = false;
priv->viu.osd1_interlace = false;
diff --git a/drivers/gpu/drm/meson/meson_viu.h b/drivers/gpu/drm/meson/meson_viu.h
index 073b1910bd1b..0f84bddd2ff0 100644
--- a/drivers/gpu/drm/meson/meson_viu.h
+++ b/drivers/gpu/drm/meson/meson_viu.h
@@ -59,6 +59,7 @@
#define OSD_REPLACE_EN BIT(14)
#define OSD_REPLACE_SHIFT 6
+void meson_viu_osd1_reset(struct meson_drm *priv);
void meson_viu_init(struct meson_drm *priv);
#endif /* __MESON_VIU_H */
diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
index 27356f81a0ab..f9efb431e953 100644
--- a/drivers/gpu/drm/meson/meson_vpp.c
+++ b/drivers/gpu/drm/meson/meson_vpp.c
@@ -51,52 +51,6 @@ void meson_vpp_setup_mux(struct meson_drm *priv, unsigned int mux)
writel(mux, priv->io_base + _REG(VPU_VIU_VENC_MUX_CTRL));
}
-/*
- * When the output is interlaced, the OSD must switch between
- * each field using the INTERLACE_SEL_ODD (0) of VIU_OSD1_BLK0_CFG_W0
- * at each vsync.
- * But the vertical scaler can provide such funtionnality if
- * is configured for 2:1 scaling with interlace options enabled.
- */
-void meson_vpp_setup_interlace_vscaler_osd1(struct meson_drm *priv,
- struct drm_rect *input)
-{
- writel_relaxed(BIT(3) /* Enable scaler */ |
- BIT(2), /* Select OSD1 */
- priv->io_base + _REG(VPP_OSD_SC_CTRL0));
-
- writel_relaxed(((drm_rect_width(input) - 1) << 16) |
- (drm_rect_height(input) - 1),
- priv->io_base + _REG(VPP_OSD_SCI_WH_M1));
- /* 2:1 scaling */
- writel_relaxed(((input->x1) << 16) | (input->x2),
- priv->io_base + _REG(VPP_OSD_SCO_H_START_END));
- writel_relaxed(((input->y1 >> 1) << 16) | (input->y2 >> 1),
- priv->io_base + _REG(VPP_OSD_SCO_V_START_END));
-
- /* 2:1 scaling values */
- writel_relaxed(BIT(16), priv->io_base + _REG(VPP_OSD_VSC_INI_PHASE));
- writel_relaxed(BIT(25), priv->io_base + _REG(VPP_OSD_VSC_PHASE_STEP));
-
- writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
-
- writel_relaxed((4 << 0) /* osd_vsc_bank_length */ |
- (4 << 3) /* osd_vsc_top_ini_rcv_num0 */ |
- (1 << 8) /* osd_vsc_top_rpt_p0_num0 */ |
- (6 << 11) /* osd_vsc_bot_ini_rcv_num0 */ |
- (2 << 16) /* osd_vsc_bot_rpt_p0_num0 */ |
- BIT(23) /* osd_prog_interlace */ |
- BIT(24), /* Enable vertical scaler */
- priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
-}
-
-void meson_vpp_disable_interlace_vscaler_osd1(struct meson_drm *priv)
-{
- writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
- writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
- writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
-}
-
static unsigned int vpp_filter_coefs_4point_bspline[] = {
0x15561500, 0x14561600, 0x13561700, 0x12561800,
0x11551a00, 0x11541b00, 0x10541c00, 0x0f541d00,
@@ -122,6 +76,31 @@ static void meson_vpp_write_scaling_filter_coefs(struct meson_drm *priv,
priv->io_base + _REG(VPP_OSD_SCALE_COEF));
}
+static const uint32_t vpp_filter_coefs_bicubic[] = {
+ 0x00800000, 0x007f0100, 0xff7f0200, 0xfe7f0300,
+ 0xfd7e0500, 0xfc7e0600, 0xfb7d0800, 0xfb7c0900,
+ 0xfa7b0b00, 0xfa7a0dff, 0xf9790fff, 0xf97711ff,
+ 0xf87613ff, 0xf87416fe, 0xf87218fe, 0xf8701afe,
+ 0xf76f1dfd, 0xf76d1ffd, 0xf76b21fd, 0xf76824fd,
+ 0xf76627fc, 0xf76429fc, 0xf7612cfc, 0xf75f2ffb,
+ 0xf75d31fb, 0xf75a34fb, 0xf75837fa, 0xf7553afa,
+ 0xf8523cfa, 0xf8503ff9, 0xf84d42f9, 0xf84a45f9,
+ 0xf84848f8
+};
+
+static void meson_vpp_write_vd_scaling_filter_coefs(struct meson_drm *priv,
+ const unsigned int *coefs,
+ bool is_horizontal)
+{
+ int i;
+
+ writel_relaxed(is_horizontal ? BIT(8) : 0,
+ priv->io_base + _REG(VPP_SCALE_COEF_IDX));
+ for (i = 0; i < 33; i++)
+ writel_relaxed(coefs[i],
+ priv->io_base + _REG(VPP_SCALE_COEF));
+}
+
void meson_vpp_init(struct meson_drm *priv)
{
/* set dummy data default YUV black */
@@ -150,17 +129,34 @@ void meson_vpp_init(struct meson_drm *priv)
/* Force all planes off */
writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_OSD2_POSTBLEND |
- VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND, 0,
+ VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND |
+ VPP_VD1_PREBLEND | VPP_VD2_PREBLEND, 0,
priv->io_base + _REG(VPP_MISC));
+ /* Setup default VD settings */
+ writel_relaxed(4096,
+ priv->io_base + _REG(VPP_PREBLEND_VD1_H_START_END));
+ writel_relaxed(4096,
+ priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
+
/* Disable Scalers */
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
+ writel_relaxed(4 | (4 << 8) | BIT(15),
+ priv->io_base + _REG(VPP_SC_MISC));
+
+ writel_relaxed(1, priv->io_base + _REG(VPP_VADJ_CTRL));
/* Write in the proper filter coefficients. */
meson_vpp_write_scaling_filter_coefs(priv,
vpp_filter_coefs_4point_bspline, false);
meson_vpp_write_scaling_filter_coefs(priv,
vpp_filter_coefs_4point_bspline, true);
+
+ /* Write the VD proper filter coefficients. */
+ meson_vpp_write_vd_scaling_filter_coefs(priv, vpp_filter_coefs_bicubic,
+ false);
+ meson_vpp_write_vd_scaling_filter_coefs(priv, vpp_filter_coefs_bicubic,
+ true);
}
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 2393e6d16ffd..88ba003979e6 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -417,7 +417,7 @@ static int mxsfb_probe(struct platform_device *pdev)
err_unload:
mxsfb_unload(drm);
err_free:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -428,7 +428,7 @@ static int mxsfb_remove(struct platform_device *pdev)
drm_dev_unregister(drm);
mxsfb_unload(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index fa8bfa7c492d..33c22ee036f8 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -96,7 +96,7 @@ static int s6d16d0_prepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_tear_on(dsi,
MIPI_DSI_DCS_TEAR_MODE_VBLANK);
if (ret) {
- DRM_DEV_ERROR(s6->dev, "failed to enble vblank TE (%d)\n",
+ DRM_DEV_ERROR(s6->dev, "failed to enable vblank TE (%d)\n",
ret);
return ret;
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 5fbee837b0db..9c69e739a524 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -618,6 +618,30 @@ static const struct panel_desc auo_g070vvn01 = {
},
};
+static const struct drm_display_mode auo_g101evn010_mode = {
+ .clock = 68930,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 82,
+ .hsync_end = 1280 + 82 + 2,
+ .htotal = 1280 + 82 + 2 + 84,
+ .vdisplay = 800,
+ .vsync_start = 800 + 8,
+ .vsync_end = 800 + 8 + 2,
+ .vtotal = 800 + 8 + 2 + 6,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_g101evn010 = {
+ .modes = &auo_g101evn010_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 216,
+ .height = 135,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct drm_display_mode auo_g104sn02_mode = {
.clock = 40000,
.hdisplay = 800,
@@ -2494,6 +2518,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,g070vvn01",
.data = &auo_g070vvn01,
}, {
+ .compatible = "auo,g101evn010",
+ .data = &auo_g101evn010,
+ }, {
.compatible = "auo,g104sn02",
.data = &auo_g104sn02,
}, {
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
index 5fa0441bb6df..38c938c9adda 100644
--- a/drivers/gpu/drm/pl111/pl111_vexpress.c
+++ b/drivers/gpu/drm/pl111/pl111_vexpress.c
@@ -55,6 +55,8 @@ int pl111_vexpress_clcd_init(struct device *dev,
}
}
+ of_node_put(root);
+
/*
* If there is a coretile HDLCD and it has a driver,
* do not mux the CLCD on the motherboard to the DVI.
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 2ce9a8dcec84..ce0b9c40fc21 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -622,10 +622,14 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
if (ret)
goto out_kunmap;
- ret = qxl_release_reserve_list(release, true);
+ ret = qxl_bo_pin(cursor_bo);
if (ret)
goto out_free_bo;
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_unpin;
+
ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
if (ret)
goto out_backoff;
@@ -670,15 +674,17 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
- if (old_cursor_bo)
- qxl_bo_unref(&old_cursor_bo);
-
+ if (old_cursor_bo != NULL)
+ qxl_bo_unpin(old_cursor_bo);
+ qxl_bo_unref(&old_cursor_bo);
qxl_bo_unref(&cursor_bo);
return;
out_backoff:
qxl_release_backoff_reserve_list(release);
+out_unpin:
+ qxl_bo_unpin(cursor_bo);
out_free_bo:
qxl_bo_unref(&cursor_bo);
out_kunmap:
@@ -757,7 +763,7 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
}
}
- ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
+ ret = qxl_bo_pin(user_bo);
if (ret)
return ret;
@@ -1104,7 +1110,7 @@ int qxl_create_monitors_object(struct qxl_device *qdev)
}
qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
- ret = qxl_bo_pin(qdev->monitors_config_bo, QXL_GEM_DOMAIN_VRAM, NULL);
+ ret = qxl_bo_pin(qdev->monitors_config_bo);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index c34e45662965..c408bb83c7a9 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -247,8 +247,7 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
qxl_release_fence_buffer_objects(release);
out_free_palette:
- if (palette_bo)
- qxl_bo_unref(&palette_bo);
+ qxl_bo_unref(&palette_bo);
out_free_image:
qxl_image_free_objects(qdev, dimage);
out_free_drawable:
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 7e047c985ea6..a819d24225d2 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -111,7 +111,7 @@ static int qxlfb_create_pinned_object(struct qxl_device *qdev,
qbo->surf.stride = mode_cmd->pitches[0];
qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
- ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
+ ret = qxl_bo_pin(qbo);
if (ret) {
goto out_unref;
}
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index f6975d7c7d10..15238a413f9d 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -313,10 +313,8 @@ error:
void qxl_device_fini(struct qxl_device *qdev)
{
- if (qdev->current_release_bo[0])
- qxl_bo_unref(&qdev->current_release_bo[0]);
- if (qdev->current_release_bo[1])
- qxl_bo_unref(&qdev->current_release_bo[1]);
+ qxl_bo_unref(&qdev->current_release_bo[0]);
+ qxl_bo_unref(&qdev->current_release_bo[1]);
flush_work(&qdev->gc_work);
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index f67a3c535afb..91f3bbc73ecc 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -186,13 +186,9 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, void *pmap)
{
struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
- struct io_mapping *map;
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
- map = qdev->vram_mapping;
- else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
- map = qdev->surface_mapping;
- else
+ if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
+ (bo->tbo.mem.mem_type != TTM_PL_PRIV))
goto fallback;
io_mapping_unmap_atomic(pmap);
@@ -200,7 +196,7 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
ttm_mem_io_unlock(man);
- return ;
+ return;
fallback:
qxl_bo_kunmap(bo);
}
@@ -220,7 +216,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
return bo;
}
-static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+static int __qxl_bo_pin(struct qxl_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
struct drm_device *ddev = bo->gem_base.dev;
@@ -228,16 +224,12 @@ static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
if (bo->pin_count) {
bo->pin_count++;
- if (gpu_addr)
- *gpu_addr = qxl_bo_gpu_offset(bo);
return 0;
}
- qxl_ttm_placement_from_domain(bo, domain, true);
+ qxl_ttm_placement_from_domain(bo, bo->type, true);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (likely(r == 0)) {
bo->pin_count = 1;
- if (gpu_addr != NULL)
- *gpu_addr = qxl_bo_gpu_offset(bo);
}
if (unlikely(r != 0))
dev_err(ddev->dev, "%p pin failed\n", bo);
@@ -270,7 +262,7 @@ static int __qxl_bo_unpin(struct qxl_bo *bo)
 * beforehand, use the internal version __qxl_bo_pin directly.
*
*/
-int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+int qxl_bo_pin(struct qxl_bo *bo)
{
int r;
@@ -278,7 +270,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
if (r)
return r;
- r = __qxl_bo_pin(bo, bo->type, NULL);
+ r = __qxl_bo_pin(bo);
qxl_bo_unreserve(bo);
return r;
}
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index b40fc9a10406..255b914e2a7b 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -97,7 +97,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int pa
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
extern void qxl_bo_unref(struct qxl_bo **bo);
-extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
+extern int qxl_bo_pin(struct qxl_bo *bo);
extern int qxl_bo_unpin(struct qxl_bo *bo);
extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
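With the narrowed qxl_bo_pin() prototype above, the placement domain is taken from bo->type inside __qxl_bo_pin() and the GPU offset is no longer returned through an out-parameter. A minimal sketch of a caller under the new interface (example_pin_and_query() is a hypothetical helper, not part of the driver; qxl_bo_gpu_offset() is the accessor the removed lines used to fill gpu_addr):

    static int example_pin_and_query(struct qxl_bo *bo, u64 *gpu_addr)
    {
            int ret;

            ret = qxl_bo_pin(bo);           /* placement comes from bo->type */
            if (ret)
                    return ret;

            if (gpu_addr)
                    *gpu_addr = qxl_bo_gpu_offset(bo);  /* fetched explicitly */

            return 0;
    }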
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 18030e2be71f..30f85f0130cb 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -427,8 +427,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
struct ttm_buffer_object *bo;
struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
- struct ttm_bo_driver *driver;
- struct qxl_bo *qbo;
struct ttm_validate_buffer *entry;
struct qxl_device *qdev;
@@ -449,14 +447,12 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
- driver = bdev->driver;
glob = bdev->glob;
spin_lock(&glob->lru_lock);
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
- qbo = to_qxl_bo(bo);
reservation_object_add_shared_fence(bo->resv, &release->base);
ttm_bo_add_to_lru(bo);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 17741843cf51..90dacab67be5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -226,9 +226,6 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
* system clock, and have no internal clock divider.
*/
- if (WARN_ON(!rcrtc->extclock))
- return;
-
/*
* The H3 ES1.x exhibits dot clock duty cycle stability issues.
* We can work around them by configuring the DPLL to twice the
@@ -701,7 +698,7 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
* CRTC will be put later in .atomic_disable().
*
* If a mode set is not in progress the CRTC is enabled, and the
- * following get call will be a no-op. There is thus no need to belance
+ * following get call will be a no-op. There is thus no need to balance
* it in .atomic_flush() either.
*/
rcar_du_crtc_get(rcrtc);
@@ -738,10 +735,22 @@ enum drm_mode_status rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_device *rcdu = rcrtc->group->dev;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+ unsigned int vbp;
if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED))
return MODE_NO_INTERLACE;
+ /*
+ * The hardware requires a minimum combined horizontal sync and back
+ * porch of 20 pixels and a minimum vertical back porch of 3 lines.
+ */
+ if (mode->htotal - mode->hsync_start < 20)
+ return MODE_HBLANK_NARROW;
+
+ vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1);
+ if (vbp < 3)
+ return MODE_VBLANK_NARROW;
+
return MODE_OK;
}
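As a worked example of the blanking checks added above, take the standard CEA 1920x1080@60 timings (htotal 2200, hsync_start 2008, vtotal 1125, vsync_end 1089): htotal - hsync_start = 192 >= 20 and vbp = (1125 - 1089) / 1 = 36 >= 3, so the mode is accepted; for an interlaced mode the vertical back porch is halved before the comparison.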
@@ -1002,7 +1011,7 @@ unlock:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- return 0;
+ return ret;
}
static const struct drm_crtc_funcs crtc_funcs_gen2 = {
@@ -1113,9 +1122,16 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
clk = devm_clk_get(rcdu->dev, clk_name);
if (!IS_ERR(clk)) {
rcrtc->extclock = clk;
- } else if (PTR_ERR(rcrtc->clock) == -EPROBE_DEFER) {
- dev_info(rcdu->dev, "can't get external clock %u\n", hwindex);
+ } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
+ } else if (rcdu->info->dpll_mask & BIT(hwindex)) {
+ /*
+ * DU channels that have a display PLL can't use the internal
+ * system clock and thus require an external clock.
+ */
+ ret = PTR_ERR(clk);
+ dev_err(rcdu->dev, "can't get dclkin.%u: %d\n", hwindex, ret);
+ return ret;
}
init_waitqueue_head(&rcrtc->flip_wait);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 7015974c247a..f50a3b1864bb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -21,6 +21,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include "rcar_du_drv.h"
@@ -41,7 +42,7 @@ static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
.channels_mask = BIT(1) | BIT(0),
.routes = {
/*
- * R8A7743 has one RGB output and one LVDS output
+ * R8A774[34] has one RGB output and one LVDS output
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(1) | BIT(0),
@@ -77,6 +78,33 @@ static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
},
};
+static const struct rcar_du_device_info rzg1_du_r8a77470_info = {
+ .gen = 2,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS
+ | RCAR_DU_FEATURE_INTERLACED
+ | RCAR_DU_FEATURE_TVM_SYNC,
+ .channels_mask = BIT(1) | BIT(0),
+ .routes = {
+ /*
+ * R8A77470 has two RGB outputs, one LVDS output, and
+ * one (currently unsupported) analog video output
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(0),
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_DPAD1] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0) | BIT(1),
+ .port = 2,
+ },
+ },
+};
+
static const struct rcar_du_device_info rcar_du_r8a7779_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_INTERLACED
@@ -341,7 +369,9 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info },
+ { .compatible = "renesas,du-r8a7744", .data = &rzg1_du_r8a7743_info },
{ .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info },
+ { .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info },
{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
@@ -363,19 +393,11 @@ MODULE_DEVICE_TABLE(of, rcar_du_of_table);
* DRM operations
*/
-static void rcar_du_lastclose(struct drm_device *dev)
-{
- struct rcar_du_device *rcdu = dev->dev_private;
-
- drm_fbdev_cma_restore_mode(rcdu->fbdev);
-}
-
DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops);
static struct drm_driver rcar_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME
| DRIVER_ATOMIC,
- .lastclose = rcar_du_lastclose,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -431,13 +453,10 @@ static int rcar_du_remove(struct platform_device *pdev)
drm_dev_unregister(ddev);
- if (rcdu->fbdev)
- drm_fbdev_cma_fini(rcdu->fbdev);
-
drm_kms_helper_poll_fini(ddev);
drm_mode_config_cleanup(ddev);
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
return 0;
}
@@ -493,6 +512,8 @@ static int rcar_du_probe(struct platform_device *pdev)
DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
+ drm_fbdev_generic_setup(ddev, 32);
+
return 0;
error:
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 9f5563296c5a..a68da79b424e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -20,7 +20,6 @@
struct clk;
struct device;
struct drm_device;
-struct drm_fbdev_cma;
struct rcar_du_device;
#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */
@@ -78,7 +77,6 @@ struct rcar_du_device {
void __iomem *mmio;
struct drm_device *ddev;
- struct drm_fbdev_cma *fbdev;
struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS];
unsigned int num_crtcs;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 4ebd61ecbee1..9c7007d45408 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -255,13 +255,6 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
-static void rcar_du_output_poll_changed(struct drm_device *dev)
-{
- struct rcar_du_device *rcdu = dev->dev_private;
-
- drm_fbdev_cma_hotplug_event(rcdu->fbdev);
-}
-
/* -----------------------------------------------------------------------------
* Atomic Check and Update
*/
@@ -308,7 +301,6 @@ static const struct drm_mode_config_helper_funcs rcar_du_mode_config_helper = {
static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
.fb_create = rcar_du_fb_create,
- .output_poll_changed = rcar_du_output_poll_changed,
.atomic_check = rcar_du_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -543,7 +535,6 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
struct drm_device *dev = rcdu->ddev;
struct drm_encoder *encoder;
- struct drm_fbdev_cma *fbdev;
unsigned int dpad0_sources;
unsigned int num_encoders;
unsigned int num_groups;
@@ -582,7 +573,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
* Initialize vertical blanking interrupts handling. Start with vblank
* disabled for all CRTCs.
*/
- ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
+ ret = drm_vblank_init(dev, rcdu->num_crtcs);
if (ret < 0)
return ret;
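drm_vblank_init() takes a count of CRTCs, not a CRTC mask: with rcdu->num_crtcs = 4 the old expression passed (1 << 4) - 1 = 15 where 4 was intended, which the hunk above corrects.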
@@ -682,17 +673,5 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
drm_kms_helper_poll_init(dev);
- if (dev->mode_config.num_connector) {
- fbdev = drm_fbdev_cma_init(dev, 32,
- dev->mode_config.num_connector);
- if (IS_ERR(fbdev))
- return PTR_ERR(fbdev);
-
- rcdu->fbdev = fbdev;
- } else {
- dev_info(rcdu->dev,
- "no connector found, disabling fbdev emulation\n");
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 9e07758a755c..39d5ae3fdf72 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -783,13 +783,14 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
drm_plane_helper_add(&plane->plane,
&rcar_du_plane_helper_funcs);
+ drm_plane_create_alpha_property(&plane->plane);
+
if (type == DRM_PLANE_TYPE_PRIMARY)
continue;
drm_object_attach_property(&plane->plane.base,
rcdu->props.colorkey,
RCAR_DU_COLORKEY_NONE);
- drm_plane_create_alpha_property(&plane->plane);
drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 173d7ad0b991..534a128a869d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -790,6 +790,7 @@ static const struct of_device_id rcar_lvds_of_table[] = {
{ .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a7796-lvds", .data = &rcar_lvds_gen3_info },
+ { .compatible = "renesas,r8a77965-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a77970-lvds", .data = &rcar_lvds_r8a77970_info },
{ .compatible = "renesas,r8a77980-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a77990-lvds", .data = &rcar_lvds_r8a77990_info },
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 3105965fc260..5a485489a1e2 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -147,7 +147,7 @@ static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp,
}
static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp,
- u8 *buff, u8 buff_size)
+ u8 *buff, u16 buff_size)
{
u32 i;
int ret;
diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile
index 383d8d6c5847..1bb73dc4c88c 100644
--- a/drivers/gpu/drm/selftests/Makefile
+++ b/drivers/gpu/drm/selftests/Makefile
@@ -1,4 +1,5 @@
test-drm_modeset-y := test-drm_modeset_common.o test-drm_plane_helper.o \
- test-drm_format.o test-drm_framebuffer.o
+ test-drm_format.o test-drm_framebuffer.o \
+ test-drm_damage_helper.o
obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o
diff --git a/drivers/gpu/drm/selftests/drm_modeset_selftests.h b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
index 92601defd8f6..464753746013 100644
--- a/drivers/gpu/drm/selftests/drm_modeset_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
@@ -11,3 +11,24 @@ selftest(check_drm_format_block_width, igt_check_drm_format_block_width)
selftest(check_drm_format_block_height, igt_check_drm_format_block_height)
selftest(check_drm_format_min_pitch, igt_check_drm_format_min_pitch)
selftest(check_drm_framebuffer_create, igt_check_drm_framebuffer_create)
+selftest(damage_iter_no_damage, igt_damage_iter_no_damage)
+selftest(damage_iter_no_damage_fractional_src, igt_damage_iter_no_damage_fractional_src)
+selftest(damage_iter_no_damage_src_moved, igt_damage_iter_no_damage_src_moved)
+selftest(damage_iter_no_damage_fractional_src_moved, igt_damage_iter_no_damage_fractional_src_moved)
+selftest(damage_iter_no_damage_not_visible, igt_damage_iter_no_damage_not_visible)
+selftest(damage_iter_no_damage_no_crtc, igt_damage_iter_no_damage_no_crtc)
+selftest(damage_iter_no_damage_no_fb, igt_damage_iter_no_damage_no_fb)
+selftest(damage_iter_simple_damage, igt_damage_iter_simple_damage)
+selftest(damage_iter_single_damage, igt_damage_iter_single_damage)
+selftest(damage_iter_single_damage_intersect_src, igt_damage_iter_single_damage_intersect_src)
+selftest(damage_iter_single_damage_outside_src, igt_damage_iter_single_damage_outside_src)
+selftest(damage_iter_single_damage_fractional_src, igt_damage_iter_single_damage_fractional_src)
+selftest(damage_iter_single_damage_intersect_fractional_src, igt_damage_iter_single_damage_intersect_fractional_src)
+selftest(damage_iter_single_damage_outside_fractional_src, igt_damage_iter_single_damage_outside_fractional_src)
+selftest(damage_iter_single_damage_src_moved, igt_damage_iter_single_damage_src_moved)
+selftest(damage_iter_single_damage_fractional_src_moved, igt_damage_iter_single_damage_fractional_src_moved)
+selftest(damage_iter_damage, igt_damage_iter_damage)
+selftest(damage_iter_damage_one_intersect, igt_damage_iter_damage_one_intersect)
+selftest(damage_iter_damage_one_outside, igt_damage_iter_damage_one_outside)
+selftest(damage_iter_damage_src_moved, igt_damage_iter_damage_src_moved)
+selftest(damage_iter_damage_not_visible, igt_damage_iter_damage_not_visible)
diff --git a/drivers/gpu/drm/selftests/test-drm_damage_helper.c b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
new file mode 100644
index 000000000000..a2f753205a3e
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test case for drm_damage_helper functions
+ */
+
+#define pr_fmt(fmt) "drm_damage_helper: " fmt
+
+#include <drm/drm_damage_helper.h>
+
+#include "test-drm_modeset_common.h"
+
+static void set_plane_src(struct drm_plane_state *state, int x1, int y1, int x2,
+ int y2)
+{
+ state->src.x1 = x1;
+ state->src.y1 = y1;
+ state->src.x2 = x2;
+ state->src.y2 = y2;
+}
+
+static void set_damage_clip(struct drm_mode_rect *r, int x1, int y1, int x2,
+ int y2)
+{
+ r->x1 = x1;
+ r->y1 = y1;
+ r->x2 = x2;
+ r->y2 = y2;
+}
+
+static void set_damage_blob(struct drm_property_blob *damage_blob,
+ struct drm_mode_rect *r, uint32_t size)
+{
+ damage_blob->length = size;
+ damage_blob->data = r;
+}
+
+static void set_plane_damage(struct drm_plane_state *state,
+ struct drm_property_blob *damage_blob)
+{
+ state->fb_damage_clips = damage_blob;
+}
+
+static bool check_damage_clip(struct drm_plane_state *state, struct drm_rect *r,
+ int x1, int y1, int x2, int y2)
+{
+ /*
+ * Round down x1/y1 and round up x2/y2. Damage is not in 16.16 fixed
+ * point, so rounding outward covers every pixel the fractional source touches.
+ */
+ int src_x1 = state->src.x1 >> 16;
+ int src_y1 = state->src.y1 >> 16;
+ int src_x2 = (state->src.x2 >> 16) + !!(state->src.x2 & 0xFFFF);
+ int src_y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
+
+ if (x1 >= x2 || y1 >= y2) {
+ pr_err("Cannot have damage clip with no dimention.\n");
+ return false;
+ }
+
+ if (x1 < src_x1 || y1 < src_y1 || x2 > src_x2 || y2 > src_y2) {
+ pr_err("Damage cannot be outside rounded plane src.\n");
+ return false;
+ }
+
+ if (r->x1 != x1 || r->y1 != y1 || r->x2 != x2 || r->y2 != y2) {
+ pr_err("Damage = %d %d %d %d\n", r->x1, r->y1, r->x2, r->y2);
+ return false;
+ }
+
+ return true;
+}
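+
+/*
+ * Worked example (illustration only, not used by the tests): with the
+ * fractional sources used below, src.x1 = 0x3fffe is 3.99997 in 16.16
+ * fixed point and keeps integer part 3, while src.x2 = 0x3fffe +
+ * (1024 << 16) has integer part 1027 with a non-zero fractional
+ * remainder and rounds up to 1028, matching the expected clip
+ * (3, 3, 1028, 772).
+ */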
+
+int igt_damage_iter_no_damage(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src same as fb size. */
+ set_plane_src(&old_state, 0, 0, fb.width << 16, fb.height << 16);
+ set_plane_src(&state, 0, 0, fb.width << 16, fb.height << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 0, 0, 2048, 2048));
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_fractional_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part. */
+ set_plane_src(&old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return rounded off plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 3, 3, 1028, 772));
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src moved since old plane state. */
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 10 << 16, 10 << 16,
+ (10 + 1024) << 16, (10 + 768) << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 1034, 778));
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_fractional_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part and it moved since old plane state. */
+ set_plane_src(&old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_not_visible(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = false,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_no_crtc(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = 0,
+ .fb = &fb,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_no_fb(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = 0,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_simple_damage(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage set to plane src */
+ set_damage_clip(&damage, 0, 0, 1024, 768);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage when set.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 0, 0, 1024, 768));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ set_damage_clip(&damage, 256, 192, 768, 576);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage when set.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 256, 192, 768, 576));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_intersect_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage intersects with plane src. */
+ set_damage_clip(&damage, 256, 192, 1360, 768);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage clipped to src.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 256, 192, 1024, 768));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_outside_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage clip outside plane src */
+ set_damage_clip(&damage, 1360, 1360, 1380, 1380);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_fractional_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part. */
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_damage_clip(&damage, 10, 10, 256, 330);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage when set.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 256, 330));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_intersect_fractional_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part. */
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage intersects with plane src. */
+ set_damage_clip(&damage, 10, 1, 1360, 330);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage clipped to rounded off src.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 10, 4, 1029, 330));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_outside_fractional_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part. */
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage clip outside plane src */
+ set_damage_clip(&damage, 1360, 1360, 1380, 1380);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src moved since old plane state. */
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 10 << 16, 10 << 16,
+ (10 + 1024) << 16, (10 + 768) << 16);
+ set_damage_clip(&damage, 20, 30, 256, 256);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 1034, 778));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_fractional_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src with fractional part moved since old plane state. */
+ set_plane_src(&old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage intersects with plane src. */
+ set_damage_clip(&damage, 20, 30, 1360, 256);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return rounded off plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
+
+ return 0;
+}
+
+int igt_damage_iter_damage(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* 2 damage clips. */
+ set_damage_clip(&damage[0], 20, 30, 200, 180);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ if (num_hits == 0)
+ FAIL_ON(!check_damage_clip(&state, &clip, 20, 30, 200, 180));
+ if (num_hits == 1)
+ FAIL_ON(!check_damage_clip(&state, &clip, 240, 200, 280, 250));
+ num_hits++;
+ }
+
+ FAIL(num_hits != 2, "Should return damage when set.");
+
+ return 0;
+}
+
+int igt_damage_iter_damage_one_intersect(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* 2 damage clips, one intersects plane src. */
+ set_damage_clip(&damage[0], 20, 30, 200, 180);
+ set_damage_clip(&damage[1], 2, 2, 1360, 1360);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ if (num_hits == 0)
+ FAIL_ON(!check_damage_clip(&state, &clip, 20, 30, 200, 180));
+ if (num_hits == 1)
+ FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
+ num_hits++;
+ }
+
+ FAIL(num_hits != 2, "Should return damage when set.");
+
+ return 0;
+}
+
+int igt_damage_iter_damage_one_outside(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage when set.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 240, 200, 280, 250));
+
+ return 0;
+}
+
+int igt_damage_iter_damage_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return round off plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 3, 3, 1028, 772));
+
+ return 0;
+}
+
+int igt_damage_iter_damage_not_visible(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = false,
+ };
+
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should not return any damage.");
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.h b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
index d5df5bd51b05..8c76f09c12d1 100644
--- a/drivers/gpu/drm/selftests/test-drm_modeset_common.h
+++ b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
@@ -18,5 +18,26 @@ int igt_check_drm_format_block_width(void *ignored);
int igt_check_drm_format_block_height(void *ignored);
int igt_check_drm_format_min_pitch(void *ignored);
int igt_check_drm_framebuffer_create(void *ignored);
+int igt_damage_iter_no_damage(void *ignored);
+int igt_damage_iter_no_damage_fractional_src(void *ignored);
+int igt_damage_iter_no_damage_src_moved(void *ignored);
+int igt_damage_iter_no_damage_fractional_src_moved(void *ignored);
+int igt_damage_iter_no_damage_not_visible(void *ignored);
+int igt_damage_iter_no_damage_no_crtc(void *ignored);
+int igt_damage_iter_no_damage_no_fb(void *ignored);
+int igt_damage_iter_simple_damage(void *ignored);
+int igt_damage_iter_single_damage(void *ignored);
+int igt_damage_iter_single_damage_intersect_src(void *ignored);
+int igt_damage_iter_single_damage_outside_src(void *ignored);
+int igt_damage_iter_single_damage_fractional_src(void *ignored);
+int igt_damage_iter_single_damage_intersect_fractional_src(void *ignored);
+int igt_damage_iter_single_damage_outside_fractional_src(void *ignored);
+int igt_damage_iter_single_damage_src_moved(void *ignored);
+int igt_damage_iter_single_damage_fractional_src_moved(void *ignored);
+int igt_damage_iter_damage(void *ignored);
+int igt_damage_iter_damage_one_intersect(void *ignored);
+int igt_damage_iter_damage_one_outside(void *ignored);
+int igt_damage_iter_damage_src_moved(void *ignored);
+int igt_damage_iter_damage_not_visible(void *ignored);
#endif
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 6ececad6f845..8554102a6ead 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -194,7 +194,7 @@ static int shmob_drm_remove(struct platform_device *pdev)
drm_kms_helper_poll_fini(ddev);
drm_mode_config_cleanup(ddev);
drm_irq_uninstall(ddev);
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
return 0;
}
@@ -290,7 +290,7 @@ err_modeset_cleanup:
drm_kms_helper_poll_fini(ddev);
drm_mode_config_cleanup(ddev);
err_free_drm_dev:
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
return ret;
}
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 61c2379fba87..ed76e52eb213 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -252,10 +252,8 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
struct sti_compositor *compo;
struct drm_crtc *crtc = data;
struct sti_mixer *mixer;
- struct sti_private *priv;
unsigned int pipe;
- priv = crtc->dev->dev_private;
pipe = drm_crtc_index(crtc);
compo = container_of(nb, struct sti_compositor, vtg_vblank_nb[pipe]);
mixer = compo->mixer[pipe];
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index bf49c55b0f2c..9e9255ee59cd 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -48,8 +48,12 @@ static const u32 sunxi_rgb2yuv_coef[12] = {
/*
* These coefficients are taken from the A33 BSP from Allwinner.
*
- * The formula is for each component, each coefficient being multiplied by
- * 1024 and each constant being multiplied by 16:
+ * The first three values of each row are coded as 13-bit signed fixed-point
+ * numbers, with 10 bits for the fractional part. The fourth value is a
+ * constant coded as a 14-bit signed fixed-point number with 4 bits for the
+ * fractional part.
+ *
+ * The values in table order give the following colorspace translation:
* G = 1.164 * Y - 0.391 * U - 0.813 * V + 135
* R = 1.164 * Y + 1.596 * V - 222
* B = 1.164 * Y + 2.018 * U + 276
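As an illustration of the encoding described above (values derived here for clarity, not copied from the BSP table): a coefficient of 1.164 stored as a 13-bit signed value with 10 fractional bits becomes round(1.164 * 1024) = 1192, -0.391 becomes -400, and the constant 135 stored with 4 fractional bits becomes 135 * 16 = 2160.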
@@ -155,6 +159,36 @@ static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
return 0;
}
+static const uint32_t sun4i_backend_formats[] = {
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+};
+
+bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier)
+{
+ unsigned int i;
+
+ if (modifier != DRM_FORMAT_MOD_LINEAR)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(sun4i_backend_formats); i++)
+ if (sun4i_backend_formats[i] == fmt)
+ return true;
+
+ return false;
+}
+
int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
int layer, struct drm_plane *plane)
{
@@ -395,6 +429,15 @@ int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend, int layer,
return 0;
}
+void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
+ int layer)
+{
+ regmap_update_bits(backend->engine.regs,
+ SUN4I_BACKEND_ATTCTL_REG0(layer),
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN |
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);
+}
+
static bool sun4i_backend_plane_uses_scaler(struct drm_plane_state *state)
{
u16 src_h = state->src_h >> 16;
@@ -413,11 +456,50 @@ static bool sun4i_backend_plane_uses_frontend(struct drm_plane_state *state)
{
struct sun4i_layer *layer = plane_to_sun4i_layer(state->plane);
struct sun4i_backend *backend = layer->backend;
+ uint32_t format = state->fb->format->format;
+ uint64_t modifier = state->fb->modifier;
if (IS_ERR(backend->frontend))
return false;
- return sun4i_backend_plane_uses_scaler(state);
+ if (!sun4i_frontend_format_is_supported(format, modifier))
+ return false;
+
+ if (!sun4i_backend_format_is_supported(format, modifier))
+ return true;
+
+ /*
+ * TODO: The backend alone allows 2x and 4x integer scaling, including
+ * support for an alpha component (which the frontend doesn't support).
+ * In that case, use the backend directly instead of the frontend by
+ * adding a check here that returns false.
+ */
+
+ if (sun4i_backend_plane_uses_scaler(state))
+ return true;
+
+ /*
+ * Here the format is supported by both the frontend and the backend
+ * and no frontend scaling is required, so use the backend directly.
+ */
+ return false;
+}
+
+static bool sun4i_backend_plane_is_supported(struct drm_plane_state *state,
+ bool *uses_frontend)
+{
+ if (sun4i_backend_plane_uses_frontend(state)) {
+ *uses_frontend = true;
+ return true;
+ }
+
+ *uses_frontend = false;
+
+ /* Scaling is not supported without the frontend. */
+ if (sun4i_backend_plane_uses_scaler(state))
+ return false;
+
+ return true;
}
static void sun4i_backend_atomic_begin(struct sunxi_engine *engine,
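Taken together, the routing logic added above works out roughly as follows: a plane goes through the frontend only when the frontend supports its format and either the backend does not or scaling is requested; when both blocks support the format and no scaling is needed, the backend is used directly; and a plane that needs scaling with a format the frontend cannot handle is rejected in atomic_check via sun4i_backend_plane_is_supported().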
@@ -460,14 +542,19 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
struct drm_framebuffer *fb = plane_state->fb;
struct drm_format_name_buf format_name;
- if (sun4i_backend_plane_uses_frontend(plane_state)) {
+ if (!sun4i_backend_plane_is_supported(plane_state,
+ &layer_state->uses_frontend))
+ return -EINVAL;
+
+ if (layer_state->uses_frontend) {
DRM_DEBUG_DRIVER("Using the frontend for plane %d\n",
plane->index);
-
- layer_state->uses_frontend = true;
num_frontend_planes++;
} else {
- layer_state->uses_frontend = false;
+ if (fb->format->is_yuv) {
+ DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
+ num_yuv_planes++;
+ }
}
DRM_DEBUG_DRIVER("Plane FB format is %s\n",
@@ -476,11 +563,6 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
num_alpha_planes++;
- if (fb->format->is_yuv) {
- DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
- num_yuv_planes++;
- }
-
DRM_DEBUG_DRIVER("Plane zpos is %d\n",
plane_state->normalized_zpos);
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h
index e3d4c6035eb2..01f66463271b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.h
@@ -198,6 +198,7 @@ engine_to_sun4i_backend(struct sunxi_engine *engine)
void sun4i_backend_layer_enable(struct sun4i_backend *backend,
int layer, bool enable);
+bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier);
int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
int layer, struct drm_plane *plane);
int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
@@ -208,5 +209,7 @@ int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend,
int layer, uint32_t in_fmt);
int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend,
int layer, struct drm_plane *plane);
+void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
+ int layer);
#endif /* _SUN4I_BACKEND_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index ef773d36baf0..9e4c375ccc96 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -28,6 +28,16 @@
#include "sun4i_tcon.h"
#include "sun8i_tcon_top.h"
+static int drm_sun4i_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ /* The hardware only allows even pitches for YUV buffers. */
+ args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 2);
+
+ return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+}
+
DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
static struct drm_driver sun4i_drv_driver = {
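As an example of the alignment above: a hypothetical dumb-buffer request of width 1023 at 8 bpp gives DIV_ROUND_UP(1023 * 8, 8) = 1023 bytes, which ALIGN(..., 2) rounds up to 1024 so that chroma lines of a YUV buffer never start on an odd byte; requests whose pitch is already even are left unchanged.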
@@ -42,7 +52,7 @@ static struct drm_driver sun4i_drv_driver = {
.minor = 0,
/* GEM Operations */
- .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_create = drm_sun4i_gem_dumb_create,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -400,6 +410,7 @@ static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
{ .compatible = "allwinner,sun9i-a80-display-engine" },
{ .compatible = "allwinner,sun50i-a64-display-engine" },
+ { .compatible = "allwinner,sun50i-h6-display-engine" },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index ddf6cfa6dd23..1a7ebc45747e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -107,8 +107,34 @@ EXPORT_SYMBOL(sun4i_frontend_update_buffer);
static int sun4i_frontend_drm_format_to_input_fmt(uint32_t fmt, u32 *val)
{
switch (fmt) {
- case DRM_FORMAT_ARGB8888:
- *val = 5;
+ case DRM_FORMAT_XRGB8888:
+ *val = SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_RGB;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sun4i_frontend_drm_format_to_input_mode(uint32_t fmt, u32 *val)
+{
+ if (drm_format_num_planes(fmt) == 1)
+ *val = SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_PACKED;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sun4i_frontend_drm_format_to_input_sequence(uint32_t fmt, u32 *val)
+{
+ switch (fmt) {
+ case DRM_FORMAT_BGRX8888:
+ *val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_BGRX;
+ return 0;
+
+ case DRM_FORMAT_XRGB8888:
+ *val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_XRGB;
return 0;
default:
@@ -119,9 +145,12 @@ static int sun4i_frontend_drm_format_to_input_fmt(uint32_t fmt, u32 *val)
static int sun4i_frontend_drm_format_to_output_fmt(uint32_t fmt, u32 *val)
{
switch (fmt) {
+ case DRM_FORMAT_BGRX8888:
+ *val = SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_BGRX8888;
+ return 0;
+
case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- *val = 2;
+ *val = SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_XRGB8888;
return 0;
default:
@@ -129,22 +158,54 @@ static int sun4i_frontend_drm_format_to_output_fmt(uint32_t fmt, u32 *val)
}
}
+static const uint32_t sun4i_frontend_formats[] = {
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XRGB8888,
+};
+
+bool sun4i_frontend_format_is_supported(uint32_t fmt, uint64_t modifier)
+{
+ unsigned int i;
+
+ if (modifier != DRM_FORMAT_MOD_LINEAR)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(sun4i_frontend_formats); i++)
+ if (sun4i_frontend_formats[i] == fmt)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(sun4i_frontend_format_is_supported);
+
int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
struct drm_plane *plane, uint32_t out_fmt)
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
+ uint32_t format = fb->format->format;
u32 out_fmt_val;
- u32 in_fmt_val;
+ u32 in_fmt_val, in_mod_val, in_ps_val;
int ret;
- ret = sun4i_frontend_drm_format_to_input_fmt(fb->format->format,
- &in_fmt_val);
+ ret = sun4i_frontend_drm_format_to_input_fmt(format, &in_fmt_val);
if (ret) {
DRM_DEBUG_DRIVER("Invalid input format\n");
return ret;
}
+ ret = sun4i_frontend_drm_format_to_input_mode(format, &in_mod_val);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Invalid input mode\n");
+ return ret;
+ }
+
+ ret = sun4i_frontend_drm_format_to_input_sequence(format, &in_ps_val);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Invalid pixel sequence\n");
+ return ret;
+ }
+
ret = sun4i_frontend_drm_format_to_output_fmt(out_fmt, &out_fmt_val);
if (ret) {
DRM_DEBUG_DRIVER("Invalid output format\n");
@@ -162,10 +223,12 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE1_REG, 0x400);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE1_REG, 0x400);
+ regmap_update_bits(frontend->regs, SUN4I_FRONTEND_BYPASS_REG,
+ SUN4I_FRONTEND_BYPASS_CSC_EN,
+ SUN4I_FRONTEND_BYPASS_CSC_EN);
+
regmap_write(frontend->regs, SUN4I_FRONTEND_INPUT_FMT_REG,
- SUN4I_FRONTEND_INPUT_FMT_DATA_MOD(1) |
- SUN4I_FRONTEND_INPUT_FMT_DATA_FMT(in_fmt_val) |
- SUN4I_FRONTEND_INPUT_FMT_PS(1));
+ in_mod_val | in_fmt_val | in_ps_val);
/*
 * TODO: It looks like the A31 and A80 at least will need the
@@ -173,7 +236,7 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
* ARGB8888).
*/
regmap_write(frontend->regs, SUN4I_FRONTEND_OUTPUT_FMT_REG,
- SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT(out_fmt_val));
+ out_fmt_val);
return 0;
}
@@ -183,16 +246,24 @@ void sun4i_frontend_update_coord(struct sun4i_frontend *frontend,
struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ uint32_t luma_width, luma_height;
+ uint32_t chroma_width, chroma_height;
/* Set height and width */
DRM_DEBUG_DRIVER("Frontend size W: %u H: %u\n",
state->crtc_w, state->crtc_h);
+
+ luma_width = state->src_w >> 16;
+ luma_height = state->src_h >> 16;
+
+ chroma_width = DIV_ROUND_UP(luma_width, fb->format->hsub);
+ chroma_height = DIV_ROUND_UP(luma_height, fb->format->vsub);
+
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_INSIZE_REG,
- SUN4I_FRONTEND_INSIZE(state->src_h >> 16,
- state->src_w >> 16));
+ SUN4I_FRONTEND_INSIZE(luma_height, luma_width));
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_INSIZE_REG,
- SUN4I_FRONTEND_INSIZE(state->src_h >> 16,
- state->src_w >> 16));
+ SUN4I_FRONTEND_INSIZE(chroma_height, chroma_width));
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_OUTSIZE_REG,
SUN4I_FRONTEND_OUTSIZE(state->crtc_h, state->crtc_w));
@@ -200,14 +271,14 @@ void sun4i_frontend_update_coord(struct sun4i_frontend *frontend,
SUN4I_FRONTEND_OUTSIZE(state->crtc_h, state->crtc_w));
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZFACT_REG,
- state->src_w / state->crtc_w);
+ (luma_width << 16) / state->crtc_w);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZFACT_REG,
- state->src_w / state->crtc_w);
+ (chroma_width << 16) / state->crtc_w);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTFACT_REG,
- state->src_h / state->crtc_h);
+ (luma_height << 16) / state->crtc_h);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTFACT_REG,
- state->src_h / state->crtc_h);
+ (chroma_height << 16) / state->crtc_h);
regmap_write_bits(frontend->regs, SUN4I_FRONTEND_FRM_CTRL_REG,
SUN4I_FRONTEND_FRM_CTRL_REG_RDY,
@@ -339,10 +410,6 @@ static int sun4i_frontend_runtime_resume(struct device *dev)
SUN4I_FRONTEND_EN_EN,
SUN4I_FRONTEND_EN_EN);
- regmap_update_bits(frontend->regs, SUN4I_FRONTEND_BYPASS_REG,
- SUN4I_FRONTEND_BYPASS_CSC_EN,
- SUN4I_FRONTEND_BYPASS_CSC_EN);
-
sun4i_frontend_scaler_init(frontend);
return 0;
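The CH0/CH1 HORZFACT/VERTFACT values written in the hunks above are src/dst ratios in 16.16 fixed point, now computed per channel: scaling a 1024-pixel-wide luma source onto a 512-pixel CRTC window gives (1024 << 16) / 512 = 0x20000 (a factor of 2.0), while the chroma channel of a 4:2:0 buffer uses its 512-pixel plane width and gets 0x10000 (1.0).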
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.h b/drivers/gpu/drm/sun4i/sun4i_frontend.h
index 02661ce81f3e..ad146e8d8d70 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.h
@@ -26,12 +26,14 @@
#define SUN4I_FRONTEND_LINESTRD0_REG 0x040
#define SUN4I_FRONTEND_INPUT_FMT_REG 0x04c
-#define SUN4I_FRONTEND_INPUT_FMT_DATA_MOD(mod) ((mod) << 8)
-#define SUN4I_FRONTEND_INPUT_FMT_DATA_FMT(fmt) ((fmt) << 4)
-#define SUN4I_FRONTEND_INPUT_FMT_PS(ps) (ps)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_PACKED (1 << 8)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_RGB (5 << 4)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_PS_BGRX 0
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_PS_XRGB 1
#define SUN4I_FRONTEND_OUTPUT_FMT_REG 0x05c
-#define SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT(fmt) (fmt)
+#define SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_BGRX8888 1
+#define SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_XRGB8888 2
#define SUN4I_FRONTEND_CH0_INSIZE_REG 0x100
#define SUN4I_FRONTEND_INSIZE(h, w) ((((h) - 1) << 16) | (((w) - 1)))
@@ -95,5 +97,6 @@ void sun4i_frontend_update_coord(struct sun4i_frontend *frontend,
struct drm_plane *plane);
int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
struct drm_plane *plane, uint32_t out_fmt);
+bool sun4i_frontend_format_is_supported(uint32_t fmt, uint64_t modifier);
#endif /* _SUN4I_FRONTEND_H_ */
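With the named constants above, an XRGB8888 input programs SUN4I_FRONTEND_INPUT_FMT_REG with SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_PACKED | SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_RGB | SUN4I_FRONTEND_INPUT_FMT_DATA_PS_XRGB = (1 << 8) | (5 << 4) | 1 = 0x151, the same value the old DATA_MOD(1) | DATA_FMT(5) | PS(1) macros produced.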
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 78f77af8805a..29631e0efde3 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drmP.h>
#include "sun4i_backend.h"
@@ -92,14 +93,16 @@ static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
struct sun4i_backend *backend = layer->backend;
struct sun4i_frontend *frontend = backend->frontend;
+ sun4i_backend_cleanup_layer(backend, layer->id);
+
if (layer_state->uses_frontend) {
sun4i_frontend_init(frontend);
sun4i_frontend_update_coord(frontend, plane);
sun4i_frontend_update_buffer(frontend, plane);
sun4i_frontend_update_formats(frontend, plane,
- DRM_FORMAT_ARGB8888);
+ DRM_FORMAT_XRGB8888);
sun4i_backend_update_layer_frontend(backend, layer->id,
- DRM_FORMAT_ARGB8888);
+ DRM_FORMAT_XRGB8888);
sun4i_frontend_enable(frontend);
} else {
sun4i_backend_update_layer_formats(backend, layer->id, plane);
@@ -112,6 +115,7 @@ static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_disable = sun4i_backend_layer_atomic_disable,
.atomic_update = sun4i_backend_layer_atomic_update,
};
@@ -125,10 +129,11 @@ static const struct drm_plane_funcs sun4i_backend_layer_funcs = {
.update_plane = drm_atomic_helper_update_plane,
};
-static const uint32_t sun4i_backend_layer_formats[] = {
+static const uint32_t sun4i_layer_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGB888,
@@ -154,8 +159,8 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
/* possible crtcs are set later */
ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun4i_backend_layer_funcs,
- sun4i_backend_layer_formats,
- ARRAY_SIZE(sun4i_backend_layer_formats),
+ sun4i_layer_formats,
+ ARRAY_SIZE(sun4i_layer_formats),
NULL, type, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
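The sun4i and sun8i planes in this patch all install drm_gem_fb_prepare_fb() as their .prepare_fb hook. A rough summary of what that core helper does, for context:

/* drm_gem_fb_prepare_fb() (drm_gem_framebuffer_helper.c): if the
 * framebuffer's GEM object is backed by a dma-buf, look up its exclusive
 * fence and attach it to the plane state via drm_atomic_set_fence_for_plane(),
 * so the atomic commit waits for the buffer's producer before scanout.
 */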
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index f949287d926c..0420f5c978b9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -478,8 +478,11 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
}
static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
+ struct drm_connector *connector = sun4i_tcon_get_connector(encoder);
+ struct drm_display_info display_info = connector->display_info;
unsigned int bp, hsync, vsync;
u8 clk_delay;
u32 val = 0;
@@ -491,8 +494,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
sun4i_tcon0_mode_set_common(tcon, mode);
/* Set dithering if needed */
- if (tcon->panel)
- sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector);
+ sun4i_tcon0_mode_set_dithering(tcon, connector);
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
@@ -541,6 +543,9 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+ if (display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
+ val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE;
+
/*
* On A20 and similar SoCs, the only way to achieve Positive Edge
* (Rising Edge), is setting dclk clock phase to 2/3(240°).
@@ -556,20 +561,16 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
* Following code is a way to avoid quirks all around TCON
* and DOTCLOCK drivers.
*/
- if (tcon->panel) {
- struct drm_panel *panel = tcon->panel;
- struct drm_connector *connector = panel->connector;
- struct drm_display_info display_info = connector->display_info;
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
+ clk_set_phase(tcon->dclk, 240);
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
- clk_set_phase(tcon->dclk, 240);
-
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
- clk_set_phase(tcon->dclk, 0);
- }
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ clk_set_phase(tcon->dclk, 0);
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
- SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
+ SUN4I_TCON0_IO_POL_HSYNC_POSITIVE |
+ SUN4I_TCON0_IO_POL_VSYNC_POSITIVE |
+ SUN4I_TCON0_IO_POL_DE_NEGATIVE,
val);
/* Map output pins to channel 0 */
@@ -684,7 +685,7 @@ void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
sun4i_tcon0_mode_set_lvds(tcon, encoder, mode);
break;
case DRM_MODE_ENCODER_NONE:
- sun4i_tcon0_mode_set_rgb(tcon, mode);
+ sun4i_tcon0_mode_set_rgb(tcon, encoder, mode);
sun4i_tcon_set_mux(tcon, 0, encoder);
break;
case DRM_MODE_ENCODER_TVDAC:
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 3d492c8be1fc..b5214d71610f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -116,6 +116,7 @@
#define SUN4I_TCON0_IO_POL_REG 0x88
#define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase) ((phase & 3) << 28)
+#define SUN4I_TCON0_IO_POL_DE_NEGATIVE BIT(27)
#define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25)
#define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE BIT(24)
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index e3fc8fa920fb..18534263a05d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -19,6 +19,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drmP.h>
@@ -300,6 +301,7 @@ static void sun8i_ui_layer_atomic_update(struct drm_plane *plane,
}
static struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = sun8i_ui_layer_atomic_check,
.atomic_disable = sun8i_ui_layer_atomic_disable,
.atomic_update = sun8i_ui_layer_atomic_update,
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 4249edfb47ed..87be898f9b7a 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -13,6 +13,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drmP.h>
@@ -336,6 +337,7 @@ static void sun8i_vi_layer_atomic_update(struct drm_plane *plane,
}
static struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = sun8i_vi_layer_atomic_check,
.atomic_disable = sun8i_vi_layer_atomic_disable,
.atomic_update = sun8i_vi_layer_atomic_update,
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
index 9af51d982a33..01a6f2d42440 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
@@ -36,77 +36,6 @@
* and registers the DRM device using devm_tinydrm_register().
*/
-/**
- * tinydrm_gem_cma_prime_import_sg_table - Produce a CMA GEM object from
- * another driver's scatter/gather table of pinned pages
- * @drm: DRM device to import into
- * @attach: DMA-BUF attachment
- * @sgt: Scatter/gather table of pinned pages
- *
- * This function imports a scatter/gather table exported via DMA-BUF by
- * another driver using drm_gem_cma_prime_import_sg_table(). It sets the
- * kernel virtual address on the CMA object. Drivers should use this as their
- * &drm_driver->gem_prime_import_sg_table callback if they need the virtual
- * address. tinydrm_gem_cma_free_object() should be used in combination with
- * this function.
- *
- * Returns:
- * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
- * error code on failure.
- */
-struct drm_gem_object *
-tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt)
-{
- struct drm_gem_cma_object *cma_obj;
- struct drm_gem_object *obj;
- void *vaddr;
-
- vaddr = dma_buf_vmap(attach->dmabuf);
- if (!vaddr) {
- DRM_ERROR("Failed to vmap PRIME buffer\n");
- return ERR_PTR(-ENOMEM);
- }
-
- obj = drm_gem_cma_prime_import_sg_table(drm, attach, sgt);
- if (IS_ERR(obj)) {
- dma_buf_vunmap(attach->dmabuf, vaddr);
- return obj;
- }
-
- cma_obj = to_drm_gem_cma_obj(obj);
- cma_obj->vaddr = vaddr;
-
- return obj;
-}
-EXPORT_SYMBOL(tinydrm_gem_cma_prime_import_sg_table);
-
-/**
- * tinydrm_gem_cma_free_object - Free resources associated with a CMA GEM
- * object
- * @gem_obj: GEM object to free
- *
- * This function frees the backing memory of the CMA GEM object, cleans up the
- * GEM object state and frees the memory used to store the object itself using
- * drm_gem_cma_free_object(). It also handles PRIME buffers which has the kernel
- * virtual address set by tinydrm_gem_cma_prime_import_sg_table(). Drivers
- * can use this as their &drm_driver->gem_free_object_unlocked callback.
- */
-void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj)
-{
- if (gem_obj->import_attach) {
- struct drm_gem_cma_object *cma_obj;
-
- cma_obj = to_drm_gem_cma_obj(gem_obj);
- dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
- cma_obj->vaddr = NULL;
- }
-
- drm_gem_cma_free_object(gem_obj);
-}
-EXPORT_SYMBOL_GPL(tinydrm_gem_cma_free_object);
-
static struct drm_framebuffer *
tinydrm_fb_create(struct drm_device *drm, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
index dcd390163a4a..bf6bfbc5d412 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
@@ -9,12 +9,18 @@
#include <linux/backlight.h>
#include <linux/dma-buf.h>
+#include <linux/module.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>
#include <linux/swab.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/tinydrm/tinydrm.h>
#include <drm/tinydrm/tinydrm-helpers.h>
+#include <uapi/drm/drm.h>
static unsigned int spi_max;
module_param(spi_max, uint, 0400);
diff --git a/drivers/gpu/drm/tinydrm/hx8357d.c b/drivers/gpu/drm/tinydrm/hx8357d.c
index c3e51c2baebc..81a2bbeb25d4 100644
--- a/drivers/gpu/drm/tinydrm/hx8357d.c
+++ b/drivers/gpu/drm/tinydrm/hx8357d.c
@@ -16,7 +16,7 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
@@ -188,7 +188,7 @@ DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
static struct drm_driver hx8357d_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
.fops = &hx8357d_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "hx8357d",
.desc = "HX8357D",
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index 455fefe012f5..78f7c2d1b449 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -20,7 +20,8 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -367,7 +368,7 @@ static struct drm_driver ili9225_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &ili9225_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.name = "ili9225",
.desc = "Ilitek ILI9225",
.date = "20171106",
diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tinydrm/ili9341.c
index 6701037749a7..51395bdc6ca2 100644
--- a/drivers/gpu/drm/tinydrm/ili9341.c
+++ b/drivers/gpu/drm/tinydrm/ili9341.c
@@ -15,7 +15,7 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
@@ -144,7 +144,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
static struct drm_driver ili9341_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
.fops = &ili9341_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
.desc = "Ilitek ILI9341",
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index d7bb4c5e6657..3fa62e77c30b 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -17,9 +17,9 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_modeset_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
#include <video/mipi_display.h>
@@ -153,7 +153,7 @@ static struct drm_driver mi0283qt_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
.desc = "Multi-Inno MI0283QT",
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index 1bb870021f6e..3a05e56f9b0d 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -9,15 +9,19 @@
* (at your option) any later version.
*/
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+#include <uapi/drm/drm.h>
#include <video/mipi_display.h>
#define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index 50a1d4216ce7..54d6fe0f37ce 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -26,6 +26,8 @@
#include <linux/spi/spi.h>
#include <linux/thermal.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/tinydrm.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -106,12 +108,11 @@ static int repaper_spi_transfer(struct spi_device *spi, u8 header,
/* Stack allocated tx? */
if (tx && len <= 32) {
- txbuf = kmalloc(len, GFP_KERNEL);
+ txbuf = kmemdup(tx, len, GFP_KERNEL);
if (!txbuf) {
ret = -ENOMEM;
goto out_free;
}
- memcpy(txbuf, tx, len);
}
if (rx) {
@@ -882,7 +883,7 @@ static struct drm_driver repaper_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &repaper_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.name = "repaper",
.desc = "Pervasive Displays RePaper e-ink panels",
.date = "20170405",
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index 2fcbc3067d71..a6a8a1081b73 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -17,7 +17,8 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -303,7 +304,7 @@ static struct drm_driver st7586_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &st7586_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
.desc = "Sitronix ST7586",
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
index 3081bc57c116..b39779e0dcd8 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -14,7 +14,7 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -119,7 +119,7 @@ static struct drm_driver st7735r_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &st7735r_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
.desc = "Sitronix ST7735R",
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 72efcecb44f7..28e2d03c0ccf 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -249,7 +249,7 @@ static int tve200_probe(struct platform_device *pdev)
clk_disable:
clk_disable_unprepare(priv->pclk);
dev_unref:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -263,7 +263,7 @@ static int tve200_remove(struct platform_device *pdev)
drm_panel_bridge_remove(priv->bridge);
drm_mode_config_cleanup(drm);
clk_disable_unprepare(priv->pclk);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return 0;
}
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 54d96518a131..a08766d39eab 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -293,6 +293,7 @@ v3d_prime_import_sg_table(struct drm_device *dev,
bo->resv = attach->dmabuf->resv;
bo->sgt = sgt;
+ obj->import_attach = attach;
v3d_bo_get_pages(bo);
v3d_mmu_insert_ptes(bo);
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 2a85fa68ffea..f0afcec72c34 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -112,10 +112,15 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
return 0;
}
- /* Any params that aren't just register reads would go here. */
- DRM_DEBUG("Unknown parameter %d\n", args->param);
- return -EINVAL;
+ switch (args->param) {
+ case DRM_V3D_PARAM_SUPPORTS_TFU:
+ args->value = 1;
+ return 0;
+ default:
+ DRM_DEBUG("Unknown parameter %d\n", args->param);
+ return -EINVAL;
+ }
}
static int
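Userspace can probe for the new TFU queue before trying to use it. A minimal sketch, assuming libdrm's drmIoctl() and the v3d uapi header this series extends (v3d_supports_tfu() is a hypothetical helper; the struct follows the usual param/value layout):

#include <xf86drm.h>
#include <drm/v3d_drm.h>

/* Returns 1 if the kernel advertises TFU support, 0 otherwise. */
static int v3d_supports_tfu(int fd)
{
	struct drm_v3d_get_param p = {
		.param = DRM_V3D_PARAM_SUPPORTS_TFU,
	};

	if (drmIoctl(fd, DRM_IOCTL_V3D_GET_PARAM, &p) != 0)
		return 0;

	return p.value == 1;
}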
@@ -170,7 +175,8 @@ static const struct file_operations v3d_drm_fops = {
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
* protection between clients. Note that render nodes would be
* able to submit CLs that could access BOs from clients authenticated
- * with the master node.
+ * with the master node. The TFU doesn't use the GMP, so it would
+ * need to stay DRM_AUTH until we do buffer size/offset validation.
*/
static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CL, v3d_submit_cl_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
@@ -179,6 +185,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_MMAP_BO, v3d_mmap_bo_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_GET_PARAM, v3d_get_param_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
};
static const struct vm_operations_struct v3d_vm_ops = {
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index cbe5be0c47eb..dcb772a19191 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -7,19 +7,18 @@
#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>
+#include "uapi/drm/v3d_drm.h"
#define GMP_GRANULARITY (128 * 1024)
-/* Enum for each of the V3D queues. We maintain various queue
- * tracking as an array because at some point we'll want to support
- * the TFU (texture formatting unit) as another queue.
- */
+/* Enum for each of the V3D queues. */
enum v3d_queue {
V3D_BIN,
V3D_RENDER,
+ V3D_TFU,
};
-#define V3D_MAX_QUEUES (V3D_RENDER + 1)
+#define V3D_MAX_QUEUES (V3D_TFU + 1)
struct v3d_queue_state {
struct drm_gpu_scheduler sched;
@@ -68,6 +67,7 @@ struct v3d_dev {
struct v3d_exec_info *bin_job;
struct v3d_exec_info *render_job;
+ struct v3d_tfu_job *tfu_job;
struct v3d_queue_state queue[V3D_MAX_QUEUES];
@@ -218,6 +218,25 @@ struct v3d_exec_info {
u32 qma, qms, qts;
};
+struct v3d_tfu_job {
+ struct drm_sched_job base;
+
+ struct drm_v3d_submit_tfu args;
+
+ /* An optional fence userspace can pass in for the job to depend on. */
+ struct dma_fence *in_fence;
+
+ /* v3d fence to be signaled by IRQ handler when the job is complete. */
+ struct dma_fence *done_fence;
+
+ struct v3d_dev *v3d;
+
+ struct kref refcount;
+
+ /* This is the array of BOs that were looked up at the start of exec. */
+ struct v3d_bo *bo[4];
+};
+
/**
* _wait_for - magic (register) wait macro
*
@@ -281,9 +300,12 @@ int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void v3d_exec_put(struct v3d_exec_info *exec);
+void v3d_tfu_job_put(struct v3d_tfu_job *exec);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_flush_caches(struct v3d_dev *v3d);
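The new v3d_tfu_job is reference counted; its lifetime, sketched with the helpers this patch adds in v3d_gem.c and v3d_sched.c:

/* v3d_submit_tfu_ioctl():  kref_init()        refcount = 1
 *                          kref_get()         refcount = 2 (scheduler's ref)
 *                          drm_sched_entity_push_job()
 *                          v3d_tfu_job_put()  refcount = 1
 * ...TFU runs, the hub IRQ signals done_fence...
 * v3d_tfu_job_free():      v3d_tfu_job_put()  refcount = 0
 *                          -> v3d_tfu_job_cleanup() drops the fences and BOs,
 *                             releases the runtime PM reference and frees the job.
 */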
diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c
index 50bfcf9a8a1a..b0a2a1ae2eb1 100644
--- a/drivers/gpu/drm/v3d/v3d_fence.c
+++ b/drivers/gpu/drm/v3d/v3d_fence.c
@@ -29,10 +29,16 @@ static const char *v3d_fence_get_timeline_name(struct dma_fence *fence)
{
struct v3d_fence *f = to_v3d_fence(fence);
- if (f->queue == V3D_BIN)
+ switch (f->queue) {
+ case V3D_BIN:
return "v3d-bin";
- else
+ case V3D_RENDER:
return "v3d-render";
+ case V3D_TFU:
+ return "v3d-tfu";
+ default:
+ return NULL;
+ }
}
const struct dma_fence_ops v3d_fence_ops = {
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index b88c96911453..05ca6319065e 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -207,32 +207,26 @@ v3d_flush_caches(struct v3d_dev *v3d)
}
static void
-v3d_attach_object_fences(struct v3d_exec_info *exec)
+v3d_attach_object_fences(struct v3d_bo **bos, int bo_count,
+ struct dma_fence *fence)
{
- struct dma_fence *out_fence = exec->render_done_fence;
- struct v3d_bo *bo;
int i;
- for (i = 0; i < exec->bo_count; i++) {
- bo = to_v3d_bo(&exec->bo[i]->base);
-
+ for (i = 0; i < bo_count; i++) {
/* XXX: Use shared fences for read-only objects. */
- reservation_object_add_excl_fence(bo->resv, out_fence);
+ reservation_object_add_excl_fence(bos[i]->resv, fence);
}
}
static void
-v3d_unlock_bo_reservations(struct drm_device *dev,
- struct v3d_exec_info *exec,
+v3d_unlock_bo_reservations(struct v3d_bo **bos,
+ int bo_count,
struct ww_acquire_ctx *acquire_ctx)
{
int i;
- for (i = 0; i < exec->bo_count; i++) {
- struct v3d_bo *bo = to_v3d_bo(&exec->bo[i]->base);
-
- ww_mutex_unlock(&bo->resv->lock);
- }
+ for (i = 0; i < bo_count; i++)
+ ww_mutex_unlock(&bos[i]->resv->lock);
ww_acquire_fini(acquire_ctx);
}
@@ -245,19 +239,19 @@ v3d_unlock_bo_reservations(struct drm_device *dev,
* to v3d, so we don't attach dma-buf fences to them.
*/
static int
-v3d_lock_bo_reservations(struct drm_device *dev,
- struct v3d_exec_info *exec,
+v3d_lock_bo_reservations(struct v3d_bo **bos,
+ int bo_count,
struct ww_acquire_ctx *acquire_ctx)
{
int contended_lock = -1;
int i, ret;
- struct v3d_bo *bo;
ww_acquire_init(acquire_ctx, &reservation_ww_class);
retry:
if (contended_lock != -1) {
- bo = to_v3d_bo(&exec->bo[contended_lock]->base);
+ struct v3d_bo *bo = bos[contended_lock];
+
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
acquire_ctx);
if (ret) {
@@ -266,23 +260,20 @@ retry:
}
}
- for (i = 0; i < exec->bo_count; i++) {
+ for (i = 0; i < bo_count; i++) {
if (i == contended_lock)
continue;
- bo = to_v3d_bo(&exec->bo[i]->base);
-
- ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
+ ret = ww_mutex_lock_interruptible(&bos[i]->resv->lock,
+ acquire_ctx);
if (ret) {
int j;
- for (j = 0; j < i; j++) {
- bo = to_v3d_bo(&exec->bo[j]->base);
- ww_mutex_unlock(&bo->resv->lock);
- }
+ for (j = 0; j < i; j++)
+ ww_mutex_unlock(&bos[j]->resv->lock);
if (contended_lock != -1 && contended_lock >= i) {
- bo = to_v3d_bo(&exec->bo[contended_lock]->base);
+ struct v3d_bo *bo = bos[contended_lock];
ww_mutex_unlock(&bo->resv->lock);
}
@@ -302,12 +293,11 @@ retry:
/* Reserve space for our shared (read-only) fence references,
* before we commit the CL to the hardware.
*/
- for (i = 0; i < exec->bo_count; i++) {
- bo = to_v3d_bo(&exec->bo[i]->base);
-
- ret = reservation_object_reserve_shared(bo->resv, 1);
+ for (i = 0; i < bo_count; i++) {
+ ret = reservation_object_reserve_shared(bos[i]->resv, 1);
if (ret) {
- v3d_unlock_bo_reservations(dev, exec, acquire_ctx);
+ v3d_unlock_bo_reservations(bos, bo_count,
+ acquire_ctx);
return ret;
}
}
@@ -430,6 +420,33 @@ void v3d_exec_put(struct v3d_exec_info *exec)
kref_put(&exec->refcount, v3d_exec_cleanup);
}
+static void
+v3d_tfu_job_cleanup(struct kref *ref)
+{
+ struct v3d_tfu_job *job = container_of(ref, struct v3d_tfu_job,
+ refcount);
+ struct v3d_dev *v3d = job->v3d;
+ unsigned int i;
+
+ dma_fence_put(job->in_fence);
+ dma_fence_put(job->done_fence);
+
+ for (i = 0; i < ARRAY_SIZE(job->bo); i++) {
+ if (job->bo[i])
+ drm_gem_object_put_unlocked(&job->bo[i]->base);
+ }
+
+ pm_runtime_mark_last_busy(v3d->dev);
+ pm_runtime_put_autosuspend(v3d->dev);
+
+ kfree(job);
+}
+
+void v3d_tfu_job_put(struct v3d_tfu_job *job)
+{
+ kref_put(&job->refcount, v3d_tfu_job_cleanup);
+}
+
int
v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -504,6 +521,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_syncobj *sync_out;
int ret = 0;
+ trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
+
if (args->pad != 0) {
DRM_INFO("pad must be zero: %d\n", args->pad);
return -EINVAL;
@@ -547,7 +566,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (ret)
goto fail;
- ret = v3d_lock_bo_reservations(dev, exec, &acquire_ctx);
+ ret = v3d_lock_bo_reservations(exec->bo, exec->bo_count,
+ &acquire_ctx);
if (ret)
goto fail;
@@ -581,15 +601,15 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
&v3d_priv->sched_entity[V3D_RENDER]);
mutex_unlock(&v3d->sched_lock);
- v3d_attach_object_fences(exec);
+ v3d_attach_object_fences(exec->bo, exec->bo_count,
+ exec->render_done_fence);
- v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
+ v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx);
/* Update the return sync object for the */
sync_out = drm_syncobj_find(file_priv, args->out_sync);
if (sync_out) {
- drm_syncobj_replace_fence(sync_out, 0,
- exec->render_done_fence);
+ drm_syncobj_replace_fence(sync_out, exec->render_done_fence);
drm_syncobj_put(sync_out);
}
@@ -599,13 +619,121 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
fail_unreserve:
mutex_unlock(&v3d->sched_lock);
- v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
+ v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx);
fail:
v3d_exec_put(exec);
return ret;
}
+/**
+ * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the TFU, which we don't
+ * need to validate since the TFU is behind the MMU.
+ */
+int
+v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct drm_v3d_submit_tfu *args = data;
+ struct v3d_tfu_job *job;
+ struct ww_acquire_ctx acquire_ctx;
+ struct drm_syncobj *sync_out;
+ struct dma_fence *sched_done_fence;
+ int ret = 0;
+ int bo_count;
+
+ trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
+
+ job = kcalloc(1, sizeof(*job), GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+
+ ret = pm_runtime_get_sync(v3d->dev);
+ if (ret < 0) {
+ kfree(job);
+ return ret;
+ }
+
+ kref_init(&job->refcount);
+
+ ret = drm_syncobj_find_fence(file_priv, args->in_sync,
+ 0, 0, &job->in_fence);
+ if (ret == -EINVAL)
+ goto fail;
+
+ job->args = *args;
+ job->v3d = v3d;
+
+ spin_lock(&file_priv->table_lock);
+ for (bo_count = 0; bo_count < ARRAY_SIZE(job->bo); bo_count++) {
+ struct drm_gem_object *bo;
+
+ if (!args->bo_handles[bo_count])
+ break;
+
+ bo = idr_find(&file_priv->object_idr,
+ args->bo_handles[bo_count]);
+ if (!bo) {
+ DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
+ bo_count, args->bo_handles[bo_count]);
+ ret = -ENOENT;
+ spin_unlock(&file_priv->table_lock);
+ goto fail;
+ }
+ drm_gem_object_get(bo);
+ job->bo[bo_count] = to_v3d_bo(bo);
+ }
+ spin_unlock(&file_priv->table_lock);
+
+ ret = v3d_lock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+ if (ret)
+ goto fail;
+
+ mutex_lock(&v3d->sched_lock);
+ ret = drm_sched_job_init(&job->base,
+ &v3d_priv->sched_entity[V3D_TFU],
+ v3d_priv);
+ if (ret)
+ goto fail_unreserve;
+
+ sched_done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+ kref_get(&job->refcount); /* put by scheduler job completion */
+ drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[V3D_TFU]);
+ mutex_unlock(&v3d->sched_lock);
+
+ v3d_attach_object_fences(job->bo, bo_count, sched_done_fence);
+
+ v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+
+ /* Update the return sync object */
+ sync_out = drm_syncobj_find(file_priv, args->out_sync);
+ if (sync_out) {
+ drm_syncobj_replace_fence(sync_out, sched_done_fence);
+ drm_syncobj_put(sync_out);
+ }
+ dma_fence_put(sched_done_fence);
+
+ v3d_tfu_job_put(job);
+
+ return 0;
+
+fail_unreserve:
+ mutex_unlock(&v3d->sched_lock);
+ v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+fail:
+ v3d_tfu_job_put(job);
+
+ return ret;
+}
+
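+From userspace, the ioctl takes the raw V3D_TFU_* register values plus the handles of the BOs those addresses refer to, and a pair of sync objects. A minimal sketch (v3d_submit_tfu() is a hypothetical wrapper; DRM_IOCTL_V3D_SUBMIT_TFU and drmIoctl() are assumed from the v3d uapi header and libdrm):

#include <xf86drm.h>
#include <drm/v3d_drm.h>

/* Submit one TFU conversion.  The caller fills in the register fields
 * (iia, iis, ica, iua, ioa, ios, icfg, coef[]) and bo_handles[]; unused
 * bo_handles[] slots stay 0, and the kernel stops looking up handles at
 * the first zero entry.
 */
static int v3d_submit_tfu(int fd, struct drm_v3d_submit_tfu *args,
			  __u32 in_syncobj, __u32 out_syncobj)
{
	args->in_sync = in_syncobj;	/* optional fence to wait on, 0 for none */
	args->out_sync = out_syncobj;	/* signalled when the TFU IRQ fires */

	return drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_TFU, args);
}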
int
v3d_gem_init(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index e07514eb11b5..69338da70ddc 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -4,8 +4,8 @@
/**
* DOC: Interrupt management for the V3D engine
*
- * When we take a binning or rendering flush done interrupt, we need
- * to signal the fence for that job so that the scheduler can queue up
+ * When we take a bin, render, or TFU done interrupt, we need to
+ * signal the fence for that job so that the scheduler can queue up
* the next one and unblock any waiters.
*
* When we take the binner out of memory interrupt, we need to
@@ -15,6 +15,7 @@
#include "v3d_drv.h"
#include "v3d_regs.h"
+#include "v3d_trace.h"
#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM | \
V3D_INT_FLDONE | \
@@ -23,7 +24,8 @@
#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV | \
V3D_HUB_INT_MMU_PTI | \
- V3D_HUB_INT_MMU_CAP))
+ V3D_HUB_INT_MMU_CAP | \
+ V3D_HUB_INT_TFUC))
static void
v3d_overflow_mem_work(struct work_struct *work)
@@ -87,12 +89,20 @@ v3d_irq(int irq, void *arg)
}
if (intsts & V3D_INT_FLDONE) {
- dma_fence_signal(v3d->bin_job->bin.done_fence);
+ struct v3d_fence *fence =
+ to_v3d_fence(v3d->bin_job->bin.done_fence);
+
+ trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
+ dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
}
if (intsts & V3D_INT_FRDONE) {
- dma_fence_signal(v3d->render_job->render.done_fence);
+ struct v3d_fence *fence =
+ to_v3d_fence(v3d->render_job->render.done_fence);
+
+ trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
+ dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
}
@@ -117,6 +127,15 @@ v3d_hub_irq(int irq, void *arg)
/* Acknowledge the interrupts we're handling here. */
V3D_WRITE(V3D_HUB_INT_CLR, intsts);
+ if (intsts & V3D_HUB_INT_TFUC) {
+ struct v3d_fence *fence =
+ to_v3d_fence(v3d->tfu_job->done_fence);
+
+ trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
+ dma_fence_signal(&fence->base);
+ status = IRQ_HANDLED;
+ }
+
if (intsts & (V3D_HUB_INT_MMU_WRV |
V3D_HUB_INT_MMU_PTI |
V3D_HUB_INT_MMU_CAP)) {
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index c3a5e4e44f73..6ccdee9d47bd 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -86,6 +86,55 @@
# define V3D_TOP_GR_BRIDGE_SW_INIT_1 0x0000c
# define V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT BIT(0)
+#define V3D_TFU_CS 0x00400
+/* Stops current job, empties input fifo. */
+# define V3D_TFU_CS_TFURST BIT(31)
+# define V3D_TFU_CS_CVTCT_MASK V3D_MASK(23, 16)
+# define V3D_TFU_CS_CVTCT_SHIFT 16
+# define V3D_TFU_CS_NFREE_MASK V3D_MASK(13, 8)
+# define V3D_TFU_CS_NFREE_SHIFT 8
+# define V3D_TFU_CS_BUSY BIT(0)
+
+#define V3D_TFU_SU 0x00404
+/* Interrupt when FINTTHR input slots are free (0 = disabled) */
+# define V3D_TFU_SU_FINTTHR_MASK V3D_MASK(13, 8)
+# define V3D_TFU_SU_FINTTHR_SHIFT 8
+/* Skips resetting the CRC at the start of CRC generation. */
+# define V3D_TFU_SU_CRCCHAIN BIT(4)
+/* Skips writes, computes CRC of the image. miplevels must be 0. */
+# define V3D_TFU_SU_CRC BIT(3)
+# define V3D_TFU_SU_THROTTLE_MASK V3D_MASK(1, 0)
+# define V3D_TFU_SU_THROTTLE_SHIFT 0
+
+#define V3D_TFU_ICFG 0x00408
+/* Interrupt when the conversion is complete. */
+# define V3D_TFU_ICFG_IOC BIT(0)
+
+/* Input Image Address */
+#define V3D_TFU_IIA 0x0040c
+/* Input Chroma Address */
+#define V3D_TFU_ICA 0x00410
+/* Input Image Stride */
+#define V3D_TFU_IIS 0x00414
+/* Input Image U-Plane Address */
+#define V3D_TFU_IUA 0x00418
+/* Output Image Address */
+#define V3D_TFU_IOA 0x0041c
+/* Image Output Size */
+#define V3D_TFU_IOS 0x00420
+/* TFU YUV Coefficient 0 */
+#define V3D_TFU_COEF0 0x00424
+/* Use these regs instead of the defaults. */
+# define V3D_TFU_COEF0_USECOEF BIT(31)
+/* TFU YUV Coefficient 1 */
+#define V3D_TFU_COEF1 0x00428
+/* TFU YUV Coefficient 2 */
+#define V3D_TFU_COEF2 0x0042c
+/* TFU YUV Coefficient 3 */
+#define V3D_TFU_COEF3 0x00430
+
+#define V3D_TFU_CRC 0x00434
+
/* Per-MMU registers. */
#define V3D_MMUC_CONTROL 0x01000
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 445b2ef03303..f7508e907536 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -30,6 +30,12 @@ to_v3d_job(struct drm_sched_job *sched_job)
return container_of(sched_job, struct v3d_job, base);
}
+static struct v3d_tfu_job *
+to_tfu_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct v3d_tfu_job, base);
+}
+
static void
v3d_job_free(struct drm_sched_job *sched_job)
{
@@ -40,8 +46,18 @@ v3d_job_free(struct drm_sched_job *sched_job)
v3d_exec_put(job->exec);
}
+static void
+v3d_tfu_job_free(struct drm_sched_job *sched_job)
+{
+ struct v3d_tfu_job *job = to_tfu_job(sched_job);
+
+ drm_sched_job_cleanup(sched_job);
+
+ v3d_tfu_job_put(job);
+}
+
/**
- * Returns the fences that the bin job depends on, one by one.
+ * Returns the fences that the bin or render job depends on, one by one.
* v3d_job_run() won't be called until all of them have been signaled.
*/
static struct dma_fence *
@@ -78,6 +94,27 @@ v3d_job_dependency(struct drm_sched_job *sched_job,
return fence;
}
+/**
+ * Returns the fences that the TFU job depends on, one by one.
+ * v3d_tfu_job_run() won't be called until all of them have been
+ * signaled.
+ */
+static struct dma_fence *
+v3d_tfu_job_dependency(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
+{
+ struct v3d_tfu_job *job = to_tfu_job(sched_job);
+ struct dma_fence *fence;
+
+ fence = job->in_fence;
+ if (fence) {
+ job->in_fence = NULL;
+ return fence;
+ }
+
+ return NULL;
+}
+
static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
@@ -149,28 +186,47 @@ static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
return fence;
}
-static void
-v3d_job_timedout(struct drm_sched_job *sched_job)
+static struct dma_fence *
+v3d_tfu_job_run(struct drm_sched_job *sched_job)
{
- struct v3d_job *job = to_v3d_job(sched_job);
- struct v3d_exec_info *exec = job->exec;
- struct v3d_dev *v3d = exec->v3d;
- enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
- enum v3d_queue q;
- u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
- u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
+ struct v3d_tfu_job *job = to_tfu_job(sched_job);
+ struct v3d_dev *v3d = job->v3d;
+ struct drm_device *dev = &v3d->drm;
+ struct dma_fence *fence;
- /* If the current address or return address have changed, then
- * the GPU has probably made progress and we should delay the
- * reset. This could fail if the GPU got in an infinite loop
- * in the CL, but that is pretty unlikely outside of an i-g-t
- * testcase.
- */
- if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
- job->timedout_ctca = ctca;
- job->timedout_ctra = ctra;
- return;
+ fence = v3d_fence_create(v3d, V3D_TFU);
+ if (IS_ERR(fence))
+ return NULL;
+
+ v3d->tfu_job = job;
+ if (job->done_fence)
+ dma_fence_put(job->done_fence);
+ job->done_fence = dma_fence_get(fence);
+
+ trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);
+
+ V3D_WRITE(V3D_TFU_IIA, job->args.iia);
+ V3D_WRITE(V3D_TFU_IIS, job->args.iis);
+ V3D_WRITE(V3D_TFU_ICA, job->args.ica);
+ V3D_WRITE(V3D_TFU_IUA, job->args.iua);
+ V3D_WRITE(V3D_TFU_IOA, job->args.ioa);
+ V3D_WRITE(V3D_TFU_IOS, job->args.ios);
+ V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]);
+ if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) {
+ V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]);
+ V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]);
+ V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]);
}
+ /* ICFG kicks off the job. */
+ V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC);
+
+ return fence;
+}
+
+static void
+v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
+{
+ enum v3d_queue q;
mutex_lock(&v3d->reset_lock);
@@ -195,6 +251,39 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
mutex_unlock(&v3d->reset_lock);
}
+static void
+v3d_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct v3d_job *job = to_v3d_job(sched_job);
+ struct v3d_exec_info *exec = job->exec;
+ struct v3d_dev *v3d = exec->v3d;
+ enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
+ u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
+ u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
+
+ /* If the current address or return address have changed, then
+ * the GPU has probably made progress and we should delay the
+ * reset. This could fail if the GPU got in an infinite loop
+ * in the CL, but that is pretty unlikely outside of an i-g-t
+ * testcase.
+ */
+ if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
+ job->timedout_ctca = ctca;
+ job->timedout_ctra = ctra;
+ return;
+ }
+
+ v3d_gpu_reset_for_timeout(v3d, sched_job);
+}
+
+static void
+v3d_tfu_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct v3d_tfu_job *job = to_tfu_job(sched_job);
+
+ v3d_gpu_reset_for_timeout(job->v3d, sched_job);
+}
+
static const struct drm_sched_backend_ops v3d_sched_ops = {
.dependency = v3d_job_dependency,
.run_job = v3d_job_run,
@@ -202,6 +291,13 @@ static const struct drm_sched_backend_ops v3d_sched_ops = {
.free_job = v3d_job_free
};
+static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
+ .dependency = v3d_tfu_job_dependency,
+ .run_job = v3d_tfu_job_run,
+ .timedout_job = v3d_tfu_job_timedout,
+ .free_job = v3d_tfu_job_free
+};
+
int
v3d_sched_init(struct v3d_dev *v3d)
{
@@ -232,6 +328,19 @@ v3d_sched_init(struct v3d_dev *v3d)
return ret;
}
+ ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
+ &v3d_tfu_sched_ops,
+ hw_jobs_limit, job_hang_limit,
+ msecs_to_jiffies(hang_limit_ms),
+ "v3d_tfu");
+ if (ret) {
+ dev_err(v3d->dev, "Failed to create TFU scheduler: %d.",
+ ret);
+ drm_sched_fini(&v3d->queue[V3D_RENDER].sched);
+ drm_sched_fini(&v3d->queue[V3D_BIN].sched);
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/v3d/v3d_trace.h b/drivers/gpu/drm/v3d/v3d_trace.h
index 85dd351e1e09..edd984afa33f 100644
--- a/drivers/gpu/drm/v3d/v3d_trace.h
+++ b/drivers/gpu/drm/v3d/v3d_trace.h
@@ -12,6 +12,28 @@
#define TRACE_SYSTEM v3d
#define TRACE_INCLUDE_FILE v3d_trace
+TRACE_EVENT(v3d_submit_cl_ioctl,
+ TP_PROTO(struct drm_device *dev, u32 ct1qba, u32 ct1qea),
+ TP_ARGS(dev, ct1qba, ct1qea),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, ct1qba)
+ __field(u32, ct1qea)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->ct1qba = ct1qba;
+ __entry->ct1qea = ct1qea;
+ ),
+
+ TP_printk("dev=%u, RCL 0x%08x..0x%08x",
+ __entry->dev,
+ __entry->ct1qba,
+ __entry->ct1qea)
+);
+
TRACE_EVENT(v3d_submit_cl,
TP_PROTO(struct drm_device *dev, bool is_render,
uint64_t seqno,
@@ -42,6 +64,105 @@ TRACE_EVENT(v3d_submit_cl,
__entry->ctnqea)
);
+TRACE_EVENT(v3d_bcl_irq,
+ TP_PROTO(struct drm_device *dev,
+ uint64_t seqno),
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu",
+ __entry->dev,
+ __entry->seqno)
+);
+
+TRACE_EVENT(v3d_rcl_irq,
+ TP_PROTO(struct drm_device *dev,
+ uint64_t seqno),
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu",
+ __entry->dev,
+ __entry->seqno)
+);
+
+TRACE_EVENT(v3d_tfu_irq,
+ TP_PROTO(struct drm_device *dev,
+ uint64_t seqno),
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu",
+ __entry->dev,
+ __entry->seqno)
+);
+
+TRACE_EVENT(v3d_submit_tfu_ioctl,
+ TP_PROTO(struct drm_device *dev, u32 iia),
+ TP_ARGS(dev, iia),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, iia)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->iia = iia;
+ ),
+
+ TP_printk("dev=%u, IIA 0x%08x",
+ __entry->dev,
+ __entry->iia)
+);
+
+TRACE_EVENT(v3d_submit_tfu,
+ TP_PROTO(struct drm_device *dev,
+ uint64_t seqno),
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu",
+ __entry->dev,
+ __entry->seqno)
+);
+
TRACE_EVENT(v3d_reset_begin,
TP_PROTO(struct drm_device *dev),
TP_ARGS(dev),
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index bd6ef1f31822..4f87b03f837d 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -338,6 +338,7 @@ struct vc4_plane_state {
u32 pos0_offset;
u32 pos2_offset;
u32 ptr0_offset;
+ u32 lbm_offset;
/* Offset where the plane's dlist was last stored in the
* hardware at vc4_crtc_atomic_flush() time.
@@ -369,6 +370,11 @@ struct vc4_plane_state {
* to enable background color fill.
*/
bool needs_bg_fill;
+
+ /* Mark the dlist as initialized. Useful to avoid initializing it twice
+ * when an async update is not possible.
+ */
+ bool dlist_initialized;
};
static inline struct vc4_plane_state *
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 41881ce4132d..aea2b8dfec17 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -681,7 +681,7 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
exec->fence = &fence->base;
if (out_sync)
- drm_syncobj_replace_fence(out_sync, 0, exec->fence);
+ drm_syncobj_replace_fence(out_sync, exec->fence);
vc4_update_bo_seqnos(exec, seqno);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 127468785f74..1f94b9affe4b 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -214,6 +214,12 @@ static int vc4_atomic_commit(struct drm_device *dev,
return 0;
}
+ /* We know for sure we don't want an async update here. Set
+ * state->legacy_cursor_update to false to prevent
+ * drm_atomic_helper_setup_commit() from auto-completing
+ * commit->flip_done.
+ */
+ state->legacy_cursor_update = false;
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 98fae4daa08c..75db62cbe468 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -129,12 +129,12 @@ static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
{
- if (dst > src)
+ if (dst == src)
+ return VC4_SCALING_NONE;
+ if (3 * dst >= 2 * src)
return VC4_SCALING_PPF;
- else if (dst < src)
- return VC4_SCALING_TPZ;
else
- return VC4_SCALING_NONE;
+ return VC4_SCALING_TPZ;
}
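The rewritten helper keeps the PPF scaler for all upscaling and for mild downscaling, and only switches to the TPZ scaler once the destination drops below two thirds of the source. A few worked cases, for illustration:

/*   src   dst   3 * dst >= 2 * src ?   result
 *   100   100   (dst == src)           VC4_SCALING_NONE
 *   100   150   450 >= 200             VC4_SCALING_PPF  (upscale)
 *   100    80   240 >= 200             VC4_SCALING_PPF  (mild downscale)
 *   100    50   150 <  200             VC4_SCALING_TPZ  (strong downscale)
 */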
static bool plane_enabled(struct drm_plane_state *state)
@@ -154,6 +154,7 @@ static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane
return NULL;
memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));
+ vc4_state->dlist_initialized = 0;
__drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);
@@ -259,14 +260,12 @@ static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane)
static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
- struct drm_plane *plane = state->plane;
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
u32 subpixel_src_mask = (1 << 16) - 1;
u32 format = fb->format->format;
int num_planes = fb->format->num_planes;
- int min_scale = 1, max_scale = INT_MAX;
struct drm_crtc_state *crtc_state;
u32 h_subsample, v_subsample;
int i, ret;
@@ -278,21 +277,8 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
return -EINVAL;
}
- /* No configuring scaling on the cursor plane, since it gets
- * non-vblank-synced updates, and scaling requires LBM changes which
- * have to be vblank-synced.
- */
- if (plane->type == DRM_PLANE_TYPE_CURSOR) {
- min_scale = DRM_PLANE_HELPER_NO_SCALING;
- max_scale = DRM_PLANE_HELPER_NO_SCALING;
- } else {
- min_scale = 1;
- max_scale = INT_MAX;
- }
-
- ret = drm_atomic_helper_check_plane_state(state, crtc_state,
- min_scale, max_scale,
- true, true);
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state, 1,
+ INT_MAX, true, true);
if (ret)
return ret;
@@ -341,12 +327,14 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
vc4_get_scaling_mode(vc4_state->src_h[1],
vc4_state->crtc_h);
- /* YUV conversion requires that horizontal scaling be enabled,
- * even on a plane that's otherwise 1:1. Looks like only PPF
- * works in that case, so let's pick that one.
+ /* YUV conversion requires that horizontal scaling be enabled
+ * on the UV plane even if vc4_get_scaling_mode() returned
+ * VC4_SCALING_NONE (which can happen when the down-scaling
+ * ratio is 0.5). Let's force it to VC4_SCALING_PPF in this
+ * case.
*/
- if (vc4_state->is_unity)
- vc4_state->x_scaling[0] = VC4_SCALING_PPF;
+ if (vc4_state->x_scaling[1] == VC4_SCALING_NONE)
+ vc4_state->x_scaling[1] = VC4_SCALING_PPF;
} else {
vc4_state->is_yuv = false;
vc4_state->x_scaling[1] = VC4_SCALING_NONE;
@@ -393,10 +381,13 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w);
u32 lbm;
+ /* LBM is not needed when there's no vertical scaling. */
+ if (vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
+ vc4_state->y_scaling[1] == VC4_SCALING_NONE)
+ return 0;
+
if (!vc4_state->is_yuv) {
- if (vc4_state->is_unity)
- return 0;
- else if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
+ if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
lbm = pix_per_line * 8;
else {
/* In special cases, this multiplier might be 12. */
@@ -447,6 +438,43 @@ static void vc4_write_scaling_parameters(struct drm_plane_state *state,
}
}
+static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ unsigned long irqflags;
+ u32 lbm_size;
+
+ lbm_size = vc4_lbm_size(state);
+ if (!lbm_size)
+ return 0;
+
+ if (WARN_ON(!vc4_state->lbm_offset))
+ return -EINVAL;
+
+ /* Allocate the LBM memory that the HVS will use for temporary
+ * storage due to our scaling/format conversion.
+ */
+ if (!vc4_state->lbm.allocated) {
+ int ret;
+
+ spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
+ ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
+ &vc4_state->lbm,
+ lbm_size, 32, 0, 0);
+ spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
+
+ if (ret)
+ return ret;
+ } else {
+ WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
+ }
+
+ vc4_state->dlist[vc4_state->lbm_offset] = vc4_state->lbm.start;
+
+ return 0;
+}
+
/* Writes out a full display list for an active plane to the plane's
* private dlist state.
*/
@@ -464,31 +492,14 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
bool mix_plane_alpha;
bool covers_screen;
u32 scl0, scl1, pitch0;
- u32 lbm_size, tiling;
- unsigned long irqflags;
+ u32 tiling;
u32 hvs_format = format->hvs;
int ret, i;
- ret = vc4_plane_setup_clipping_and_scaling(state);
- if (ret)
- return ret;
-
- /* Allocate the LBM memory that the HVS will use for temporary
- * storage due to our scaling/format conversion.
- */
- lbm_size = vc4_lbm_size(state);
- if (lbm_size) {
- if (!vc4_state->lbm.allocated) {
- spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
- ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
- &vc4_state->lbm,
- lbm_size, 32, 0, 0);
- spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
- } else {
- WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
- }
- }
+ if (vc4_state->dlist_initialized)
+ return 0;
+ ret = vc4_plane_setup_clipping_and_scaling(state);
if (ret)
return ret;
@@ -712,15 +723,18 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
}
+ vc4_state->lbm_offset = 0;
+
if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
- /* LBM Base Address. */
+ /* Reserve a slot for the LBM Base Address. The real value will
+ * be set when calling vc4_plane_allocate_lbm().
+ */
if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
- vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
- vc4_dlist_write(vc4_state, vc4_state->lbm.start);
- }
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE)
+ vc4_state->lbm_offset = vc4_state->dlist_count++;
if (num_planes > 1) {
/* Emit Cb/Cr as channel 0 and Y as channel
@@ -766,6 +780,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
state->alpha != DRM_BLEND_ALPHA_OPAQUE;
+ /* Flag the dlist as initialized to avoid setting it up twice in case
+ * the async update check already called vc4_plane_mode_set() and
+ * decided to fall back to a sync update because an async update was
+ * not possible.
+ */
+ vc4_state->dlist_initialized = 1;
+
return 0;
}
@@ -780,13 +801,18 @@ static int vc4_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ int ret;
vc4_state->dlist_count = 0;
- if (plane_enabled(state))
- return vc4_plane_mode_set(plane, state);
- else
+ if (!plane_enabled(state))
return 0;
+
+ ret = vc4_plane_mode_set(plane, state);
+ if (ret)
+ return ret;
+
+ return vc4_plane_allocate_lbm(state);
}
static void vc4_plane_atomic_update(struct drm_plane *plane,
@@ -852,28 +878,59 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
static void vc4_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
+ struct vc4_plane_state *vc4_state, *new_vc4_state;
- if (plane->state->fb != state->fb) {
- vc4_plane_async_set_fb(plane, state->fb);
- drm_atomic_set_fb_for_plane(plane->state, state->fb);
- }
-
- /* Set the cursor's position on the screen. This is the
- * expected change from the drm_mode_cursor_universal()
- * helper.
- */
+ drm_atomic_set_fb_for_plane(plane->state, state->fb);
plane->state->crtc_x = state->crtc_x;
plane->state->crtc_y = state->crtc_y;
-
- /* Allow changing the start position within the cursor BO, if
- * that matters.
- */
+ plane->state->crtc_w = state->crtc_w;
+ plane->state->crtc_h = state->crtc_h;
plane->state->src_x = state->src_x;
plane->state->src_y = state->src_y;
-
- /* Update the display list based on the new crtc_x/y. */
- vc4_plane_atomic_check(plane, plane->state);
+ plane->state->src_w = state->src_w;
+ plane->state->src_h = state->src_h;
+ plane->state->alpha = state->alpha;
+ plane->state->pixel_blend_mode = state->pixel_blend_mode;
+ plane->state->rotation = state->rotation;
+ plane->state->zpos = state->zpos;
+ plane->state->normalized_zpos = state->normalized_zpos;
+ plane->state->color_encoding = state->color_encoding;
+ plane->state->color_range = state->color_range;
+ plane->state->src = state->src;
+ plane->state->dst = state->dst;
+ plane->state->visible = state->visible;
+
+ new_vc4_state = to_vc4_plane_state(state);
+ vc4_state = to_vc4_plane_state(plane->state);
+
+ vc4_state->crtc_x = new_vc4_state->crtc_x;
+ vc4_state->crtc_y = new_vc4_state->crtc_y;
+ vc4_state->crtc_h = new_vc4_state->crtc_h;
+ vc4_state->crtc_w = new_vc4_state->crtc_w;
+ vc4_state->src_x = new_vc4_state->src_x;
+ vc4_state->src_y = new_vc4_state->src_y;
+ memcpy(vc4_state->src_w, new_vc4_state->src_w,
+ sizeof(vc4_state->src_w));
+ memcpy(vc4_state->src_h, new_vc4_state->src_h,
+ sizeof(vc4_state->src_h));
+ memcpy(vc4_state->x_scaling, new_vc4_state->x_scaling,
+ sizeof(vc4_state->x_scaling));
+ memcpy(vc4_state->y_scaling, new_vc4_state->y_scaling,
+ sizeof(vc4_state->y_scaling));
+ vc4_state->is_unity = new_vc4_state->is_unity;
+ vc4_state->is_yuv = new_vc4_state->is_yuv;
+ memcpy(vc4_state->offsets, new_vc4_state->offsets,
+ sizeof(vc4_state->offsets));
+ vc4_state->needs_bg_fill = new_vc4_state->needs_bg_fill;
+
+ /* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */
+ vc4_state->dlist[vc4_state->pos0_offset] =
+ new_vc4_state->dlist[vc4_state->pos0_offset];
+ vc4_state->dlist[vc4_state->pos2_offset] =
+ new_vc4_state->dlist[vc4_state->pos2_offset];
+ vc4_state->dlist[vc4_state->ptr0_offset] =
+ new_vc4_state->dlist[vc4_state->ptr0_offset];
/* Note that we can't just call vc4_plane_write_dlist()
* because that would smash the context data that the HVS is
@@ -890,13 +947,38 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
static int vc4_plane_atomic_async_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
- /* No configuring new scaling in the fast path. */
- if (plane->state->crtc_w != state->crtc_w ||
- plane->state->crtc_h != state->crtc_h ||
- plane->state->src_w != state->src_w ||
- plane->state->src_h != state->src_h)
+ struct vc4_plane_state *old_vc4_state, *new_vc4_state;
+ int ret;
+ u32 i;
+
+ ret = vc4_plane_mode_set(plane, state);
+ if (ret)
+ return ret;
+
+ old_vc4_state = to_vc4_plane_state(plane->state);
+ new_vc4_state = to_vc4_plane_state(state);
+ if (old_vc4_state->dlist_count != new_vc4_state->dlist_count ||
+ old_vc4_state->pos0_offset != new_vc4_state->pos0_offset ||
+ old_vc4_state->pos2_offset != new_vc4_state->pos2_offset ||
+ old_vc4_state->ptr0_offset != new_vc4_state->ptr0_offset ||
+ vc4_lbm_size(plane->state) != vc4_lbm_size(state))
return -EINVAL;
+ /* Only pos0, pos2 and ptr0 DWORDS can be updated in an async update;
+ * if anything else has changed, fall back to a sync update.
+ */
+ for (i = 0; i < new_vc4_state->dlist_count; i++) {
+ if (i == new_vc4_state->pos0_offset ||
+ i == new_vc4_state->pos2_offset ||
+ i == new_vc4_state->ptr0_offset ||
+ (new_vc4_state->lbm_offset &&
+ i == new_vc4_state->lbm_offset))
+ continue;
+
+ if (new_vc4_state->dlist[i] != old_vc4_state->dlist[i])
+ return -EINVAL;
+ }
+
return 0;
}
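Since vc4_plane_mode_set() now runs as part of the async check, the fast path is gated on comparing the freshly built dlist with the committed one rather than on src/crtc sizes alone. The policy above, restated:

/* An async (cursor-style) update is allowed only when:
 *  - the new dlist has the same length as the committed one,
 *  - pos0, pos2 and ptr0 sit at the same offsets as before,
 *  - the LBM size requirement is unchanged, and
 *  - every dlist word other than pos0/pos2/ptr0 and the LBM address is identical.
 * Anything else returns -EINVAL and falls back to a synchronous commit.
 */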
@@ -1013,7 +1095,6 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
struct drm_plane *plane = NULL;
struct vc4_plane *vc4_plane;
u32 formats[ARRAY_SIZE(hvs_formats)];
- u32 num_formats = 0;
int ret = 0;
unsigned i;
static const uint64_t modifiers[] = {
@@ -1030,20 +1111,13 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
if (!vc4_plane)
return ERR_PTR(-ENOMEM);
- for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
- /* Don't allow YUV in cursor planes, since that means
- * tuning on the scaler, which we don't allow for the
- * cursor.
- */
- if (type != DRM_PLANE_TYPE_CURSOR ||
- hvs_formats[i].hvs < HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE) {
- formats[num_formats++] = hvs_formats[i].drm;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(hvs_formats); i++)
+ formats[i] = hvs_formats[i].drm;
+
plane = &vc4_plane->base;
ret = drm_universal_plane_init(dev, plane, 0,
&vc4_plane_funcs,
- formats, num_formats,
+ formats, ARRAY_SIZE(formats),
modifiers, type, NULL);
drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 8f8fed471e34..b5580b11a063 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -169,6 +169,12 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode = NULL;
int count, width, height;
+ if (output->edid) {
+ count = drm_add_edid_modes(connector, output->edid);
+ if (count)
+ return count;
+ }
+
width = le32_to_cpu(output->info.r.width);
height = le32_to_cpu(output->info.r.height);
count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
@@ -287,6 +293,8 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs);
+ if (vgdev->has_edid)
+ drm_connector_attach_edid_property(connector);
drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -378,6 +386,10 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
{
+ int i;
+
+ for (i = 0 ; i < vgdev->num_scanouts; ++i)
+ kfree(vgdev->outputs[i].edid);
virtio_gpu_fbdev_fini(vgdev);
drm_mode_config_cleanup(vgdev->ddev);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index d9287c144fe5..f7f32a885af7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -80,6 +80,7 @@ static unsigned int features[] = {
*/
VIRTIO_GPU_F_VIRGL,
#endif
+ VIRTIO_GPU_F_EDID,
};
static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 2a8aaea72af3..1deb41d42ea4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -47,8 +47,8 @@
#define DRIVER_DATE "0"
#define DRIVER_MAJOR 0
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 1
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 0
/* virtgpu_drm_bus.c */
int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
@@ -115,6 +115,7 @@ struct virtio_gpu_output {
struct drm_encoder enc;
struct virtio_gpu_display_one info;
struct virtio_gpu_update_cursor cursor;
+ struct edid *edid;
int cur_x;
int cur_y;
bool enabled;
@@ -131,6 +132,7 @@ struct virtio_gpu_framebuffer {
int x1, y1, x2, y2; /* dirty rect */
spinlock_t dirty_lock;
uint32_t hw_res_handle;
+ struct virtio_gpu_fence *fence;
};
#define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
@@ -200,6 +202,7 @@ struct virtio_gpu_device {
struct ida ctx_id_ida;
bool has_virgl_3d;
+ bool has_edid;
struct work_struct config_changed_work;
@@ -267,7 +270,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
__le32 width, __le32 height,
__le32 x, __le32 y,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
uint32_t x, uint32_t y,
@@ -278,7 +281,7 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
uint32_t x, uint32_t y);
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_fence *fence);
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj);
int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
@@ -290,6 +293,7 @@ int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx);
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
int idx, int version,
struct virtio_gpu_drv_cap_cache **cache_p);
+int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
uint32_t nlen, const char *name);
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
@@ -302,23 +306,22 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
uint32_t resource_id);
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void *data, uint32_t data_size,
- uint32_t ctx_id, struct virtio_gpu_fence **fence);
+ uint32_t ctx_id, struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
uint32_t resource_id, uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_fence *fence);
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
- struct virtio_gpu_resource_create_3d *rc_3d,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_resource_create_3d *rc_3d);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
void virtio_gpu_fence_ack(struct virtqueue *vq);
@@ -346,9 +349,12 @@ void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
/* virtio_gpu_fence.c */
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(
+ struct virtio_gpu_device *vgdev);
+void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence);
int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_fence *fence);
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
u64 last_seq);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 00c742a441bf..4d6826b27814 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -67,28 +67,43 @@ static const struct dma_fence_ops virtio_fence_ops = {
.timeline_value_str = virtio_timeline_value_str,
};
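+/* Allocate a fence that a later command submission can emit via
+ * virtio_gpu_fence_emit(); GFP_ATOMIC is used so the allocation never sleeps.
+ */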
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
+{
+ struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
+ struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
+ GFP_ATOMIC);
+ if (!fence)
+ return fence;
+
+ fence->drv = drv;
+ dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);
+
+ return fence;
+}
+
+void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence)
+{
+ if (!fence)
+ return;
+
+ dma_fence_put(&fence->f);
+}
+
int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
unsigned long irq_flags;
- *fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_ATOMIC);
- if ((*fence) == NULL)
- return -ENOMEM;
-
spin_lock_irqsave(&drv->lock, irq_flags);
- (*fence)->drv = drv;
- (*fence)->seq = ++drv->sync_seq;
- dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
- drv->context, (*fence)->seq);
- dma_fence_get(&(*fence)->f);
- list_add_tail(&(*fence)->node, &drv->fences);
+ fence->seq = ++drv->sync_seq;
+ dma_fence_get(&fence->f);
+ list_add_tail(&fence->node, &drv->fences);
spin_unlock_irqrestore(&drv->lock, irq_flags);
cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
- cmd_hdr->fence_id = cpu_to_le64((*fence)->seq);
+ cmd_hdr->fence_id = cpu_to_le64(fence->seq);
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index bc5afa4f906e..161b80fee492 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -28,6 +28,7 @@
#include <drm/drmP.h>
#include <drm/virtgpu_drm.h>
#include <drm/ttm/ttm_execbuf_util.h>
+#include <linux/sync_file.h>
#include "virtgpu_drv.h"
@@ -105,7 +106,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
struct drm_gem_object *gobj;
- struct virtio_gpu_fence *fence;
+ struct virtio_gpu_fence *out_fence;
struct virtio_gpu_object *qobj;
int ret;
uint32_t *bo_handles = NULL;
@@ -114,11 +115,46 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
struct ttm_validate_buffer *buflist = NULL;
int i;
struct ww_acquire_ctx ticket;
+ struct sync_file *sync_file;
+ int in_fence_fd = exbuf->fence_fd;
+ int out_fence_fd = -1;
void *buf;
if (vgdev->has_virgl_3d == false)
return -ENOSYS;
+ if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
+ return -EINVAL;
+
+ exbuf->fence_fd = -1;
+
+ if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
+ struct dma_fence *in_fence;
+
+ in_fence = sync_file_get_fence(in_fence_fd);
+
+ if (!in_fence)
+ return -EINVAL;
+
+ /*
+ * Wait if the fence is from a foreign context, or if the fence
+ * array contains any fence from a foreign context.
+ */
+ ret = 0;
+ if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
+ ret = dma_fence_wait(in_fence, true);
+
+ dma_fence_put(in_fence);
+ if (ret)
+ return ret;
+ }
+
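+ /* Reserve the fd up front; it is only installed once the out-fence and
+ * its sync_file have been created further below.
+ */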
+ if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0)
+ return out_fence_fd;
+ }
+
INIT_LIST_HEAD(&validate_list);
if (exbuf->num_bo_handles) {
@@ -128,26 +164,22 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
sizeof(struct ttm_validate_buffer),
GFP_KERNEL | __GFP_ZERO);
if (!bo_handles || !buflist) {
- kvfree(bo_handles);
- kvfree(buflist);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_unused_fd;
}
user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
if (copy_from_user(bo_handles, user_bo_handles,
exbuf->num_bo_handles * sizeof(uint32_t))) {
ret = -EFAULT;
- kvfree(bo_handles);
- kvfree(buflist);
- return ret;
+ goto out_unused_fd;
}
for (i = 0; i < exbuf->num_bo_handles; i++) {
gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
if (!gobj) {
- kvfree(bo_handles);
- kvfree(buflist);
- return -ENOENT;
+ ret = -ENOENT;
+ goto out_unused_fd;
}
qobj = gem_to_virtio_gpu_obj(gobj);
@@ -156,6 +188,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
list_add(&buflist[i].head, &validate_list);
}
kvfree(bo_handles);
+ bo_handles = NULL;
}
ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
@@ -168,22 +201,48 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
ret = PTR_ERR(buf);
goto out_unresv;
}
+
+ out_fence = virtio_gpu_fence_alloc(vgdev);
+ if (!out_fence) {
+ ret = -ENOMEM;
+ goto out_memdup;
+ }
+
+ if (out_fence_fd >= 0) {
+ sync_file = sync_file_create(&out_fence->f);
+ if (!sync_file) {
+ dma_fence_put(&out_fence->f);
+ ret = -ENOMEM;
+ goto out_memdup;
+ }
+
+ exbuf->fence_fd = out_fence_fd;
+ fd_install(out_fence_fd, sync_file->file);
+ }
+
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
- vfpriv->ctx_id, &fence);
+ vfpriv->ctx_id, out_fence);
- ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
+ ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);
/* fence the command bo */
virtio_gpu_unref_list(&validate_list);
kvfree(buflist);
- dma_fence_put(&fence->f);
return 0;
+out_memdup:
+ kfree(buf);
out_unresv:
ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
virtio_gpu_unref_list(&validate_list);
+out_unused_fd:
+ kvfree(bo_handles);
kvfree(buflist);
+
+ if (out_fence_fd >= 0)
+ put_unused_fd(out_fence_fd);
+
return ret;
}
@@ -283,11 +342,17 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
rc_3d.flags = cpu_to_le32(rc->flags);
- virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d, NULL);
- ret = virtio_gpu_object_attach(vgdev, qobj, &fence);
+ fence = virtio_gpu_fence_alloc(vgdev);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto fail_backoff;
+ }
+
+ virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d);
+ ret = virtio_gpu_object_attach(vgdev, qobj, fence);
if (ret) {
- ttm_eu_backoff_reservation(&ticket, &validate_list);
- goto fail_unref;
+ virtio_gpu_fence_cleanup(fence);
+ goto fail_backoff;
}
ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
}
@@ -312,6 +377,8 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
dma_fence_put(&fence->f);
}
return 0;
+fail_backoff:
+ ttm_eu_backoff_reservation(&ticket, &validate_list);
fail_unref:
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
@@ -374,10 +441,16 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
goto out_unres;
convert_to_hw_box(&box, &args->box);
+
+ fence = virtio_gpu_fence_alloc(vgdev);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto out_unres;
+ }
virtio_gpu_cmd_transfer_from_host_3d
(vgdev, qobj->hw_res_handle,
vfpriv->ctx_id, offset, args->level,
- &box, &fence);
+ &box, fence);
reservation_object_add_excl_fence(qobj->tbo.resv,
&fence->f);
@@ -423,10 +496,15 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
(vgdev, qobj, offset,
box.w, box.h, box.x, box.y, NULL);
} else {
+ fence = virtio_gpu_fence_alloc(vgdev);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto out_unres;
+ }
virtio_gpu_cmd_transfer_to_host_3d
(vgdev, qobj,
vfpriv ? vfpriv->ctx_id : 0, offset,
- args->level, &box, &fence);
+ args->level, &box, fence);
reservation_object_add_excl_fence(qobj->tbo.resv,
&fence->f);
dma_fence_put(&fence->f);
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index bf609dcae224..3af6181c05a8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -44,6 +44,8 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
virtio_cread(vgdev->vdev, struct virtio_gpu_config,
events_read, &events_read);
if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
+ if (vgdev->has_edid)
+ virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
drm_helper_hpd_irq_event(vgdev->ddev);
events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
@@ -55,10 +57,11 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
uint32_t nlen, const char *name)
{
- int handle = ida_alloc_min(&vgdev->ctx_id_ida, 1, GFP_KERNEL);
+ int handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
if (handle < 0)
return handle;
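+ /* Context ids handed to the host are 1-based; the ida allocates from 0. */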
+ handle += 1;
virtio_gpu_cmd_context_create(vgdev, handle, nlen, name);
return handle;
}
@@ -67,7 +70,7 @@ static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
uint32_t ctx_id)
{
virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
- ida_free(&vgdev->ctx_id_ida, ctx_id);
+ ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
}
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
@@ -155,6 +158,10 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
#else
DRM_INFO("virgl 3d acceleration not supported by guest\n");
#endif
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
+ vgdev->has_edid = true;
+ DRM_INFO("EDID support available.\n");
+ }
ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
if (ret) {
@@ -200,6 +207,8 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
if (num_capsets)
virtio_gpu_get_capsets(vgdev, num_capsets);
+ if (vgdev->has_edid)
+ virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
5 * HZ);
@@ -266,8 +275,10 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
get_task_comm(dbgname, current);
id = virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname);
- if (id < 0)
+ if (id < 0) {
+ kfree(vfpriv);
return id;
+ }
vfpriv->ctx_id = id;
file->driver_priv = vfpriv;
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 77eac4eb06b1..f39a183d59c2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -25,16 +25,21 @@
#include "virtgpu_drv.h"
-static void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
+static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
uint32_t *resid)
{
- int handle = ida_alloc_min(&vgdev->resource_ida, 1, GFP_KERNEL);
- *resid = handle;
+ int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
+
+ if (handle < 0)
+ return handle;
+
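+ /* Resource ids handed to the host are 1-based; the ida allocates from 0. */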
+ *resid = handle + 1;
+ return 0;
}
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
- ida_free(&vgdev->resource_ida, id);
+ ida_free(&vgdev->resource_ida, id - 1);
}
static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
@@ -94,7 +99,11 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
- virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
+ ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
+ if (ret < 0) {
+ kfree(bo);
+ return ret;
+ }
size = roundup(size, PAGE_SIZE);
ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
if (ret != 0) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a9f4ae7d4483..ead5c53d4e21 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -137,6 +137,41 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
plane->state->src_h >> 16);
}
+static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_object *bo;
+
+ if (!new_state->fb)
+ return 0;
+
+ vgfb = to_virtio_gpu_framebuffer(new_state->fb);
+ bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
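+ /* Only dumb BOs need a transfer to the host on update, so only they
+ * need a fence; allocate it here, where failure can still be reported.
+ */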
+ if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
+ vgfb->fence = virtio_gpu_fence_alloc(vgdev);
+ if (!vgfb->fence)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct virtio_gpu_framebuffer *vgfb;
+
+ if (!plane->state->fb)
+ return;
+
+ vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ if (vgfb->fence)
+ virtio_gpu_fence_cleanup(vgfb->fence);
+}
+
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -144,7 +179,6 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = NULL;
struct virtio_gpu_framebuffer *vgfb;
- struct virtio_gpu_fence *fence = NULL;
struct virtio_gpu_object *bo = NULL;
uint32_t handle;
int ret = 0;
@@ -170,13 +204,13 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
(vgdev, bo, 0,
cpu_to_le32(plane->state->crtc_w),
cpu_to_le32(plane->state->crtc_h),
- 0, 0, &fence);
+ 0, 0, vgfb->fence);
ret = virtio_gpu_object_reserve(bo, false);
if (!ret) {
reservation_object_add_excl_fence(bo->tbo.resv,
- &fence->f);
- dma_fence_put(&fence->f);
- fence = NULL;
+ &vgfb->fence->f);
+ dma_fence_put(&vgfb->fence->f);
+ vgfb->fence = NULL;
virtio_gpu_object_unreserve(bo);
virtio_gpu_object_wait(bo, false);
}
@@ -218,6 +252,8 @@ static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
};
static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
+ .prepare_fb = virtio_gpu_cursor_prepare_fb,
+ .cleanup_fb = virtio_gpu_cursor_cleanup_fb,
.atomic_check = virtio_gpu_plane_atomic_check,
.atomic_update = virtio_gpu_cursor_plane_update,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 51bef1775e47..e27c4aedb809 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -298,7 +298,7 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
struct virtio_gpu_ctrl_hdr *hdr,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
int rc;
@@ -405,7 +405,7 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_detach_backing *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -467,7 +467,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
__le32 width, __le32 height,
__le32 x, __le32 y,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -497,7 +497,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
struct virtio_gpu_mem_entry *ents,
uint32_t nents,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_attach_backing *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -584,6 +584,45 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
wake_up(&vgdev->resp_wq);
}
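+/* Block-read callback for drm_do_get_edid(): copy one EDID block out of the
+ * host-provided response buffer.
+ */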
+static int virtio_get_edid_block(void *data, u8 *buf,
+ unsigned int block, size_t len)
+{
+ struct virtio_gpu_resp_edid *resp = data;
+ size_t start = block * EDID_LENGTH;
+
+ if (start + len > le32_to_cpu(resp->size))
+ return -1;
+ memcpy(buf, resp->edid + start, len);
+ return 0;
+}
+
+static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct virtio_gpu_cmd_get_edid *cmd =
+ (struct virtio_gpu_cmd_get_edid *)vbuf->buf;
+ struct virtio_gpu_resp_edid *resp =
+ (struct virtio_gpu_resp_edid *)vbuf->resp_buf;
+ uint32_t scanout = le32_to_cpu(cmd->scanout);
+ struct virtio_gpu_output *output;
+ struct edid *new_edid, *old_edid;
+
+ if (scanout >= vgdev->num_scanouts)
+ return;
+ output = vgdev->outputs + scanout;
+
+ new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
+
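+ /* Swap in the new EDID under the display info lock; free the old one
+ * outside it.
+ */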
+ spin_lock(&vgdev->display_info_lock);
+ old_edid = output->edid;
+ output->edid = new_edid;
+ drm_connector_update_edid_property(&output->conn, output->edid);
+ spin_unlock(&vgdev->display_info_lock);
+
+ kfree(old_edid);
+ wake_up(&vgdev->resp_wq);
+}
+
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
struct virtio_gpu_ctrl_hdr *cmd_p;
@@ -686,6 +725,34 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
return 0;
}
+int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
+{
+ struct virtio_gpu_cmd_get_edid *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ void *resp_buf;
+ int scanout;
+
+ if (WARN_ON(!vgdev->has_edid))
+ return -EINVAL;
+
+ for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
+ resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
+ GFP_KERNEL);
+ if (!resp_buf)
+ return -ENOMEM;
+
+ cmd_p = virtio_gpu_alloc_cmd_resp
+ (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
+ sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
+ resp_buf);
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
+ cmd_p->scanout = cpu_to_le32(scanout);
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ }
+
+ return 0;
+}
+
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
uint32_t nlen, const char *name)
{
@@ -753,8 +820,7 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
- struct virtio_gpu_resource_create_3d *rc_3d,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_resource_create_3d *rc_3d)
{
struct virtio_gpu_resource_create_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -766,7 +832,7 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
cmd_p->hdr.flags = 0;
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
bo->created = true;
}
@@ -775,7 +841,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -803,7 +869,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
uint32_t resource_id, uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -823,7 +889,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void *data, uint32_t data_size,
- uint32_t ctx_id, struct virtio_gpu_fence **fence)
+ uint32_t ctx_id, struct virtio_gpu_fence *fence)
{
struct virtio_gpu_cmd_submit *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -843,7 +909,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
struct virtio_gpu_mem_entry *ents;
@@ -896,11 +962,11 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj)
{
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
- struct virtio_gpu_fence *fence;
if (use_dma_api && obj->mapped) {
+ struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
/* detach backing and wait for the host process it ... */
- virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
+ virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
dma_fence_wait(&fence->f, true);
dma_fence_put(&fence->f);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index a3d57e0f5ee5..83087877565c 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -68,7 +68,6 @@ static struct drm_driver vkms_driver = {
.release = vkms_release,
.fops = &vkms_driver_fops,
.dumb_create = vkms_dumb_create,
- .dumb_map_offset = vkms_dumb_map,
.gem_vm_ops = &vkms_gem_vm_ops,
.gem_free_object_unlocked = vkms_gem_free_object,
.get_vblank_timestamp = vkms_get_vblank_timestamp,
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 1c93990693e3..e4469cd3d254 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -127,9 +127,6 @@ vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
- u32 handle, u64 *offset);
-
void vkms_gem_free_object(struct drm_gem_object *obj);
int vkms_gem_vmap(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index d04e988b4cbe..80311daed47a 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -153,32 +153,6 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
return 0;
}
-int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
- u32 handle, u64 *offset)
-{
- struct drm_gem_object *obj;
- int ret;
-
- obj = drm_gem_object_lookup(file, handle);
- if (!obj)
- return -ENOENT;
-
- if (!obj->filp) {
- ret = -EINVAL;
- goto unref;
- }
-
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto unref;
-
- *offset = drm_vma_node_offset_addr(&obj->vma_node);
-unref:
- drm_gem_object_put_unlocked(obj);
-
- return ret;
-}
-
static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
{
struct drm_gem_object *gem_obj = &vkms_obj->gem;
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 7041007396ae..418817600ad1 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -23,8 +23,11 @@ vkms_plane_duplicate_state(struct drm_plane *plane)
return NULL;
crc_data = kzalloc(sizeof(*crc_data), GFP_KERNEL);
- if (WARN_ON(!crc_data))
- DRM_INFO("Couldn't allocate crc_data");
+ if (!crc_data) {
+ DRM_DEBUG_KMS("Couldn't allocate crc_data\n");
+ kfree(vkms_state);
+ return NULL;
+ }
vkms_state->crc_data = crc_data;
@@ -138,14 +141,12 @@ static int vkms_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_gem_object *gem_obj;
- struct vkms_gem_object *vkms_obj;
int ret;
if (!state->fb)
return 0;
gem_obj = drm_gem_fb_get_obj(state->fb, 0);
- vkms_obj = drm_gem_to_vkms_gem(gem_obj);
ret = vkms_gem_vmap(gem_obj);
if (ret)
DRM_ERROR("vmap failed: %d\n", ret);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index b9c078860a7c..9fd8b4e75a8c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -665,7 +665,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
- mutex_init(&dev_priv->requested_layout_mutex);
mutex_init(&dev_priv->global_kms_state_mutex);
ttm_lock_init(&dev_priv->reservation_sem);
spin_lock_init(&dev_priv->resource_lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 28df788da44e..d7f6cb9331de 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -466,15 +466,6 @@ struct vmw_private {
uint32_t num_displays;
/*
- * Currently requested_layout_mutex is used to protect the gui
- * positionig state in display unit. With that use case currently this
- * mutex is only taken during layout ioctl and atomic check_modeset.
- * Other display unit state can be protected with this mutex but that
- * needs careful consideration.
- */
- struct mutex requested_layout_mutex;
-
- /*
* Framebuffer info.
*/
@@ -484,8 +475,6 @@ struct vmw_private {
struct vmw_overlay *overlay_priv;
struct drm_property *hotplug_mode_update_property;
struct drm_property *implicit_placement_property;
- unsigned num_implicit;
- struct vmw_framebuffer *implicit_fb;
struct mutex global_kms_state_mutex;
spinlock_t cursor_lock;
struct drm_atomic_state *suspend_state;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 5a6b70ba137a..260650bb5560 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1738,7 +1738,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
void *buf)
{
struct vmw_buffer_object *vmw_bo;
- int ret;
struct {
uint32_t header;
@@ -1748,7 +1747,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
return vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->body.ptr,
&vmw_bo);
- return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index f87261545f2c..301260e23e52 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -906,13 +906,10 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
container_of(action, struct vmw_event_fence_action, action);
struct drm_device *dev = eaction->dev;
struct drm_pending_event *event = eaction->event;
- struct drm_file *file_priv;
-
if (unlikely(event == NULL))
return;
- file_priv = event->file_priv;
spin_lock_irq(&dev->event_lock);
if (likely(eaction->tv_sec != NULL)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index e6b11f6ae2e4..b351fb5214d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -30,6 +30,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_rect.h>
+#include <drm/drm_damage_helper.h>
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
@@ -456,21 +457,8 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc *crtc = state->crtc;
struct vmw_connector_state *vcs;
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
vcs = vmw_connector_state_to_vcs(du->connector.state);
-
- /* Only one active implicit framebuffer at a time. */
- mutex_lock(&dev_priv->global_kms_state_mutex);
- if (vcs->is_implicit && dev_priv->implicit_fb &&
- !(dev_priv->num_implicit == 1 && du->active_implicit)
- && dev_priv->implicit_fb != vfb) {
- DRM_ERROR("Multiple implicit framebuffers "
- "not supported.\n");
- ret = -EINVAL;
- }
- mutex_unlock(&dev_priv->global_kms_state_mutex);
}
@@ -846,58 +834,6 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
kfree(vfbs);
}
-static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
- struct drm_file *file_priv,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips)
-{
- struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
- struct vmw_framebuffer_surface *vfbs =
- vmw_framebuffer_to_vfbs(framebuffer);
- struct drm_clip_rect norect;
- int ret, inc = 1;
-
- /* Legacy Display Unit does not support 3D */
- if (dev_priv->active_display_unit == vmw_du_legacy)
- return -EINVAL;
-
- drm_modeset_lock_all(dev_priv->dev);
-
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0)) {
- drm_modeset_unlock_all(dev_priv->dev);
- return ret;
- }
-
- if (!num_clips) {
- num_clips = 1;
- clips = &norect;
- norect.x1 = norect.y1 = 0;
- norect.x2 = framebuffer->width;
- norect.y2 = framebuffer->height;
- } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
- num_clips /= 2;
- inc = 2; /* skip source rects */
- }
-
- if (dev_priv->active_display_unit == vmw_du_screen_object)
- ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
- clips, NULL, NULL, 0, 0,
- num_clips, inc, NULL, NULL);
- else
- ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
- clips, NULL, NULL, 0, 0,
- num_clips, inc, NULL, NULL);
-
- vmw_fifo_flush(dev_priv, false);
- ttm_read_unlock(&dev_priv->reservation_sem);
-
- drm_modeset_unlock_all(dev_priv->dev);
-
- return 0;
-}
-
/**
* vmw_kms_readback - Perform a readback from the screen system to
* a buffer-object backed framebuffer.
@@ -941,7 +877,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
.destroy = vmw_framebuffer_surface_destroy,
- .dirty = vmw_framebuffer_surface_dirty,
+ .dirty = drm_atomic_helper_dirtyfb,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
@@ -1084,16 +1020,6 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
}
switch (dev_priv->active_display_unit) {
- case vmw_du_screen_target:
- ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
- clips, NULL, num_clips, increment,
- true, true, NULL);
- break;
- case vmw_du_screen_object:
- ret = vmw_kms_sou_do_bo_dirty(dev_priv, &vfbd->base,
- clips, NULL, num_clips,
- increment, true, NULL, NULL);
- break;
case vmw_du_legacy:
ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
clips, num_clips, increment);
@@ -1112,9 +1038,25 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
return ret;
}
+static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+
+ if (dev_priv->active_display_unit == vmw_du_legacy)
+ return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
+ color, clips, num_clips);
+
+ return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
+ clips, num_clips);
+}
+
static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
.destroy = vmw_framebuffer_bo_destroy,
- .dirty = vmw_framebuffer_bo_dirty,
+ .dirty = vmw_framebuffer_bo_dirty_ext,
};
/**
@@ -1565,6 +1507,88 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
}
/**
+ * vmw_crtc_state_and_lock - Return new or current crtc state with locked
+ * crtc mutex
+ * @state: The atomic state pointer containing the new atomic state
+ * @crtc: The crtc
+ *
+ * This function returns the new crtc state if it's part of the state update.
+ * Otherwise returns the current crtc state. It also makes sure that the
+ * crtc mutex is locked.
+ *
+ * Returns: A valid crtc state pointer or NULL. It may also return a
+ * pointer error, in particular -EDEADLK if locking needs to be rerun.
+ */
+static struct drm_crtc_state *
+vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (crtc_state) {
+ lockdep_assert_held(&crtc->mutex.mutex.base);
+ } else {
+ int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
+
+ if (ret != 0 && ret != -EALREADY)
+ return ERR_PTR(ret);
+
+ crtc_state = crtc->state;
+ }
+
+ return crtc_state;
+}
+
+/**
+ * vmw_kms_check_implicit - Verify that all implicit display units scan out
+ * from the same fb after the new state is committed.
+ * @dev: The drm_device.
+ * @state: The new state to be checked.
+ *
+ * Returns:
+ * Zero on success,
+ * -EINVAL on invalid state,
+ * -EDEADLK if modeset locking needs to be rerun.
+ */
+static int vmw_kms_check_implicit(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_framebuffer *implicit_fb = NULL;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+
+ drm_for_each_crtc(crtc, dev) {
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ if (!du->is_implicit)
+ continue;
+
+ crtc_state = vmw_crtc_state_and_lock(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (!crtc_state || !crtc_state->enable)
+ continue;
+
+ /*
+ * Can't move primary planes across crtcs, so this is OK.
+ * It also means we don't need to take the plane mutex.
+ */
+ plane_state = du->primary.state;
+ if (plane_state->crtc != crtc)
+ continue;
+
+ if (!implicit_fb)
+ implicit_fb = plane_state->fb;
+ else if (implicit_fb != plane_state->fb)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* vmw_kms_check_topology - Validates topology in drm_atomic_state
* @dev: DRM device
* @state: the driver state object
@@ -1575,7 +1599,6 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
static int vmw_kms_check_topology(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_rect *rects;
struct drm_crtc *crtc;
@@ -1587,19 +1610,31 @@ static int vmw_kms_check_topology(struct drm_device *dev,
if (!rects)
return -ENOMEM;
- mutex_lock(&dev_priv->requested_layout_mutex);
-
drm_for_each_crtc(crtc, dev) {
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_crtc_state *crtc_state;
i = drm_crtc_index(crtc);
- if (crtc_state && crtc_state->enable) {
+ crtc_state = vmw_crtc_state_and_lock(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto clean;
+ }
+
+ if (!crtc_state)
+ continue;
+
+ if (crtc_state->enable) {
rects[i].x1 = du->gui_x;
rects[i].y1 = du->gui_y;
rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
+ } else {
+ rects[i].x1 = 0;
+ rects[i].y1 = 0;
+ rects[i].x2 = 0;
+ rects[i].y2 = 0;
}
}
@@ -1611,14 +1646,6 @@ static int vmw_kms_check_topology(struct drm_device *dev,
struct drm_connector_state *conn_state;
struct vmw_connector_state *vmw_conn_state;
- if (!new_crtc_state->enable) {
- rects[i].x1 = 0;
- rects[i].y1 = 0;
- rects[i].x2 = 0;
- rects[i].y2 = 0;
- continue;
- }
-
if (!du->pref_active) {
ret = -EINVAL;
goto clean;
@@ -1639,18 +1666,12 @@ static int vmw_kms_check_topology(struct drm_device *dev,
vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
vmw_conn_state->gui_x = du->gui_x;
vmw_conn_state->gui_y = du->gui_y;
-
- rects[i].x1 = du->gui_x;
- rects[i].y1 = du->gui_y;
- rects[i].x2 = du->gui_x + new_crtc_state->mode.hdisplay;
- rects[i].y2 = du->gui_y + new_crtc_state->mode.vdisplay;
}
ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
rects);
clean:
- mutex_unlock(&dev_priv->requested_layout_mutex);
kfree(rects);
return ret;
}
@@ -1681,6 +1702,10 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
if (ret)
return ret;
+ ret = vmw_kms_check_implicit(dev, state);
+ if (ret)
+ return ret;
+
if (!state->allow_modeset)
return ret;
@@ -2003,11 +2028,25 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv,
struct vmw_display_unit *du;
struct drm_connector *con;
struct drm_connector_list_iter conn_iter;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_crtc *crtc;
+ int ret;
+
+ /* Currently gui_x/y is protected with the crtc mutex */
+ mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_acquire_init(&ctx, 0);
+retry:
+ drm_for_each_crtc(crtc, dev) {
+ ret = drm_modeset_lock(&crtc->mutex, &ctx);
+ if (ret < 0) {
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+ goto out_fini;
+ }
+ }
- /*
- * Currently only gui_x/y is protected with requested_layout_mutex.
- */
- mutex_lock(&dev_priv->requested_layout_mutex);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(con, &conn_iter) {
du = vmw_connector_to_du(con);
@@ -2026,9 +2065,7 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv,
}
}
drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev_priv->requested_layout_mutex);
- mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(con, &dev->mode_config.connector_list, head) {
du = vmw_connector_to_du(con);
if (num_rects > du->unit) {
@@ -2048,10 +2085,13 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv,
}
con->status = vmw_du_connector_detect(con, true);
}
- mutex_unlock(&dev->mode_config.mutex);
drm_sysfs_hotplug_event(dev);
-
+out_fini:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ mutex_unlock(&dev->mode_config.mutex);
+
return 0;
}
@@ -2275,84 +2315,6 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
return 1;
}
-int vmw_du_connector_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- struct vmw_display_unit *du = vmw_connector_to_du(connector);
- struct vmw_private *dev_priv = vmw_priv(connector->dev);
-
- if (property == dev_priv->implicit_placement_property)
- du->is_implicit = val;
-
- return 0;
-}
-
-
-
-/**
- * vmw_du_connector_atomic_set_property - Atomic version of get property
- *
- * @crtc - crtc the property is associated with
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int
-vmw_du_connector_atomic_set_property(struct drm_connector *connector,
- struct drm_connector_state *state,
- struct drm_property *property,
- uint64_t val)
-{
- struct vmw_private *dev_priv = vmw_priv(connector->dev);
- struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);
- struct vmw_display_unit *du = vmw_connector_to_du(connector);
-
-
- if (property == dev_priv->implicit_placement_property) {
- vcs->is_implicit = val;
-
- /*
- * We should really be doing a drm_atomic_commit() to
- * commit the new state, but since this doesn't cause
- * an immedate state change, this is probably ok
- */
- du->is_implicit = vcs->is_implicit;
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
-
-/**
- * vmw_du_connector_atomic_get_property - Atomic version of get property
- *
- * @connector - connector the property is associated with
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int
-vmw_du_connector_atomic_get_property(struct drm_connector *connector,
- const struct drm_connector_state *state,
- struct drm_property *property,
- uint64_t *val)
-{
- struct vmw_private *dev_priv = vmw_priv(connector->dev);
- struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);
-
- if (property == dev_priv->implicit_placement_property)
- *val = vcs->is_implicit;
- else {
- DRM_ERROR("Invalid Property %s\n", property->name);
- return -EINVAL;
- }
-
- return 0;
-}
-
/**
* vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
* @dev: drm device for the ioctl
@@ -2742,143 +2704,25 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
}
/**
- * vmw_kms_del_active - unregister a crtc binding to the implicit framebuffer
- *
- * @dev_priv: Pointer to a device private struct.
- * @du: The display unit of the crtc.
- */
-void vmw_kms_del_active(struct vmw_private *dev_priv,
- struct vmw_display_unit *du)
-{
- mutex_lock(&dev_priv->global_kms_state_mutex);
- if (du->active_implicit) {
- if (--(dev_priv->num_implicit) == 0)
- dev_priv->implicit_fb = NULL;
- du->active_implicit = false;
- }
- mutex_unlock(&dev_priv->global_kms_state_mutex);
-}
-
-/**
- * vmw_kms_add_active - register a crtc binding to an implicit framebuffer
- *
- * @vmw_priv: Pointer to a device private struct.
- * @du: The display unit of the crtc.
- * @vfb: The implicit framebuffer
- *
- * Registers a binding to an implicit framebuffer.
- */
-void vmw_kms_add_active(struct vmw_private *dev_priv,
- struct vmw_display_unit *du,
- struct vmw_framebuffer *vfb)
-{
- mutex_lock(&dev_priv->global_kms_state_mutex);
- WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);
-
- if (!du->active_implicit && du->is_implicit) {
- dev_priv->implicit_fb = vfb;
- du->active_implicit = true;
- dev_priv->num_implicit++;
- }
- mutex_unlock(&dev_priv->global_kms_state_mutex);
-}
-
-/**
- * vmw_kms_screen_object_flippable - Check whether we can page-flip a crtc.
- *
- * @dev_priv: Pointer to device-private struct.
- * @crtc: The crtc we want to flip.
- *
- * Returns true or false depending whether it's OK to flip this crtc
- * based on the criterion that we must not have more than one implicit
- * frame-buffer at any one time.
- */
-bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
-{
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- bool ret;
-
- mutex_lock(&dev_priv->global_kms_state_mutex);
- ret = !du->is_implicit || dev_priv->num_implicit == 1;
- mutex_unlock(&dev_priv->global_kms_state_mutex);
-
- return ret;
-}
-
-/**
- * vmw_kms_update_implicit_fb - Update the implicit fb.
- *
- * @dev_priv: Pointer to device-private struct.
- * @crtc: The crtc the new implicit frame-buffer is bound to.
- */
-void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
-{
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct drm_plane *plane = crtc->primary;
- struct vmw_framebuffer *vfb;
-
- mutex_lock(&dev_priv->global_kms_state_mutex);
-
- if (!du->is_implicit)
- goto out_unlock;
-
- vfb = vmw_framebuffer_to_vfb(plane->state->fb);
- WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
- dev_priv->implicit_fb != vfb);
-
- dev_priv->implicit_fb = vfb;
-out_unlock:
- mutex_unlock(&dev_priv->global_kms_state_mutex);
-}
-
-/**
* vmw_kms_create_implicit_placement_proparty - Set up the implicit placement
* property.
*
* @dev_priv: Pointer to a device private struct.
- * @immutable: Whether the property is immutable.
*
* Sets up the implicit placement property unless it's already set up.
*/
void
-vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
- bool immutable)
+vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
{
if (dev_priv->implicit_placement_property)
return;
dev_priv->implicit_placement_property =
drm_property_create_range(dev_priv->dev,
- immutable ?
- DRM_MODE_PROP_IMMUTABLE : 0,
+ DRM_MODE_PROP_IMMUTABLE,
"implicit_placement", 0, 1);
-
-}
-
-
-/**
- * vmw_kms_set_config - Wrapper around drm_atomic_helper_set_config
- *
- * @set: The configuration to set.
- *
- * The vmwgfx Xorg driver doesn't assign the mode::type member, which
- * when drm_mode_set_crtcinfo is called as part of the configuration setting
- * causes it to return incorrect crtc dimensions causing severe problems in
- * the vmwgfx modesetting. So explicitly clear that member before calling
- * into drm_atomic_helper_set_config.
- */
-int vmw_kms_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx)
-{
- if (set && set->mode)
- set->mode->type = 0;
-
- return drm_atomic_helper_set_config(set, ctx);
}
-
/**
* vmw_kms_suspend - Save modesetting state and turn modesetting off.
*
@@ -2935,3 +2779,124 @@ void vmw_kms_lost_device(struct drm_device *dev)
{
drm_atomic_helper_shutdown(dev);
}
+
+/**
+ * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
+ * @update: The closure structure.
+ *
+ * Call this helper after setting the callbacks in &vmw_du_update_plane to do a
+ * plane update on a display unit.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
+{
+ struct drm_plane_state *state = update->plane->state;
+ struct drm_plane_state *old_state = update->old_state;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ struct drm_rect bb;
+ DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
+ uint32_t reserved_size = 0;
+ uint32_t submit_size = 0;
+ uint32_t curr_size = 0;
+ uint32_t num_hits = 0;
+ void *cmd_start;
+ char *cmd_next;
+ int ret;
+
+ /*
+ * Iterate in advance to check whether a plane update is really needed
+ * and to count the clips that actually fall within the plane src, for
+ * FIFO allocation.
+ */
+ drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ if (num_hits == 0)
+ return 0;
+
+ if (update->vfb->bo) {
+ struct vmw_framebuffer_bo *vfbbo =
+ container_of(update->vfb, typeof(*vfbbo), base);
+
+ ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
+ update->cpu_blit);
+ } else {
+ struct vmw_framebuffer_surface *vfbs =
+ container_of(update->vfb, typeof(*vfbs), base);
+
+ ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
+ 0, NULL, NULL);
+ }
+
+ if (ret)
+ return ret;
+
+ ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
+ if (ret)
+ goto out_unref;
+
+ reserved_size = update->calc_fifo_size(update, num_hits);
+ cmd_start = vmw_fifo_reserve(update->dev_priv, reserved_size);
+ if (!cmd_start) {
+ ret = -ENOMEM;
+ goto out_revert;
+ }
+
+ cmd_next = cmd_start;
+
+ if (update->post_prepare) {
+ curr_size = update->post_prepare(update, cmd_next);
+ cmd_next += curr_size;
+ submit_size += curr_size;
+ }
+
+ if (update->pre_clip) {
+ curr_size = update->pre_clip(update, cmd_next, num_hits);
+ cmd_next += curr_size;
+ submit_size += curr_size;
+ }
+
+ bb.x1 = INT_MAX;
+ bb.y1 = INT_MAX;
+ bb.x2 = INT_MIN;
+ bb.y2 = INT_MIN;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ uint32_t fb_x = clip.x1;
+ uint32_t fb_y = clip.y1;
+
+ vmw_du_translate_to_crtc(state, &clip);
+ if (update->clip) {
+ curr_size = update->clip(update, cmd_next, &clip, fb_x,
+ fb_y);
+ cmd_next += curr_size;
+ submit_size += curr_size;
+ }
+ bb.x1 = min_t(int, bb.x1, clip.x1);
+ bb.y1 = min_t(int, bb.y1, clip.y1);
+ bb.x2 = max_t(int, bb.x2, clip.x2);
+ bb.y2 = max_t(int, bb.y2, clip.y2);
+ }
+
+ curr_size = update->post_clip(update, cmd_next, &bb);
+ submit_size += curr_size;
+
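+ /* Never commit more than was reserved; if the populated commands
+ * somehow exceed the reservation, commit nothing instead.
+ */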
+ if (reserved_size < submit_size)
+ submit_size = 0;
+
+ vmw_fifo_commit(update->dev_priv, submit_size);
+
+ vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
+ update->out_fence, NULL);
+ return ret;
+
+out_revert:
+ vmw_validation_revert(&val_ctx);
+
+out_unref:
+ vmw_validation_unref_lists(&val_ctx);
+ return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 76ec570c0684..655abbcd4058 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -33,7 +33,123 @@
#include <drm/drm_encoder.h>
#include "vmwgfx_drv.h"
+/**
+ * struct vmw_du_update_plane - Closure structure for vmw_du_helper_plane_update
+ * @plane: Plane which is being updated.
+ * @old_state: Old state of plane.
+ * @dev_priv: Device private.
+ * @du: Display unit on which to update the plane.
+ * @vfb: Framebuffer which is blitted to display unit.
+ * @out_fence: Out fence for resource finish.
+ * @mutex: The mutex used to protect resource reservation.
+ * @cpu_blit: True if a cpu blit is needed.
+ * @intr: Whether to perform waits interruptibly if possible.
+ *
+ * This structure loosely represents the set of operations needed to perform a
+ * plane update on a display unit. The implementer provides that functionality
+ * through the function callbacks in this structure. In brief, it involves
+ * surface/buffer-object validation, populating FIFO commands and submitting
+ * them to the device.
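+ *
+ * A minimal usage sketch from a display unit implementation (the callback
+ * names here are placeholders) might look like:
+ *
+ *	struct vmw_du_update_plane_surface upd = { 0 };
+ *
+ *	upd.base.plane = plane;
+ *	upd.base.old_state = old_state;
+ *	upd.base.dev_priv = dev_priv;
+ *	upd.base.du = vmw_crtc_to_du(plane->state->crtc);
+ *	upd.base.vfb = vmw_framebuffer_to_vfb(plane->state->fb);
+ *	upd.base.calc_fifo_size = my_calc_fifo_size;
+ *	upd.base.pre_clip = my_pre_clip;
+ *	upd.base.clip = my_clip;
+ *	upd.base.post_clip = my_post_clip;
+ *	ret = vmw_du_helper_plane_update(&upd.base);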
+ */
+struct vmw_du_update_plane {
+ /**
+ * @calc_fifo_size: Calculate fifo size.
+ *
+ * Determine the FIFO size needed for the update commands. The number of
+ * damage clips on the display unit is passed in @num_hits so that
+ * sufficient FIFO space can be allocated.
+ *
+ * Return: Fifo size needed
+ */
+ uint32_t (*calc_fifo_size)(struct vmw_du_update_plane *update,
+ uint32_t num_hits);
+
+ /**
+ * @post_prepare: Populate fifo for resource preparation.
+ *
+ * Some surface resources or buffer objects need extra command submission,
+ * such as updating the GB image for a proxy surface or defining a GMRFB
+ * for a screen object. That should be done here, as this callback is
+ * called after FIFO allocation with the address of the command buffer.
+ *
+ * This callback is optional.
+ *
+ * Return: Size of commands populated to command buffer.
+ */
+ uint32_t (*post_prepare)(struct vmw_du_update_plane *update, void *cmd);
+
+ /**
+ * @pre_clip: Populate fifo before clip.
+ *
+ * This is where pre-clip commands such as surface copy/DMA should be
+ * populated.
+ *
+ * This callback is optional.
+ *
+ * Return: Size of commands populated to command buffer.
+ */
+ uint32_t (*pre_clip)(struct vmw_du_update_plane *update, void *cmd,
+ uint32_t num_hits);
+
+ /**
+ * @clip: Populate fifo for clip.
+ *
+ * This is where to populate clips for surface copy/DMA or blit commands
+ * if needed. It is called once per damage clip on the display unit,
+ * which is once when doing a full update. @clip is the damage in
+ * destination (crtc/DU) coordinates, and @src_x, @src_y is the damage
+ * clip src in framebuffer coordinates.
+ *
+ * This callback is optional.
+ *
+ * Return: Size of commands populated to command buffer.
+ */
+ uint32_t (*clip)(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *clip, uint32_t src_x, uint32_t src_y);
+
+ /**
+ * @post_clip: Populate fifo after clip.
+ *
+ * This is where to populate display unit update commands or blit
+ * commands.
+ *
+ * Return: Size of commands populated to command buffer.
+ */
+ uint32_t (*post_clip)(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *bb);
+
+ struct drm_plane *plane;
+ struct drm_plane_state *old_state;
+ struct vmw_private *dev_priv;
+ struct vmw_display_unit *du;
+ struct vmw_framebuffer *vfb;
+ struct vmw_fence_obj **out_fence;
+ struct mutex *mutex;
+ bool cpu_blit;
+ bool intr;
+};
+
+/**
+ * struct vmw_du_update_plane_surface - closure structure for surface
+ * @base: base closure structure.
+ * @cmd_start: FIFO command start address (used by SOU only).
+ */
+struct vmw_du_update_plane_surface {
+ struct vmw_du_update_plane base;
+ /* This member handles the special case of SOU surface update */
+ void *cmd_start;
+};
+/**
+ * struct vmw_du_update_plane_buffer - Closure structure for buffer object
+ * @base: Base closure structure.
+ * @fb_left: x1 for fb damage bounding box.
+ * @fb_top: y1 for fb damage bounding box.
+ */
+struct vmw_du_update_plane_buffer {
+ struct vmw_du_update_plane base;
+ int fb_left, fb_top;
+};
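For illustration, here is a minimal sketch of how a display unit might fill this closure and hand it to vmw_du_helper_plane_update(). The callback documentation above suggests the helper calls calc_fifo_size first, post_prepare and pre_clip after FIFO reservation, clip once per damage clip, and post_clip last. All example_* names and the 64-byte per-clip command size are hypothetical placeholders; the real users are the SOU and STDU implementations later in this patch.

static uint32_t example_fifo_size(struct vmw_du_update_plane *update,
				  uint32_t num_hits)
{
	/* Assume a fixed-size command per damage clip (hypothetical). */
	return 64 * num_hits;
}

static uint32_t example_clip(struct vmw_du_update_plane *update, void *cmd,
			     struct drm_rect *clip, uint32_t src_x,
			     uint32_t src_y)
{
	/*
	 * A real implementation would encode a blit from fb (src_x, src_y)
	 * to the DU rect *clip at cmd; this sketch only reports its size.
	 */
	return 64;
}

static uint32_t example_post_clip(struct vmw_du_update_plane *update,
				  void *cmd, struct drm_rect *bb)
{
	/* Nothing to append after the per-clip commands in this sketch. */
	return 0;
}

static int example_plane_update(struct vmw_private *dev_priv,
				struct drm_plane *plane,
				struct drm_plane_state *old_state,
				struct vmw_framebuffer *vfb,
				struct vmw_fence_obj **out_fence)
{
	struct vmw_du_update_plane update = {
		.plane		= plane,
		.old_state	= old_state,
		.dev_priv	= dev_priv,
		.du		= vmw_crtc_to_du(plane->state->crtc),
		.vfb		= vfb,
		.out_fence	= out_fence,
		.intr		= true,
		.calc_fifo_size	= example_fifo_size,
		.clip		= example_clip,
		.post_clip	= example_post_clip,
	};

	return vmw_du_helper_plane_update(&update);
}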
/**
* struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
@@ -191,8 +307,6 @@ struct vmw_plane_state {
struct vmw_connector_state {
struct drm_connector_state base;
- bool is_implicit;
-
/**
* @gui_x:
*
@@ -254,7 +368,6 @@ struct vmw_display_unit {
int gui_x;
int gui_y;
bool is_implicit;
- bool active_implicit;
int set_gui_x;
int set_gui_y;
};
@@ -334,17 +447,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
struct drm_crtc **p_crtc,
struct drm_display_mode **p_mode);
void vmw_guess_mode_timing(struct drm_display_mode *mode);
-void vmw_kms_del_active(struct vmw_private *dev_priv,
- struct vmw_display_unit *du);
-void vmw_kms_add_active(struct vmw_private *dev_priv,
- struct vmw_display_unit *du,
- struct vmw_framebuffer *vfb);
-bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
- struct drm_crtc *crtc);
-void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
- struct drm_crtc *crtc);
-void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
- bool immutable);
+void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
+void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
/* Universal Plane Helpers */
void vmw_du_primary_plane_destroy(struct drm_plane *plane);
@@ -456,6 +560,20 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
bool interruptible,
struct drm_crtc *crtc);
-int vmw_kms_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx);
+int vmw_du_helper_plane_update(struct vmw_du_update_plane *update);
+
+/**
+ * vmw_du_translate_to_crtc - Translate a rect from framebuffer to crtc
+ * @state: Plane state.
+ * @r: Rectangle to translate.
+ */
+static inline void vmw_du_translate_to_crtc(struct drm_plane_state *state,
+ struct drm_rect *r)
+{
+ int translate_crtc_x = -((state->src_x >> 16) - state->crtc_x);
+ int translate_crtc_y = -((state->src_y >> 16) - state->crtc_y);
+
+ drm_rect_translate(r, translate_crtc_x, translate_crtc_y);
+}
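A short worked example of the helper above, with assumed values (src_x = src_y = 0 in 16.16 fixed point, crtc_x = 100, crtc_y = 50); example_translate() is a hypothetical name:

static inline void example_translate(struct drm_plane_state *state)
{
	/*
	 * Assumed: state->src_x == 0, state->src_y == 0,
	 * state->crtc_x == 100, state->crtc_y == 50.
	 */
	struct drm_rect r = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };

	vmw_du_translate_to_crtc(state, &r);
	/* r is now (100, 50)-(164, 114) in crtc coordinates. */
}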
+
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 4b5378495eea..16be515c4c0f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -233,7 +233,7 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.reset = vmw_du_crtc_reset,
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
- .set_config = vmw_kms_set_config,
+ .set_config = drm_atomic_helper_set_config,
};
@@ -263,13 +263,10 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
- .set_property = vmw_du_connector_set_property,
.destroy = vmw_ldu_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
.atomic_destroy_state = vmw_du_connector_destroy_state,
- .atomic_set_property = vmw_du_connector_atomic_set_property,
- .atomic_get_property = vmw_du_connector_atomic_get_property,
};
static const struct
@@ -416,7 +413,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs);
-
vmw_du_connector_reset(connector);
ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -427,8 +423,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_connector_helper_add(connector, &vmw_ldu_connector_helper_funcs);
connector->status = vmw_du_connector_detect(connector, true);
- vmw_connector_state_to_vcs(connector->state)->is_implicit = true;
-
ret = drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -447,7 +441,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_encoder;
}
-
vmw_du_crtc_reset(crtc);
ret = drm_crtc_init_with_planes(dev, crtc, &ldu->base.primary,
&ldu->base.cursor,
@@ -513,7 +506,7 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
if (ret != 0)
goto err_free;
- vmw_kms_create_implicit_placement_property(dev_priv, true);
+ vmw_kms_create_implicit_placement_property(dev_priv);
if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 333418dc259f..cd586c52af7e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -29,6 +29,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
#define vmw_crtc_to_sou(x) \
@@ -76,6 +77,11 @@ struct vmw_kms_sou_dirty_cmd {
SVGA3dCmdBlitSurfaceToScreen body;
};
+struct vmw_kms_sou_define_gmrfb {
+ uint32_t header;
+ SVGAFifoCmdDefineGMRFB body;
+};
+
/**
* Display unit using screen objects.
*/
@@ -241,28 +247,20 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
sou->buffer = vps->bo;
sou->buffer_size = vps->bo_size;
- if (sou->base.is_implicit) {
- x = crtc->x;
- y = crtc->y;
- } else {
- conn_state = sou->base.connector.state;
- vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+ conn_state = sou->base.connector.state;
+ vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
- x = vmw_conn_state->gui_x;
- y = vmw_conn_state->gui_y;
- }
+ x = vmw_conn_state->gui_x;
+ y = vmw_conn_state->gui_y;
ret = vmw_sou_fifo_create(dev_priv, sou, x, y, &crtc->mode);
if (ret)
DRM_ERROR("Failed to define Screen Object %dx%d\n",
crtc->x, crtc->y);
- vmw_kms_add_active(dev_priv, &sou->base, vfb);
} else {
sou->buffer = NULL;
sou->buffer_size = 0;
-
- vmw_kms_del_active(dev_priv, &sou->base);
}
}
@@ -317,38 +315,14 @@ static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
}
}
-static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *new_fb,
- struct drm_pending_vblank_event *event,
- uint32_t flags,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- int ret;
-
- if (!vmw_kms_crtc_flippable(dev_priv, crtc))
- return -EINVAL;
-
- ret = drm_atomic_helper_page_flip(crtc, new_fb, event, flags, ctx);
- if (ret) {
- DRM_ERROR("Page flip error %d.\n", ret);
- return ret;
- }
-
- if (vmw_crtc_to_du(crtc)->is_implicit)
- vmw_kms_update_implicit_fb(dev_priv, crtc);
-
- return ret;
-}
-
static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_sou_crtc_destroy,
.reset = vmw_du_crtc_reset,
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
- .set_config = vmw_kms_set_config,
- .page_flip = vmw_sou_crtc_page_flip,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
};
/*
@@ -377,13 +351,10 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
- .set_property = vmw_du_connector_set_property,
.destroy = vmw_sou_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
.atomic_destroy_state = vmw_du_connector_destroy_state,
- .atomic_set_property = vmw_du_connector_atomic_set_property,
- .atomic_get_property = vmw_du_connector_atomic_get_property,
};
@@ -498,6 +469,263 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
}
+static uint32_t vmw_sou_bo_fifo_size(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ return sizeof(struct vmw_kms_sou_define_gmrfb) +
+ sizeof(struct vmw_kms_sou_bo_blit) * num_hits;
+}
+
+static uint32_t vmw_sou_bo_define_gmrfb(struct vmw_du_update_plane *update,
+ void *cmd)
+{
+ struct vmw_framebuffer_bo *vfbbo =
+ container_of(update->vfb, typeof(*vfbbo), base);
+ struct vmw_kms_sou_define_gmrfb *gmr = cmd;
+ int depth = update->vfb->base.format->depth;
+
+ /* Emulate RGBA support: contrary to svga_reg.h, this is not
+ * supported by hosts. This is only a problem if we are reading
+ * this value later and expecting what we uploaded back.
+ */
+ if (depth == 32)
+ depth = 24;
+
+ gmr->header = SVGA_CMD_DEFINE_GMRFB;
+
+ gmr->body.format.bitsPerPixel = update->vfb->base.format->cpp[0] * 8;
+ gmr->body.format.colorDepth = depth;
+ gmr->body.format.reserved = 0;
+ gmr->body.bytesPerLine = update->vfb->base.pitches[0];
+ vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &gmr->body.ptr);
+
+ return sizeof(*gmr);
+}
+
+static uint32_t vmw_sou_bo_populate_clip(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *clip,
+ uint32_t fb_x, uint32_t fb_y)
+{
+ struct vmw_kms_sou_bo_blit *blit = cmd;
+
+ blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+ blit->body.destScreenId = update->du->unit;
+ blit->body.srcOrigin.x = fb_x;
+ blit->body.srcOrigin.y = fb_y;
+ blit->body.destRect.left = clip->x1;
+ blit->body.destRect.top = clip->y1;
+ blit->body.destRect.right = clip->x2;
+ blit->body.destRect.bottom = clip->y2;
+
+ return sizeof(*blit);
+}
+
+static uint32_t vmw_sou_bo_post_clip(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *bb)
+{
+ return 0;
+}
+
+/**
+ * vmw_sou_plane_update_bo - Update display unit for bo backed fb.
+ * @dev_priv: Device private.
+ * @plane: Plane to update.
+ * @old_state: Old plane state.
+ * @vfb: Framebuffer which is blitted to display unit.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ * The returned fence pointer may be NULL in which case the device
+ * has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_sou_plane_update_bo(struct vmw_private *dev_priv,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state,
+ struct vmw_framebuffer *vfb,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_du_update_plane_buffer bo_update;
+
+ memset(&bo_update, 0, sizeof(struct vmw_du_update_plane_buffer));
+ bo_update.base.plane = plane;
+ bo_update.base.old_state = old_state;
+ bo_update.base.dev_priv = dev_priv;
+ bo_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+ bo_update.base.vfb = vfb;
+ bo_update.base.out_fence = out_fence;
+ bo_update.base.mutex = NULL;
+ bo_update.base.cpu_blit = false;
+ bo_update.base.intr = true;
+
+ bo_update.base.calc_fifo_size = vmw_sou_bo_fifo_size;
+ bo_update.base.post_prepare = vmw_sou_bo_define_gmrfb;
+ bo_update.base.clip = vmw_sou_bo_populate_clip;
+ bo_update.base.post_clip = vmw_sou_bo_post_clip;
+
+ return vmw_du_helper_plane_update(&bo_update.base);
+}
+
+static uint32_t vmw_sou_surface_fifo_size(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ return sizeof(struct vmw_kms_sou_dirty_cmd) + sizeof(SVGASignedRect) *
+ num_hits;
+}
+
+static uint32_t vmw_sou_surface_post_prepare(struct vmw_du_update_plane *update,
+ void *cmd)
+{
+ struct vmw_du_update_plane_surface *srf_update;
+
+ srf_update = container_of(update, typeof(*srf_update), base);
+
+ /*
+ * SOU SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN is special in the sense that
+ * its bounding box is filled before iterating over all the clips. So
+ * store the FIFO start address and revisit to fill the details.
+ */
+ srf_update->cmd_start = cmd;
+
+ return 0;
+}
+
+static uint32_t vmw_sou_surface_pre_clip(struct vmw_du_update_plane *update,
+ void *cmd, uint32_t num_hits)
+{
+ struct vmw_kms_sou_dirty_cmd *blit = cmd;
+ struct vmw_framebuffer_surface *vfbs;
+
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ blit->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
+ blit->header.size = sizeof(blit->body) + sizeof(SVGASignedRect) *
+ num_hits;
+
+ blit->body.srcImage.sid = vfbs->surface->res.id;
+ blit->body.destScreenId = update->du->unit;
+
+ /* Update the source and destination bounding box later in post_clip */
+ blit->body.srcRect.left = 0;
+ blit->body.srcRect.top = 0;
+ blit->body.srcRect.right = 0;
+ blit->body.srcRect.bottom = 0;
+
+ blit->body.destRect.left = 0;
+ blit->body.destRect.top = 0;
+ blit->body.destRect.right = 0;
+ blit->body.destRect.bottom = 0;
+
+ return sizeof(*blit);
+}
+
+static uint32_t vmw_sou_surface_clip_rect(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *clip,
+ uint32_t src_x, uint32_t src_y)
+{
+ SVGASignedRect *rect = cmd;
+
+ /*
+ * Rects are relative to the destination bounding box rect on the
+ * screen object, so they are translated relative to it later in
+ * post_clip.
+ */
+ rect->left = clip->x1;
+ rect->top = clip->y1;
+ rect->right = clip->x2;
+ rect->bottom = clip->y2;
+
+ return sizeof(*rect);
+}
+
+static uint32_t vmw_sou_surface_post_clip(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *bb)
+{
+ struct vmw_du_update_plane_surface *srf_update;
+ struct drm_plane_state *state = update->plane->state;
+ struct drm_rect src_bb;
+ struct vmw_kms_sou_dirty_cmd *blit;
+ SVGASignedRect *rect;
+ uint32_t num_hits;
+ int translate_src_x;
+ int translate_src_y;
+ int i;
+
+ srf_update = container_of(update, typeof(*srf_update), base);
+
+ blit = srf_update->cmd_start;
+ rect = (SVGASignedRect *)&blit[1];
+
+ num_hits = (blit->header.size - sizeof(blit->body)) /
+ sizeof(SVGASignedRect);
+
+ src_bb = *bb;
+
+ /* To translate bb back to fb src coord */
+ translate_src_x = (state->src_x >> 16) - state->crtc_x;
+ translate_src_y = (state->src_y >> 16) - state->crtc_y;
+
+ drm_rect_translate(&src_bb, translate_src_x, translate_src_y);
+
+ blit->body.srcRect.left = src_bb.x1;
+ blit->body.srcRect.top = src_bb.y1;
+ blit->body.srcRect.right = src_bb.x2;
+ blit->body.srcRect.bottom = src_bb.y2;
+
+ blit->body.destRect.left = bb->x1;
+ blit->body.destRect.top = bb->y1;
+ blit->body.destRect.right = bb->x2;
+ blit->body.destRect.bottom = bb->y2;
+
+ /* rects are relative to dest bb rect */
+ for (i = 0; i < num_hits; i++) {
+ rect->left -= bb->x1;
+ rect->top -= bb->y1;
+ rect->right -= bb->x1;
+ rect->bottom -= bb->y1;
+ rect++;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_sou_plane_update_surface - Update display unit for surface backed fb.
+ * @dev_priv: Device private.
+ * @plane: Plane to update.
+ * @old_state: Old plane state.
+ * @vfb: Framebuffer which is blitted to display unit
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ * The returned fence pointer may be NULL in which case the device
+ * has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_sou_plane_update_surface(struct vmw_private *dev_priv,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state,
+ struct vmw_framebuffer *vfb,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_du_update_plane_surface srf_update;
+
+ memset(&srf_update, 0, sizeof(struct vmw_du_update_plane_surface));
+ srf_update.base.plane = plane;
+ srf_update.base.old_state = old_state;
+ srf_update.base.dev_priv = dev_priv;
+ srf_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+ srf_update.base.vfb = vfb;
+ srf_update.base.out_fence = out_fence;
+ srf_update.base.mutex = &dev_priv->cmdbuf_mutex;
+ srf_update.base.cpu_blit = false;
+ srf_update.base.intr = true;
+
+ srf_update.base.calc_fifo_size = vmw_sou_surface_fifo_size;
+ srf_update.base.post_prepare = vmw_sou_surface_post_prepare;
+ srf_update.base.pre_clip = vmw_sou_surface_pre_clip;
+ srf_update.base.clip = vmw_sou_surface_clip_rect;
+ srf_update.base.post_clip = vmw_sou_surface_post_clip;
+
+ return vmw_du_helper_plane_update(&srf_update.base);
+}
static void
vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
@@ -508,47 +736,28 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
struct vmw_fence_obj *fence = NULL;
int ret;
+ /* In case of device error, maintain consistent atomic state */
if (crtc && plane->state->fb) {
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_framebuffer *vfb =
vmw_framebuffer_to_vfb(plane->state->fb);
- struct drm_vmw_rect vclips;
-
- vclips.x = crtc->x;
- vclips.y = crtc->y;
- vclips.w = crtc->mode.hdisplay;
- vclips.h = crtc->mode.vdisplay;
if (vfb->bo)
- ret = vmw_kms_sou_do_bo_dirty(dev_priv, vfb, NULL,
- &vclips, 1, 1, true,
- &fence, crtc);
+ ret = vmw_sou_plane_update_bo(dev_priv, plane,
+ old_state, vfb, &fence);
else
- ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
- &vclips, NULL, 0, 0,
- 1, 1, &fence, crtc);
-
- /*
- * We cannot really fail this function, so if we do, then output
- * an error and maintain consistent atomic state.
- */
+ ret = vmw_sou_plane_update_surface(dev_priv, plane,
+ old_state, vfb,
+ &fence);
if (ret != 0)
DRM_ERROR("Failed to update screen.\n");
} else {
- /*
- * When disabling a plane, CRTC and FB should always be NULL
- * together, otherwise it's an error.
- * Here primary plane is being disable so should really blank
- * the screen object display unit, if not already done.
- */
+ /* Do nothing when fb and crtc are NULL (blank crtc) */
return;
}
+ /* On error, the vblank event is sent from vmw_du_crtc_atomic_flush */
event = crtc->state->event;
- /*
- * In case of failure and other cases, vblank event will be sent in
- * vmw_du_crtc_atomic_flush.
- */
if (event && fence) {
struct drm_file *file_priv = event->base.file_priv;
@@ -639,7 +848,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
primary = &sou->base.primary;
cursor = &sou->base.cursor;
- sou->base.active_implicit = false;
sou->base.pref_active = (unit == 0);
sou->base.pref_width = dev_priv->initial_width;
sou->base.pref_height = dev_priv->initial_height;
@@ -665,6 +873,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_plane_helper_add(primary, &vmw_sou_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary);
/* Initialize cursor plane */
vmw_du_plane_reset(cursor);
@@ -692,8 +901,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
connector->status = vmw_du_connector_detect(connector, true);
- vmw_connector_state_to_vcs(connector->state)->is_implicit = false;
-
ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -732,12 +939,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
- if (dev_priv->implicit_placement_property)
- drm_object_attach_property
- (&connector->base,
- dev_priv->implicit_placement_property,
- sou->base.is_implicit);
-
return 0;
err_free_unregister:
@@ -763,15 +964,11 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
}
ret = -ENOMEM;
- dev_priv->num_implicit = 0;
- dev_priv->implicit_fb = NULL;
ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
if (unlikely(ret != 0))
return ret;
- vmw_kms_create_implicit_placement_property(dev_priv, false);
-
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
vmw_sou_init(dev_priv, i);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index c3e435f444c1..096c2941a8e4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -30,7 +30,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-
+#include <drm/drm_damage_helper.h>
#define vmw_crtc_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.crtc)
@@ -92,6 +92,10 @@ struct vmw_stdu_surface_copy {
SVGA3dCmdSurfaceCopy body;
};
+struct vmw_stdu_update_gb_image {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBImage body;
+};
/**
* struct vmw_screen_target_display_unit
@@ -396,13 +400,8 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (!crtc->state->enable)
return;
- if (stdu->base.is_implicit) {
- x = crtc->x;
- y = crtc->y;
- } else {
- x = vmw_conn_state->gui_x;
- y = vmw_conn_state->gui_y;
- }
+ x = vmw_conn_state->gui_x;
+ y = vmw_conn_state->gui_y;
vmw_svga_enable(dev_priv);
ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, x, y);
@@ -417,27 +416,9 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
{
}
-
static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- struct drm_plane_state *plane_state = crtc->primary->state;
- struct vmw_private *dev_priv;
- struct vmw_screen_target_display_unit *stdu;
- struct vmw_framebuffer *vfb;
- struct drm_framebuffer *fb;
-
-
- stdu = vmw_crtc_to_stdu(crtc);
- dev_priv = vmw_priv(crtc->dev);
- fb = plane_state->fb;
-
- vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
-
- if (vfb)
- vmw_kms_add_active(dev_priv, &stdu->base, vfb);
- else
- vmw_kms_del_active(dev_priv, &stdu->base);
}
static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -472,49 +453,6 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
}
/**
- * vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
- *
- * @crtc: CRTC to attach FB to
- * @fb: FB to attach
- * @event: Event to be posted. This event should've been alloced
- * using k[mz]alloc, and should've been completely initialized.
- * @page_flip_flags: Input flags.
- *
- * If the STDU uses the same display and content buffers, i.e. a true flip,
- * this function will replace the existing display buffer with the new content
- * buffer.
- *
- * If the STDU uses different display and content buffers, i.e. a blit, then
- * only the content buffer will be updated.
- *
- * RETURNS:
- * 0 on success, error code on failure
- */
-static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *new_fb,
- struct drm_pending_vblank_event *event,
- uint32_t flags,
- struct drm_modeset_acquire_ctx *ctx)
-
-{
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct vmw_screen_target_display_unit *stdu = vmw_crtc_to_stdu(crtc);
- int ret;
-
- if (!stdu->defined || !vmw_kms_crtc_flippable(dev_priv, crtc))
- return -EINVAL;
-
- ret = drm_atomic_helper_page_flip(crtc, new_fb, event, flags, ctx);
- if (ret) {
- DRM_ERROR("Page flip error %d.\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-
-/**
* vmw_stdu_bo_clip - Callback to encode a suface DMA command cliprect
*
* @dirty: The closure structure.
@@ -986,8 +924,8 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
.reset = vmw_du_crtc_reset,
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
- .set_config = vmw_kms_set_config,
- .page_flip = vmw_stdu_crtc_page_flip,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
};
@@ -1042,13 +980,10 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
- .set_property = vmw_du_connector_set_property,
.destroy = vmw_stdu_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
.atomic_destroy_state = vmw_du_connector_destroy_state,
- .atomic_set_property = vmw_du_connector_atomic_set_property,
- .atomic_get_property = vmw_du_connector_atomic_get_property,
};
@@ -1256,11 +1191,402 @@ out_srf_unref:
return ret;
}
+static uint32_t vmw_stdu_bo_fifo_size(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ return sizeof(struct vmw_stdu_dma) + sizeof(SVGA3dCopyBox) * num_hits +
+ sizeof(SVGA3dCmdSurfaceDMASuffix) +
+ sizeof(struct vmw_stdu_update);
+}
+
+static uint32_t vmw_stdu_bo_fifo_size_cpu(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ return sizeof(struct vmw_stdu_update_gb_image) +
+ sizeof(struct vmw_stdu_update);
+}
+
+static uint32_t vmw_stdu_bo_populate_dma(struct vmw_du_update_plane *update,
+ void *cmd, uint32_t num_hits)
+{
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_bo *vfbbo;
+ struct vmw_stdu_dma *cmd_dma = cmd;
+
+ stdu = container_of(update->du, typeof(*stdu), base);
+ vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+ cmd_dma->header.id = SVGA_3D_CMD_SURFACE_DMA;
+ cmd_dma->header.size = sizeof(cmd_dma->body) +
+ sizeof(struct SVGA3dCopyBox) * num_hits +
+ sizeof(SVGA3dCmdSurfaceDMASuffix);
+ vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &cmd_dma->body.guest.ptr);
+ cmd_dma->body.guest.pitch = update->vfb->base.pitches[0];
+ cmd_dma->body.host.sid = stdu->display_srf->res.id;
+ cmd_dma->body.host.face = 0;
+ cmd_dma->body.host.mipmap = 0;
+ cmd_dma->body.transfer = SVGA3D_WRITE_HOST_VRAM;
+
+ return sizeof(*cmd_dma);
+}
+
+static uint32_t vmw_stdu_bo_populate_clip(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *clip,
+ uint32_t fb_x, uint32_t fb_y)
+{
+ struct SVGA3dCopyBox *box = cmd;
+
+ box->srcx = fb_x;
+ box->srcy = fb_y;
+ box->srcz = 0;
+ box->x = clip->x1;
+ box->y = clip->y1;
+ box->z = 0;
+ box->w = drm_rect_width(clip);
+ box->h = drm_rect_height(clip);
+ box->d = 1;
+
+ return sizeof(*box);
+}
+
+static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *bb)
+{
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_bo *vfbbo;
+ SVGA3dCmdSurfaceDMASuffix *suffix = cmd;
+
+ stdu = container_of(update->du, typeof(*stdu), base);
+ vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+ suffix->suffixSize = sizeof(*suffix);
+ suffix->maximumOffset = vfbbo->buffer->base.num_pages * PAGE_SIZE;
+
+ vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
+ bb->y1, bb->y2);
+
+ return sizeof(*suffix) + sizeof(struct vmw_stdu_update);
+}
+
+static uint32_t vmw_stdu_bo_pre_clip_cpu(struct vmw_du_update_plane *update,
+ void *cmd, uint32_t num_hits)
+{
+ struct vmw_du_update_plane_buffer *bo_update =
+ container_of(update, typeof(*bo_update), base);
+
+ bo_update->fb_left = INT_MAX;
+ bo_update->fb_top = INT_MAX;
+
+ return 0;
+}
+
+static uint32_t vmw_stdu_bo_clip_cpu(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *clip,
+ uint32_t fb_x, uint32_t fb_y)
+{
+ struct vmw_du_update_plane_buffer *bo_update =
+ container_of(update, typeof(*bo_update), base);
+
+ bo_update->fb_left = min_t(int, bo_update->fb_left, fb_x);
+ bo_update->fb_top = min_t(int, bo_update->fb_top, fb_y);
+
+ return 0;
+}
+
+static uint32_t
+vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *bb)
+{
+ struct vmw_du_update_plane_buffer *bo_update;
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_bo *vfbbo;
+ struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(0);
+ struct vmw_stdu_update_gb_image *cmd_img = cmd;
+ struct vmw_stdu_update *cmd_update;
+ struct ttm_buffer_object *src_bo, *dst_bo;
+ u32 src_offset, dst_offset;
+ s32 src_pitch, dst_pitch;
+ s32 width, height;
+
+ bo_update = container_of(update, typeof(*bo_update), base);
+ stdu = container_of(update->du, typeof(*stdu), base);
+ vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+ width = bb->x2 - bb->x1;
+ height = bb->y2 - bb->y1;
+
+ diff.cpp = stdu->cpp;
+
+ dst_bo = &stdu->display_srf->res.backup->base;
+ dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
+ dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
+
+ src_bo = &vfbbo->buffer->base;
+ src_pitch = update->vfb->base.pitches[0];
+ src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left *
+ stdu->cpp;
+
+ (void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch, src_bo,
+ src_offset, src_pitch, width * stdu->cpp, height,
+ &diff);
+
+ if (drm_rect_visible(&diff.rect)) {
+ SVGA3dBox *box = &cmd_img->body.box;
+
+ cmd_img->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+ cmd_img->header.size = sizeof(cmd_img->body);
+ cmd_img->body.image.sid = stdu->display_srf->res.id;
+ cmd_img->body.image.face = 0;
+ cmd_img->body.image.mipmap = 0;
+
+ box->x = diff.rect.x1;
+ box->y = diff.rect.y1;
+ box->z = 0;
+ box->w = drm_rect_width(&diff.rect);
+ box->h = drm_rect_height(&diff.rect);
+ box->d = 1;
+
+ cmd_update = (struct vmw_stdu_update *)&cmd_img[1];
+ vmw_stdu_populate_update(cmd_update, stdu->base.unit,
+ diff.rect.x1, diff.rect.x2,
+ diff.rect.y1, diff.rect.y2);
+
+ return sizeof(*cmd_img) + sizeof(*cmd_update);
+ }
+
+ return 0;
+}
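The offset computations above are plain row-major addressing. As a worked example with hypothetical numbers: for a display surface 1024 pixels wide at 4 bytes per pixel (dst_pitch = 4096) and a damage bounding box whose top-left corner is (8, 2), dst_offset = 2 * 4096 + 8 * 4 = 8224 bytes; src_offset is formed the same way from the smallest fb_left/fb_top recorded while iterating the clips.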
+
+/**
+ * vmw_stdu_plane_update_bo - Update display unit for bo backed fb.
+ * @dev_priv: device private.
+ * @plane: plane to update.
+ * @old_state: old plane state.
+ * @vfb: framebuffer which is blitted to display unit.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ * The returned fence pointer may be NULL in which case the device
+ * has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_stdu_plane_update_bo(struct vmw_private *dev_priv,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state,
+ struct vmw_framebuffer *vfb,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_du_update_plane_buffer bo_update;
+
+ memset(&bo_update, 0, sizeof(struct vmw_du_update_plane_buffer));
+ bo_update.base.plane = plane;
+ bo_update.base.old_state = old_state;
+ bo_update.base.dev_priv = dev_priv;
+ bo_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+ bo_update.base.vfb = vfb;
+ bo_update.base.out_fence = out_fence;
+ bo_update.base.mutex = NULL;
+ bo_update.base.cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
+ bo_update.base.intr = false;
+
+ /*
+ * VMs without 3D support don't have the surface DMA command, and the
+ * framebuffer should be moved out of VRAM.
+ */
+ if (bo_update.base.cpu_blit) {
+ bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size_cpu;
+ bo_update.base.pre_clip = vmw_stdu_bo_pre_clip_cpu;
+ bo_update.base.clip = vmw_stdu_bo_clip_cpu;
+ bo_update.base.post_clip = vmw_stdu_bo_populate_update_cpu;
+ } else {
+ bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size;
+ bo_update.base.pre_clip = vmw_stdu_bo_populate_dma;
+ bo_update.base.clip = vmw_stdu_bo_populate_clip;
+ bo_update.base.post_clip = vmw_stdu_bo_populate_update;
+ }
+
+ return vmw_du_helper_plane_update(&bo_update.base);
+}
+
+static uint32_t
+vmw_stdu_surface_fifo_size_same_display(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ struct vmw_framebuffer_surface *vfbs;
+ uint32_t size = 0;
+
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ if (vfbs->is_bo_proxy)
+ size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
+
+ size += sizeof(struct vmw_stdu_update);
+
+ return size;
+}
+
+static uint32_t vmw_stdu_surface_fifo_size(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ struct vmw_framebuffer_surface *vfbs;
+ uint32_t size = 0;
+
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ if (vfbs->is_bo_proxy)
+ size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
+
+ size += sizeof(struct vmw_stdu_surface_copy) + sizeof(SVGA3dCopyBox) *
+ num_hits + sizeof(struct vmw_stdu_update);
+
+ return size;
+}
+
+static uint32_t
+vmw_stdu_surface_update_proxy(struct vmw_du_update_plane *update, void *cmd)
+{
+ struct vmw_framebuffer_surface *vfbs;
+ struct drm_plane_state *state = update->plane->state;
+ struct drm_plane_state *old_state = update->old_state;
+ struct vmw_stdu_update_gb_image *cmd_update = cmd;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ uint32_t copy_size = 0;
+
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ /*
+ * A proxy surface is special: a buffer-object-backed fb is wrapped in
+ * a surface and needs an update GB image command to sync with the
+ * device.
+ */
+ drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ SVGA3dBox *box = &cmd_update->body.box;
+
+ cmd_update->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+ cmd_update->header.size = sizeof(cmd_update->body);
+ cmd_update->body.image.sid = vfbs->surface->res.id;
+ cmd_update->body.image.face = 0;
+ cmd_update->body.image.mipmap = 0;
+
+ box->x = clip.x1;
+ box->y = clip.y1;
+ box->z = 0;
+ box->w = drm_rect_width(&clip);
+ box->h = drm_rect_height(&clip);
+ box->d = 1;
+
+ copy_size += sizeof(*cmd_update);
+ cmd_update++;
+ }
+
+ return copy_size;
+}
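The iteration above follows the generic damage-clip pattern from <drm/drm_damage_helper.h>; if no damage clips are attached to the plane state, the iterator is expected to yield a single full-plane rectangle, which matches the full-update case described for vmw_du_update_plane. A minimal sketch, with example_count_damage() as a hypothetical name:

#include <drm/drm_damage_helper.h>

static u32 example_count_damage(struct drm_plane_state *old_state,
				struct drm_plane_state *new_state)
{
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	u32 num_hits = 0;

	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;	/* one command slot per damage clip */

	return num_hits;
}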
+
+static uint32_t
+vmw_stdu_surface_populate_copy(struct vmw_du_update_plane *update, void *cmd,
+ uint32_t num_hits)
+{
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_surface *vfbs;
+ struct vmw_stdu_surface_copy *cmd_copy = cmd;
+
+ stdu = container_of(update->du, typeof(*stdu), base);
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ cmd_copy->header.id = SVGA_3D_CMD_SURFACE_COPY;
+ cmd_copy->header.size = sizeof(cmd_copy->body) + sizeof(SVGA3dCopyBox) *
+ num_hits;
+ cmd_copy->body.src.sid = vfbs->surface->res.id;
+ cmd_copy->body.dest.sid = stdu->display_srf->res.id;
+
+ return sizeof(*cmd_copy);
+}
+
+static uint32_t
+vmw_stdu_surface_populate_clip(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *clip, uint32_t fb_x,
+ uint32_t fb_y)
+{
+ struct SVGA3dCopyBox *box = cmd;
+
+ box->srcx = fb_x;
+ box->srcy = fb_y;
+ box->srcz = 0;
+ box->x = clip->x1;
+ box->y = clip->y1;
+ box->z = 0;
+ box->w = drm_rect_width(clip);
+ box->h = drm_rect_height(clip);
+ box->d = 1;
+
+ return sizeof(*box);
+}
+
+static uint32_t
+vmw_stdu_surface_populate_update(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *bb)
+{
+ vmw_stdu_populate_update(cmd, update->du->unit, bb->x1, bb->x2, bb->y1,
+ bb->y2);
+ return sizeof(struct vmw_stdu_update);
+}
/**
- * vmw_stdu_primary_plane_atomic_update - formally switches STDU to new plane
+ * vmw_stdu_plane_update_surface - Update display unit for surface backed fb
+ * @dev_priv: Device private
+ * @plane: Plane to update
+ * @old_state: Old plane state
+ * @vfb: Framebuffer which is blitted to display unit
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ * The returned fence pointer may be NULL in which case the device
+ * has already synchronized.
*
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state,
+ struct vmw_framebuffer *vfb,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_du_update_plane srf_update;
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_surface *vfbs;
+
+ stdu = vmw_crtc_to_stdu(plane->state->crtc);
+ vfbs = container_of(vfb, typeof(*vfbs), base);
+
+ memset(&srf_update, 0, sizeof(struct vmw_du_update_plane));
+ srf_update.plane = plane;
+ srf_update.old_state = old_state;
+ srf_update.dev_priv = dev_priv;
+ srf_update.du = vmw_crtc_to_du(plane->state->crtc);
+ srf_update.vfb = vfb;
+ srf_update.out_fence = out_fence;
+ srf_update.mutex = &dev_priv->cmdbuf_mutex;
+ srf_update.cpu_blit = false;
+ srf_update.intr = true;
+
+ if (vfbs->is_bo_proxy)
+ srf_update.post_prepare = vmw_stdu_surface_update_proxy;
+
+ if (vfbs->surface->res.id != stdu->display_srf->res.id) {
+ srf_update.calc_fifo_size = vmw_stdu_surface_fifo_size;
+ srf_update.pre_clip = vmw_stdu_surface_populate_copy;
+ srf_update.clip = vmw_stdu_surface_populate_clip;
+ } else {
+ srf_update.calc_fifo_size =
+ vmw_stdu_surface_fifo_size_same_display;
+ }
+
+ srf_update.post_clip = vmw_stdu_surface_populate_update;
+
+ return vmw_du_helper_plane_update(&srf_update);
+}
+
+/**
+ * vmw_stdu_primary_plane_atomic_update - formally switches STDU to new plane
* @plane: display plane
* @old_state: Only used to get crtc info
*
@@ -1277,17 +1603,14 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
struct drm_crtc *crtc = plane->state->crtc;
struct vmw_screen_target_display_unit *stdu;
struct drm_pending_vblank_event *event;
+ struct vmw_fence_obj *fence = NULL;
struct vmw_private *dev_priv;
int ret;
- /*
- * We cannot really fail this function, so if we do, then output an
- * error and maintain consistent atomic state.
- */
+ /* In case of device error, maintain consistent atomic state */
if (crtc && plane->state->fb) {
struct vmw_framebuffer *vfb =
vmw_framebuffer_to_vfb(plane->state->fb);
- struct drm_vmw_rect vclips;
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
@@ -1295,23 +1618,17 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
stdu->content_fb_type = vps->content_fb_type;
stdu->cpp = vps->cpp;
- vclips.x = crtc->x;
- vclips.y = crtc->y;
- vclips.w = crtc->mode.hdisplay;
- vclips.h = crtc->mode.vdisplay;
-
ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
if (ret)
DRM_ERROR("Failed to bind surface to STDU.\n");
if (vfb->bo)
- ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL,
- &vclips, 1, 1, true, false,
- crtc);
+ ret = vmw_stdu_plane_update_bo(dev_priv, plane,
+ old_state, vfb, &fence);
else
- ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL,
- &vclips, NULL, 0, 0,
- 1, 1, NULL, crtc);
+ ret = vmw_stdu_plane_update_surface(dev_priv, plane,
+ old_state, vfb,
+ &fence);
if (ret)
DRM_ERROR("Failed to update STDU.\n");
} else {
@@ -1319,12 +1636,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
- /*
- * When disabling a plane, CRTC and FB should always be NULL
- * together, otherwise it's an error.
- * Here primary plane is being disable so blank the screen
- * target display unit, if not already done.
- */
+ /* Blank STDU when fb and crtc are NULL */
if (!stdu->defined)
return;
@@ -1339,36 +1651,25 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
return;
}
+ /* On error, the vblank event is sent in vmw_du_crtc_atomic_flush */
event = crtc->state->event;
- /*
- * In case of failure and other cases, vblank event will be sent in
- * vmw_du_crtc_atomic_flush.
- */
- if (event && (ret == 0)) {
- struct vmw_fence_obj *fence = NULL;
+ if (event && fence) {
struct drm_file *file_priv = event->base.file_priv;
- vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-
- /*
- * If fence is NULL, then already sync.
- */
- if (fence) {
- ret = vmw_event_fence_action_queue(
- file_priv, fence, &event->base,
- &event->event.vbl.tv_sec,
- &event->event.vbl.tv_usec,
- true);
- if (ret)
- DRM_ERROR("Failed to queue event on fence.\n");
- else
- crtc->state->event = NULL;
-
- vmw_fence_obj_unreference(&fence);
- }
- } else {
- (void) vmw_fifo_flush(dev_priv, false);
+ ret = vmw_event_fence_action_queue(file_priv,
+ fence,
+ &event->base,
+ &event->event.vbl.tv_sec,
+ &event->event.vbl.tv_usec,
+ true);
+ if (ret)
+ DRM_ERROR("Failed to queue event on fence.\n");
+ else
+ crtc->state->event = NULL;
}
+
+ if (fence)
+ vmw_fence_obj_unreference(&fence);
}
@@ -1456,11 +1757,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
stdu->base.pref_active = (unit == 0);
stdu->base.pref_width = dev_priv->initial_width;
stdu->base.pref_height = dev_priv->initial_height;
-
- /*
- * Remove this after enabling atomic because property values can
- * only exist in a state object
- */
stdu->base.is_implicit = false;
/* Initialize primary plane */
@@ -1477,6 +1773,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_plane_helper_add(primary, &vmw_stdu_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary);
/* Initialize cursor plane */
vmw_du_plane_reset(cursor);
@@ -1505,7 +1802,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
drm_connector_helper_add(connector, &vmw_stdu_connector_helper_funcs);
connector->status = vmw_du_connector_detect(connector, false);
- vmw_connector_state_to_vcs(connector->state)->is_implicit = false;
ret = drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -1543,11 +1839,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
- if (dev_priv->implicit_placement_property)
- drm_object_attach_property
- (&connector->base,
- dev_priv->implicit_placement_property,
- stdu->base.is_implicit);
return 0;
err_free_unregister:
@@ -1616,8 +1907,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
dev_priv->active_display_unit = vmw_du_screen_target;
- vmw_kms_create_implicit_placement_property(dev_priv, false);
-
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
ret = vmw_stdu_init(dev_priv, i);